一、安裝Percona數據庫
1. 離線安裝Percona
進入RPM安裝文件目錄,執行下面的腳本
yum localinstall *.rpm
管理MySQL服務
systemctl start mysqld
systemctl stop mysqld
systemctl restart mysqld
2. 在線安裝Percona
使用yum命令安裝
yum install http://www.percona.com/downloads/percona-release/redhat/0.1-3/percona-release-0.1-3.noarch.rpm
yum install Percona-Server-server-57
管理MySQL服務
service mysql start
service mysql stop
service mysql restart
3. 開放防火牆端口
firewall-cmd --zone=public --add-port=3306/tcp --permanent
firewall-cmd --reload
4. 修改MySQL配置文件
vi /etc/my.cnf
[mysqld]
character_set_server = utf8
bind-address = 0.0.0.0
#跳過DNS解析
skip-name-resolve
service mysql restart
5. 禁止開機啓動MySQL
chkconfig mysqld off
6. 初始化MySQL數據庫
查看MySQL初始密碼
cat /var/log/mysqld.log | grep "A temporary password"
修改MySQL密碼
mysql_secure_installation
創建遠程管理員賬戶
mysql -u root -p
CREATE USER 'admin'@'%' IDENTIFIED BY 'Abc_123456';
GRANT all privileges ON *.* TO 'admin'@'%';
FLUSH PRIVILEGES;
二、創建PXC集羣
1. 刪除MariaDB程序包
yum -y remove mari*
2. 開放防火牆端口
firewall-cmd --zone=public --add-port=3306/tcp --permanent
firewall-cmd --zone=public --add-port=4444/tcp --permanent
firewall-cmd --zone=public --add-port=4567/tcp --permanent
firewall-cmd --zone=public --add-port=4568/tcp --permanent
3. 關閉SELINUX
vi /etc/selinux/config
把SELINUX屬性值設置成disabled
reboot
4. 離線安裝PXC
進入RPM文件目錄,執行安裝命令
yum localinstall *.rpm
修改MySQL配置文件、創建賬戶等操作
5. 創建PXC集羣
停止MySQL服務
修改每個PXC節點的/etc/my.cnf文件(在不同節點上,注意調整文件內容)
server-id=1 #PXC集羣中MySQL實例的唯一ID,不能重複,必須是數字
wsrep_provider=/usr/lib64/galera3/libgalera_smm.so
wsrep_cluster_name=pxc-cluster #PXC集羣的名稱
wsrep_cluster_address=gcomm://192.168.99.151,192.168.99.159,192.168.99.215
wsrep_node_name=pxc1 #當前節點的名稱
wsrep_node_address=192.168.99.151 #當前節點的IP
wsrep_sst_method=xtrabackup-v2 #同步方法(mysqldump、rsync、xtrabackup)
wsrep_sst_auth= admin:Abc_123456 #同步使用的帳戶
pxc_strict_mode=ENFORCING #同步嚴厲模式
binlog_format=ROW #基於ROW複製(安全可靠)
default_storage_engine=InnoDB #默認引擎
innodb_autoinc_lock_mode=2 #主鍵自增長不鎖表
主節點的管理命令(第一個啓動的PXC節點)
systemctl start [email protected]
systemctl stop [email protected]
systemctl restart [email protected]
非主節點的管理命令(非第一個啓動的PXC節點)
service mysql start
service mysql stop
service mysql restart
查看PXC集羣狀態信息
show status like 'wsrep_cluster%' ;
* **按照上述配置方法,創建兩組PXC集羣**
6. PXC節點啓動與關閉
* 如果最後關閉的PXC節點是安全退出的,那麼下次啓動要最先啓動這個節點,而且要以主節點啓動
* 如果最後關閉的PXC節點不是安全退出的,那麼要先修改`/var/lib/mysql/grastate.dat` 文件,把其中的`safe_to_bootstrap`屬性值設置爲1,再按照主節點啓動
三、安裝MyCat
1. JDK安裝與配置
安裝JDK
#搜索JDK版本
yum search jdk
#安裝JDK1.8開發版
yum install java-1.8.0-openjdk-devel.x86_64
* 配置環境變量
#查看JDK安裝路徑
ls -lrt /etc/alternatives/java
vi /etc/profile
#在文件結尾加上JDK路徑,例如export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.171-8.b10.el7_5.x86_64/
source /etc/profile
2. 創建數據表
在兩組PXC集羣中分別創建t_user數據表
CREATE TABLE t_user(
id INT UNSIGNED PRIMARY KEY,
username VARCHAR(200) NOT NULL,
password VARCHAR(2000) NOT NULL,
tel CHAR(11) NOT NULL,
locked TINYINT(1) UNSIGNED NOT NULL DEFAULT 0,
INDEX idx_username(username) USING BTREE,
UNIQUE INDEX unq_username(username) USING BTREE
);
3. MyCat安裝與配置
1. 下載MyCat
http://dl.mycat.io/1.6.5/Mycat-server-1.6.5-release-20180122220033-linux.tar.gz
2. 上傳MyCat壓縮包到虛擬機
3. 解壓縮MyCat(注意:上面下載的是.tar.gz壓縮包,應使用tar命令解壓;若下載的是zip包,先執行yum install unzip再用unzip解壓)
tar -zxvf MyCat壓縮包名稱
4. 開放防火牆8066和9066端口,關閉SELINUX
5. 修改MyCat的bin目錄中所有.sh文件的權限
chmod -R 777 ./*.sh
6. MyCat啓動與關閉
#cd MyCat的bin目錄
./startup_nowrap.sh #啓動MyCat
ps -aux #查看系統進程
kill -9 MyCat進程編號
7. 修改server.xml文件,設置MyCat帳戶和虛擬邏輯庫
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE mycat:server SYSTEM "server.dtd">
<mycat:server xmlns:mycat="http://io.mycat/">
<system>
<property name="nonePasswordLogin">0</property>
<property name="useHandshakeV10">1</property>
<property name="useSqlStat">0</property>
<property name="useGlobleTableCheck">0</property>
<property name="sequnceHandlerType">2</property>
<property name="subqueryRelationshipCheck">false</property>
<property name="processorBufferPoolType">0</property>
<property name="handleDistributedTransactions">0</property>
<property name="useOffHeapForMerge">1</property>
<property name="memoryPageSize">64k</property>
<property name="spillsFileBufferSize">1k</property>
<property name="useStreamOutput">0</property>
<property name="systemReserveMemorySize">384m</property>
<property name="useZKSwitch">false</property>
</system>
<!--這裏是設置的admin用戶和虛擬邏輯庫-->
<user name="admin" defaultAccount="true">
<property name="password">Abc_123456</property>
<property name="schemas">test</property>
</user>
</mycat:server>
8. 修改schema.xml文件,設置數據庫連接和虛擬數據表
<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
<!--配置數據表-->
<schema name="test" checkSQLschema="false" sqlMaxLimit="100">
<table name="t_user" dataNode="dn1,dn2" rule="mod-long" />
</schema>
<!--配置分片關係-->
<dataNode name="dn1" dataHost="cluster1" database="test" />
<dataNode name="dn2" dataHost="cluster2" database="test" />
<!--配置連接信息-->
<dataHost name="cluster1" maxCon="1000" minCon="10" balance="2"
writeType="1" dbType="mysql" dbDriver="native" switchType="1"
slaveThreshold="100">
<heartbeat>select user()</heartbeat>
<writeHost host="W1" url="192.168.99.151:3306" user="admin"
password="Abc_123456">
<readHost host="W1R1" url="192.168.99.159:3306" user="admin"
password="Abc_123456" />
<readHost host="W1R2" url="192.168.99.215:3306" user="admin"
password="Abc_123456" />
</writeHost>
<writeHost host="W2" url="192.168.99.159:3306" user="admin"
password="Abc_123456">
<readHost host="W2R1" url="192.168.99.151:3306" user="admin"
password="Abc_123456" />
<readHost host="W2R2" url="192.168.99.215:3306" user="admin"
password="Abc_123456" />
</writeHost>
</dataHost>
<dataHost name="cluster2" maxCon="1000" minCon="10" balance="2"
writeType="1" dbType="mysql" dbDriver="native" switchType="1"
slaveThreshold="100">
<heartbeat>select user()</heartbeat>
<writeHost host="W1" url="192.168.99.121:3306" user="admin"
password="Abc_123456">
<readHost host="W1R1" url="192.168.99.122:3306" user="admin"
password="Abc_123456" />
<readHost host="W1R2" url="192.168.99.123:3306" user="admin"
password="Abc_123456" />
</writeHost>
<writeHost host="W2" url="192.168.99.122:3306" user="admin"
password="Abc_123456">
<readHost host="W2R1" url="192.168.99.121:3306" user="admin"
password="Abc_123456" />
<readHost host="W2R2" url="192.168.99.123:3306" user="admin"
password="Abc_123456" />
</writeHost>
</dataHost>
</mycat:schema>
9. 修改rule.xml文件,把mod-long的count值修改成2
<function name="mod-long" class="io.mycat.route.function.PartitionByMod">
<property name="count">2</property>
</function>
10. 重啓MyCat
11. 向t_user表寫入數據,感受數據的切分
USE test;
#第一條記錄被切分到第二個分片
INSERT INTO t_user(id,username,password,tel,locked) VALUES(1,"A",HEX(AES_ENCRYPT('123456','HelloWorld')),"13811111111",0);
#第二條記錄被切分到第一個分片
INSERT INTO t_user(id,username,password,tel,locked) VALUES(2,"B",HEX(AES_ENCRYPT('123456','HelloWorld')),"13822222222",0);
4. 配置父子表
1. 在conf目錄下創建`customer-hash-int.txt`文件(需與rule.xml中mapFile屬性的文件名一致),內容如下:
101=0
102=0
103=0
104=1
105=1
106=1
2. 在rule.xml文件中加入自定義<function>和<tableRule>
<function name="customer-hash-int"
class="io.mycat.route.function.PartitionByFileMap">
<property name="mapFile">customer-hash-int.txt</property>
</function>
<tableRule name="sharding-customer">
<rule>
<columns>sharding_id</columns>
<algorithm>customer-hash-int</algorithm>
</rule>
</tableRule>
3. 修改schema.xml文件,添加父子表定義
<table name="t_customer" dataNode="dn1,dn2" rule="sharding-customer">
<childTable name="t_orders" primaryKey="ID" joinKey="customer_id"
parentKey="id"/>
</table>
4. 在MyCat上執行如下SQL:
USE test;
CREATE TABLE t_customer(
id INT UNSIGNED PRIMARY KEY,
username VARCHAR(200) NOT NULL,
sharding_id INT NOT NULL
);
CREATE TABLE t_orders(
id INT UNSIGNED PRIMARY KEY,
customer_id INT NOT NULL,
datetime TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
5. 向t_customer表和t_orders表寫入數據,查看子表數據跟隨父表切分到同一個分片
5. 創建雙機熱備的MyCat集羣
用兩個虛擬機實例,各自部署MyCat
用一個虛擬機實例部署Haproxy
安裝Haproxy
yum install -y haproxy
* 編輯配置文件
vi /etc/haproxy/haproxy.cfg
```
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
listen admin_stats
bind 0.0.0.0:4001
mode http
stats uri /dbs
stats realm Global\ statistics
stats auth admin:abc123456
listen proxy-mysql
bind 0.0.0.0:3306
mode tcp
balance roundrobin
option tcplog #日誌格式
server mycat_1 192.168.99.131:3306 check port 8066 maxconn 2000
server mycat_2 192.168.99.132:3306 check port 8066 maxconn 2000
option tcpka #使用keepalive檢測死鏈
啓動Haproxy
service haproxy start
訪問Haproxy監控畫面
http://192.168.99.131:4001/dbs
3. 用另外一個虛擬機同樣按照上述操作安裝Haproxy
4. 在某個Haproxy虛擬機實例上部署Keepalived
開啓防火牆的VRRP協議
#開啓VRRP
firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0 --protocol vrrp -j ACCEPT
#應用設置
firewall-cmd --reload
安裝Keepalived
yum install -y keepalived
編輯配置文件
vim /etc/keepalived/keepalived.conf
```
```
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 123456
}
virtual_ipaddress {
192.168.99.133
}
}
啓動Keepalived
service keepalived start
ping 192.168.99.133
5. 在另外一個Haproxy虛擬機上,按照上述方法部署Keepalived
6. 使用MySQL客戶端連接192.168.99.133,執行增刪改查數據
四、Sysbench基準測試
1. 安裝Sysbench
在線安裝
curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.rpm.sh | sudo bash
yum -y install sysbench
本地安裝
下載壓縮文件
https://codeload.github.com/akopytov/sysbench/zip/1.0
安裝依賴包
yum install -y automake libtool
yum install -y mysql-devel
執行安裝
#cd sysbench
./autogen.sh
./configure
make
make install
sysbench --version
2. 執行測試
準備測試庫
sysbench /usr/share/sysbench/tests/include/oltp_legacy/oltp.lua --mysql-host=192.168.99.131 --mysql-port=3306 --mysql-user=admin --mysql-password=Abc_123456 --oltp-tables-count=10 --oltp-table-size=100000 prepare
執行測試
sysbench /usr/share/sysbench/tests/include/oltp_legacy/oltp.lua --mysql-host=192.168.99.131 --mysql-port=3306 --mysql-user=admin --mysql-password=Abc_123456 --oltp-test-mode=complex --threads=10 --time=300 --report-interval=10 run >> /home/mysysbench.log
清理數據
sysbench /usr/share/sysbench/tests/include/oltp_legacy/oltp.lua --mysql-host=192.168.99.131 --mysql-port=3306 --mysql-user=admin --mysql-password=Abc_123456 --oltp-tables-count=10 cleanup
五、tpcc-mysql 壓力測試
1. 準備工作
修改my.cnf配置文件
vi /etc/my.cnf
pxc_strict_mode=DISABLED
修改某個Haproxy的配置文件
server mysql_1 192.168.99.151:3306 check port 3306 weight 1 maxconn 2000
server mysql_2 192.168.99.159:3306 check port 3306 weight 1 maxconn 2000
server mysql_3 192.168.99.215:3306 check port 3306 weight 1 maxconn 2000
重新啓動Haproxy
安裝依賴程序包
yum install -y gcc
yum install -y mysql-devel
2. 安裝tpcc-mysql
下載壓縮包
https://codeload.github.com/Percona-Lab/tpcc-mysql/zip/master
執行安裝
#cd tpcc的src目錄
make
執行`create_table.sql`和`add_fkey_idx.sql`兩個文件
執行數據初始化
./tpcc_load -h 192.168.99.131 -d tpcc -u admin -p Abc_123456 -w 1
執行壓力測試
./tpcc_start -h 192.168.99.131 -d tpcc -u admin -p Abc_123456 -w 1 -c 5 -r 300 -l 600 ->tpcc-output-log
六、導入數據
1. 生成1000萬條數據
import java.io.FileWriter
import java.io.BufferedWriter
// Generates 10 million CSV rows ("id,測試數據") into D:/data.txt on a Windows
// workstation; the file is then uploaded to Linux and split for parallel import.
class Test {
	def static void main(String[] args) {
		var writer=new FileWriter("D:/data.txt")
		var buff=new BufferedWriter(writer)
		// one line per row: numeric id, comma, fixed name column
		for(i:1..10000000){
			buff.write(i+",測試數據\n")
		}
		// close() flushes the buffer before releasing the underlying writer
		buff.close
		writer.close
	}
}
2. 執行文件切分
上傳data.txt文件到linux
執行文件切分
split -l 1000000 -d data.txt
3. 準備數據庫
每個PXC分片只開啓一個節點
修改PXC節點文件,然後重啓PXC服務
innodb_flush_log_at_trx_commit = 0
innodb_flush_method = O_DIRECT
innodb_buffer_pool_size = 200M
創建t_test數據表
CREATE TABLE t_test(
id INT UNSIGNED PRIMARY KEY,
name VARCHAR(200) NOT NULL
);
配置MyCat
<table name="t_test" dataNode="dn1,dn2" rule="mod-long" />
```
```xml
<dataHost name="cluster1" maxCon="1000" minCon="10" balance="0" writeType="1"
dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
<heartbeat>select user()</heartbeat>
<writeHost host="W1" url="192.168.99.151:3306" user="admin"
password="Abc_123456"/>
</dataHost>
<dataHost name="cluster2" maxCon="1000" minCon="10" balance="0" writeType="1"
dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
<heartbeat>select user()</heartbeat>
<writeHost host="W1" url="192.168.99.121:3306" user="admin"
password="Abc_123456"/>
</dataHost>
4. 執行Java程序,多線程導入數據
import org.eclipse.xtend.lib.annotations.Accessors
import java.io.File
import java.sql.DriverManager
// Imports one split data file into the sharded t_test table by sending a
// LOAD DATA LOCAL INFILE statement through MyCat (port 8066, logical schema "test").
// Each Task handles exactly one file; on completion it notifies LoadData so the
// coordinator can shut the pool down once every file has been imported.
class Task implements Runnable{
	@Accessors
	File file; // the split file (produced by `split`) this task imports
	override run() {
		var url="jdbc:mysql://192.168.99.131:8066/test"
		var username="admin"
		var password="Abc_123456"
		var con=DriverManager.getConnection(url,username,password)
		try {
			// Fixed: original read "intfile", which is not valid MySQL syntax;
			// the correct keyword is INFILE. IGNORE skips rows with duplicate keys.
			var sql='''
				load data local infile '/home/data/«file.name»' ignore into table t_test
				character set 'utf8'
				fields terminated by ',' optionally enclosed by '\"'
				lines terminated by '\n' (id,name);
			'''
			var pst=con.prepareStatement(sql);
			pst.execute
		} finally {
			// always release the connection, even if the import fails
			con.close
		}
		// tell the coordinator one more file has finished
		LoadData.updateNum();
	}
}
import com.mysql.jdbc.Driver
import java.sql.DriverManager
import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.ThreadPoolExecutor
import java.util.concurrent.TimeUnit
import java.io.File
// Coordinator: walks /home/data, submits one Task per split file to a small
// thread pool, and shuts the pool down after the last task reports completion.
class LoadData {
	var static int num=0; // count of tasks that have finished (guarded by updateNum's lock)
	var static int end=0; // total number of files; shutdown threshold
	// 1 core / 5 max threads, 60s keep-alive, bounded queue of 200 pending tasks
	var static pool=new ThreadPoolExecutor(1,5,60,TimeUnit.SECONDS,new LinkedBlockingQueue(200))
	def static void main(String[] args) {
		// register the MySQL JDBC driver before any Task opens a connection
		DriverManager.registerDriver(new Driver)
		var folder=new File("/home/data")
		var files=folder.listFiles
		end=files.length // shutdown condition for the thread pool
		files.forEach[one|
			var task=new Task();
			task.file=one;
			pool.execute(task)
		]
	}
	// Called by each Task when its import completes; synchronized so the
	// num counter and the shutdown decision are race-free.
	synchronized def static updateNum(){
		num++;
		if(num==end){
			pool.shutdown();
			println("執行結束")
		}
	}
}
七、大數據歸檔
1. 安裝TokuDB
安裝jemalloc
yum install -y jemalloc
編輯配置文件
vi /etc/my.cnf
……
[mysqld_safe]
malloc-lib=/usr/lib64/libjemalloc.so.1
……
重啓MySQL
開啓Linux大頁內存
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
安裝TokuDB
yum install -y Percona-Server-tokudb-57.x86_64
ps-admin --enable -uroot -p
service mysql restart
ps-admin --enable -uroot -p
查看安裝結果
show engines ;
2. 配置Replication集羣
在兩個TokuDB數據庫上創建用戶
CREATE USER 'backup'@'%' IDENTIFIED BY 'Abc_123456' ;
GRANT super, reload, replication slave ON *.* TO 'backup'@'%' ;
FLUSH PRIVILEGES ;
修改兩個TokuDB的配置文件,如下:
[mysqld]
server_id = 101
log_bin = mysql_bin
relay_log = relay_bin
[mysqld]
server_id = 102
log_bin = mysql_bin
relay_log = relay_bin
* 重新啓動兩個TokuDB節點
* 分別在兩個TokuDB上執行下面4句SQL
```mysql
#關閉同步服務
stop slave;
#設置同步的Master節點
change master to master_host="192.168.99.155",master_port=3306,master_user="backup",
master_password="Abc_123456";
#啓動同步服務
start slave;
#查看同步狀態
show slave status;
#關閉同步服務
stop slave;
#設置同步的Master節點
change master to master_host="192.168.99.102",master_port=3306,master_user="backup",
master_password="Abc_123456";
#啓動同步服務
start slave;
#查看同步狀態
show slave status;
3. 創建歸檔表
CREATE TABLE t_purchase (
id INT UNSIGNED PRIMARY KEY,
purchase_price DECIMAL(10,2) NOT NULL,
purchase_num INT UNSIGNED NOT NULL,
purchase_sum DECIMAL (10,2) NOT NULL,
purchase_buyer INT UNSIGNED NOT NULL,
purchase_date TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
company_id INT UNSIGNED NOT NULL,
goods_id INT UNSIGNED NOT NULL,
KEY idx_company_id(company_id),
KEY idx_goods_id(goods_id)
)engine=TokuDB;
4. 配置Haproxy+Keepalived雙機熱備
在兩個節點上安裝Haproxy
yum install -y haproxy
修改配置文件
vi /etc/haproxy/haproxy.cfg
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
# turn on stats unix socket
stats socket /var/lib/haproxy/stats
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
listen admin_stats
bind 0.0.0.0:4001
mode http
stats uri /dbs
stats realm Global\ statistics
stats auth admin:abc123456
listen proxy-mysql
bind 0.0.0.0:4002
mode tcp
balance roundrobin
option tcplog #日誌格式
server backup_1 192.168.99.102:3306 check port 3306 maxconn 2000
server backup_2 192.168.99.155:3306 check port 3306 maxconn 2000
option tcpka #使用keepalive檢測死鏈
重啓Haproxy
開啓防火牆的VRRP協議
firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0 --protocol vrrp -j ACCEPT
firewall-cmd --reload
在兩個節點上安裝Keepalived
yum install -y keepalived
編輯Keepalived配置文件
vim /etc/keepalived/keepalived.conf
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 123456
}
virtual_ipaddress {
192.168.99.211
}
}
重啓Keepalived
5. 準備歸檔數據
在兩個PXC分片上創建進貨表
CREATE TABLE t_purchase (
id INT UNSIGNED PRIMARY KEY,
purchase_price DECIMAL(10,2) NOT NULL,
purchase_num INT UNSIGNED NOT NULL,
purchase_sum DECIMAL (10,2) NOT NULL,
purchase_buyer INT UNSIGNED NOT NULL,
purchase_date TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
company_id INT UNSIGNED NOT NULL,
goods_id INT UNSIGNED NOT NULL,
KEY idx_company_id(company_id),
KEY idx_goods_id(goods_id)
)
* 配置MyCat的schema.xml文件,並重啓MyCat
<table name="t_purchase" dataNode="dn1,dn2" rule="mod-long" />
6. 執行數據歸檔
安裝pt-archiver
yum install percona-toolkit
pt-archiver --version
pt-archiver --help
* 執行數據歸檔
pt-archiver --source h=192.168.99.102,P=8066,u=admin,p=Abc_123456,D=test,t=t_purchase --dest h=192.168.99.102,P=3306,u=admin,p=Abc_123456,D=test,t=t_purchase --no-check-charset --where 'purchase_date<"2018-09"' --progress 5000 --bulk-delete --bulk-insert --limit=10000 --statistics