Overview
We use CTDB to provide high availability for GlusterFS.
Lab environment: client, node1, node2, node3, node4
1. Create a volume named v2
gluster volume create v2 node1:/data/xx node2:/data/xx
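Note: this creates a plain distributed volume on two bricks. Because CTDB will keep its recovery lock on this volume, a replicated volume spanning the nodes is safer for HA; a sketch, assuming the same brick path exists on all four nodes:
gluster volume create v2 replica 4 node1:/data/xx node2:/data/xx node3:/data/xx node4:/data/xx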
2. Install CTDB and Samba (on every node)
yum install ctdb* -y
yum install samba samba-client.x86_64 samba-common -y
3. Edit /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh and change META to v2 (on every node)
4. Edit /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh and change META to v2 (on every node)
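Rather than editing by hand, one sed command can patch both hook scripts; this assumes the line in each file still reads META="all" (the shipped default):
sed -i 's/^META=.*/META="v2"/' \
    /var/lib/glusterd/hooks/1/start/post/S29CTDBsetup.sh \
    /var/lib/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh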
5. Edit /etc/samba/smb.conf (on every node)
Under the [global] section, add clustering=yes
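The relevant part of smb.conf then looks like this (everything else can stay at its defaults):
[global]
        clustering = yes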
6. Set a few options on the v2 volume (can be done from any one node)
[root@node1 /]# gluster volume set v2 stat-prefetch off
[root@node1 /]# gluster volume set v2 server.allow-insecure on
[root@node1 /]# gluster volume set v2 storage.batch-fsync-delay-usec 0
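Briefly: stat-prefetch off avoids serving cached (possibly stale) metadata, server.allow-insecure on lets clients connect from non-privileged ports, and storage.batch-fsync-delay-usec 0 stops fsync calls from being delayed and batched. To confirm the options took effect, check the "Options Reconfigured" section of:
gluster volume info v2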
7. Start the v2 volume
gluster volume start v2
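Starting the volume fires the S29CTDBsetup.sh hook edited earlier, which mounts the lock volume (by default under /gluster/lock) on each node. A quick check:
mount | grep gluster/lock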
8. In /etc/ctdb/, create a file named nodes and list every node's IP in it
touch nodes
vim nodes
192.168.122.101
192.168.122.102
192.168.122.103
192.168.122.104
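The nodes file must be identical on every node. A non-interactive way to create the same file everywhere:
cat > /etc/ctdb/nodes <<EOF
192.168.122.101
192.168.122.102
192.168.122.103
192.168.122.104
EOF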
9. Pick a floating IP and write it to /etc/ctdb/public_addresses
echo "192.168.122.220/24 eth0" >> public_addresses
10. Enable and start the CTDB and Samba services
systemctl enable ctdb; systemctl start ctdb
systemctl enable smb.service; systemctl start smb.service
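Alternatively, CTDB can start and monitor smbd itself. On the CTDB shipped with EL7 this is enabled by adding the line below to /etc/sysconfig/ctdb (and then disabling smb in systemd with systemctl disable smb.service so the two don't fight over the daemon):
CTDB_MANAGES_SAMBA=yes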
11. Check CTDB's public IPs
[root@node1 log]# ctdb -v ip
Public IPs on node 0
192.168.122.220 node[0] active[eth0] available[eth0] configured[eth0]
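Two more useful checks: ctdb status shows the overall cluster state (healthy nodes are reported as OK), and ctdb ping confirms each node's ctdbd is answering:
ctdb status
ctdb ping -n all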
12. Check which node 192.168.122.220 is running on
[root@node1 log]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:ec:2b:33 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.101/24 brd 192.168.122.255 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.122.220/24 brd 192.168.122.255 scope global secondary eth0
valid_lft forever preferred_lft forever
inet6 fe80::5054:ff:feec:2b33/64 scope link
valid_lft forever preferred_lft forever
It is currently running on node1.
13. Mount v2 on the client
[root@client /]# mount 192.168.122.220:/v2 /v2
[root@client /]# df -hT
Filesystem          Type      Size  Used Avail Use% Mounted on
/dev/vda1 xfs 20G 1.3G 19G 7% /
devtmpfs devtmpfs 488M 0 488M 0% /dev
tmpfs tmpfs 497M 0 497M 0% /dev/shm
tmpfs tmpfs 497M 13M 484M 3% /run
tmpfs tmpfs 497M 0 497M 0% /sys/fs/cgroup
tmpfs tmpfs 100M 0 100M 0% /run/user/0
192.168.122.220:/v2 nfs 2.0G 33M 2.0G 2% /v2
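The mount above uses Gluster's built-in NFS server. Since Samba is what CTDB is clustering here, the same floating IP also serves CIFS; a sketch, assuming cifs-utils on the client and that the Samba hook scripts exported the volume under their default share name gluster-v2:
mount -t cifs //192.168.122.220/gluster-v2 /mnt -o guest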
14. Now test: power off node1 and see whether the floating IP moves to another node.
[root@node1 log]# poweroff
PolicyKit daemon disconnected from the bus.
We are no longer a registered authentication agent.
Connection to node1 closed by remote host.
Connection to node1 closed.
[root@node2 /]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 52:54:00:72:0d:77 brd ff:ff:ff:ff:ff:ff
inet 192.168.122.102/24 brd 192.168.122.255 scope global eth0
valid_lft forever preferred_lft forever
inet 192.168.122.220/24 brd 192.168.122.255 scope global secondary eth0
valid_lft forever preferred_lft forever
inet6 fe80::5054:ff:fe72:d77/64 scope link
valid_lft forever preferred_lft forever
OK, the floating IP has failed over to node2.
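From the client, the mount should survive the failover after a short pause while CTDB moves the IP and the NFS connection re-establishes; a quick sanity check:
[root@client /]# touch /v2/failover-test && ls -l /v2/failover-test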