Set up a distributed, replicated, highly available NFS server with DRBD and Pacemaker on CentOS 7
-------- Setup DRBD two-node cluster ----------
Import the ELRepo GPG key
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
Install the ELRepo repository
rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
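Optionally, verify the repository is in place before continuing:
rpm -q elrepo-release
yum repolist enabled | grep elrepo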
--Tasks to perform on both nodes
Install the DRBD packages
yum install -y kmod-drbd84 drbd84-utils
Add a firewalld rule to allow DRBD replication traffic (TCP 7789) from the peer node; replace the source address with the other node's IP
firewall-cmd --permanent --add-rich-rule='rule family="ipv4" source address="192.168.122.102" port port="7789" protocol="tcp" accept'
firewall-cmd --reload
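Optionally, confirm the rule was applied:
firewall-cmd --list-rich-rules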
Take a backup of the original config file
mv /etc/drbd.d/global_common.conf /etc/drbd.d/global_common.conf.orig
Create a new file with your configuration
vim /etc/drbd.d/global_common.conf
--Place the below content in the file
global {
    usage-count yes;
}
common {
    net {
        protocol C;
        sndbuf-size 0;
    }
    disk {
        fencing resource-only;
    }
    handlers {
        fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
        after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
    }
}
Save the file and exit
Create the DRBD resource definition file (disk and device definitions)
vi /etc/drbd.d/test.res
--Place the below content
resource test {
    on drbd1.mylab.local {
        device /dev/drbd0;
        disk /dev/vdb1;
        meta-disk internal;
        address 192.168.1.43:7789;
    }
    on drbd2.mylab.local {
        device /dev/drbd0;
        disk /dev/vdb1;
        meta-disk internal;
        address 192.168.1.44:7789;
    }
}
Save and exit
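Optionally, verify that the configuration parses cleanly:
drbdadm dump test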
In this file, /dev/drbd0 is the DRBD replicated device and /dev/vdb1 is the physical disk attached to both servers. The DRBD resource name is given as 'test'; you can choose your own name.
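If /dev/vdb1 does not exist yet, one way to create it, assuming /dev/vdb is a blank disk dedicated to DRBD (adjust to your environment), is:
parted -s /dev/vdb mklabel gpt
parted -s /dev/vdb mkpart primary 0% 100%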
Create the DRBD metadata and bring up the resource 'test' defined above (on both nodes)
drbdadm create-md test
drbdadm up test
Check the DRBD status
drbdadm status test
Promote one node to primary (run on one node only)
drbdadm primary --force test
drbdadm primary test
Make the other node secondary (run on the other node)
drbdadm secondary test
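The Pacemaker Filesystem resource configured later expects an XFS filesystem on /dev/drbd0 and a /data mount point. Create the filesystem once, on the primary node only, and create the mount point on both nodes:
mkfs.xfs /dev/drbd0
mkdir -p /data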
Check DRBD replication status at any time
cat /proc/drbd
Enable the DRBD service to start at system boot (note: once Pacemaker manages DRBD, as configured below, some setups instead leave this disabled so the cluster alone starts DRBD)
systemctl enable drbd
-------Setup Pacemaker HA cluster----------
Install the Pacemaker tools and fence agents on both nodes:
yum install pcs fence-agents-all -y
firewall-cmd --permanent --add-service=high-availability
firewall-cmd --add-service=high-availability
Set a password for the hacluster user (use the same password on both nodes):
passwd hacluster
systemctl start pcsd
systemctl enable pcsd
Authenticate the cluster nodes, then create and start the cluster (run on one node):
pcs cluster auth drbd1.mylab.local drbd2.mylab.local
pcs cluster setup --start --name drbdcluster drbd1.mylab.local drbd2.mylab.local
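Verify that both nodes joined and the cluster is running:
pcs status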
-----The below settings are not recommended in production
pcs property set stonith-enabled=false
pcs property set no-quorum-policy=ignore
-------
Set resource stickiness so resources stay put after a failover, then create the floating virtual IP:
pcs property set default-resource-stickiness="INFINITY"
pcs resource create VirtIP ocf:heartbeat:IPaddr2 ip=192.168.1.45 cidr_netmask=32 op monitor interval=30s
---drbd config
pcs cluster cib drbd_cfg
pcs -f drbd_cfg resource create DrbdData ocf:linbit:drbd drbd_resource=test op monitor interval=60s
pcs -f drbd_cfg resource master DrbdDataClone DrbdData master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
pcs -f drbd_cfg constraint colocation add DrbdDataClone with VirtIP INFINITY
pcs -f drbd_cfg constraint order VirtIP then DrbdDataClone
pcs cluster cib-push drbd_cfg
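After pushing the CIB, optionally check that DRBD was promoted on one node:
pcs status
drbdadm role test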
---drbdfs resource
pcs cluster cib fs_cfg
pcs -f fs_cfg resource create DrbdFS Filesystem device="/dev/drbd0" directory="/data" fstype="xfs"
pcs -f fs_cfg constraint colocation add DrbdFS with DrbdDataClone INFINITY with-rsc-role=Master
pcs -f fs_cfg constraint order promote DrbdDataClone then start DrbdFS
---nfs server resource
pcs -f fs_cfg resource create nfssrv systemd:nfs-server op monitor interval=30s
pcs -f fs_cfg constraint colocation add nfssrv with DrbdFS INFINITY
pcs -f fs_cfg constraint order DrbdFS then nfssrv
pcs cluster cib-push fs_cfg
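The systemd nfs-server resource needs an export definition on both nodes so that whichever node is active serves the share. A minimal sketch, assuming NFS clients on the 192.168.1.0/24 network (adjust the network and export options to your environment); run on both nodes:
echo '/data 192.168.1.0/24(rw,sync,no_root_squash)' >> /etc/exports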
firewall-cmd --permanent --add-service=nfs
firewall-cmd --permanent --add-service=mountd
firewall-cmd --permanent --add-service=rpc-bind
firewall-cmd --reload
---The below is essential in an NFS HA environment
systemctl stop nfs-lock && systemctl disable nfs-lock
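To verify the setup from an NFS client machine, mount the export via the cluster virtual IP (192.168.1.45, from the VirtIP resource above):
showmount -e 192.168.1.45
mount -t nfs 192.168.1.45:/data /mnt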
---- If RHEV fencing is to be used in production, do not set stonith-enabled to false; use the below command to create the RHEV STONITH resource
pcs stonith create rhevfence fence_rhevm stonith-timeout=120 pcmk_host_list="node1 node2" pcmk_host_map="node1:rhev-node1 node2:rhev-node2" ipaddr=10.10.100.200 ssl=1 ssl_insecure=1 [email protected] passwd=xxx port="rhev-node1 rhev-node2" shell_timeout=20 power_wait=25 action=reboot
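One way to test failover is to put the active node in standby, watch the resources move to the other node, and then bring it back:
pcs cluster standby drbd1.mylab.local
pcs status
pcs cluster unstandby drbd1.mylab.local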