Setup pcs cluster lab on kvm libvirt virtualization
~~~~~ Setup pcs cluster lab on kvm libvirt ~~~~~~~~~
----------------------------------------------------
host - kvm.mylab.local
guest- hanode1.mylab.local | 192.168.122.83
guest- hanode2.mylab.local | 192.168.122.84
~~~~~~~~~~~~~On KVM Host~~~~~~~~~~~~~
yum install fence-virt fence-virtd fence-virtd-libvirt fence-virtd-multicast fence-virtd-serial
mkdir /etc/cluster
dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=4k count=1
-- hit enter for all in above
firewall-cmd --permanent --add-service=fence_virt
firewall-cmd --permanent --add-port=1229/udp
firewall-cmd --permanent --add-port=1229/tcp
firewall-cmd --reload
systemctl start fence_virtd.service
systemctl enable fence_virtd.service
systemctl status fence_virtd.service
fence_virtd -c   ## interactive config: press Enter to accept defaults, choose backend "libvirt", interface "virbr0"
ssh [email protected] mkdir /etc/cluster
ssh [email protected] mkdir /etc/cluster
scp /etc/cluster/fence_xvm.key [email protected]:/etc/cluster/
scp /etc/cluster/fence_xvm.key [email protected]:/etc/cluster/
fence_xvm -o list
~~~~~~~~~~~on both nodes ~~~~~~~~~~~~~~~
yum install pcs pacemaker corosync fence-agents-virsh fence-virt \
pacemaker-remote fence-agents-all lvm2-cluster resource-agents \
psmisc policycoreutils-python gfs2-utils -y
echo "centos" | passwd hacluster --stdin
systemctl start pcsd.service; systemctl enable pcsd.service
firewall-cmd --permanent --add-service=high-availability
firewall-cmd --permanent --add-port=1229/tcp
firewall-cmd --reload
fence_xvm -o off -H hanode2 ## to check fencing
----Only if selinux was enabled during pcs setup
ls -Z /etc/cluster/fence_xvm.key
restorecon -v /etc/cluster/fence_xvm.key   ## note: the CLI tool is restorecon (selinux_restorecon is the libselinux API name)
~~~~~~~~~~~~~on any one of node~~~~~~~~~~~~
pcs cluster auth hanode1.mylab.local hanode2.mylab.local -u hacluster -p centos
pcs cluster setup --start --name webcluster hanode1.mylab.local hanode2.mylab.local
pcs cluster enable --all
pcs status
crm_mon -r1
pcs cluster status
-- setup fencing part
pcs stonith create fencedev1 fence_xvm pcmk_host_map="hanode1.mylab.local:hanode1 hanode2.mylab.local:hanode2" key_file=/etc/cluster/fence_xvm.key
pcs stonith create fencedev2 fence_xvm pcmk_host_map="hanode1.mylab.local:hanode1 hanode2.mylab.local:hanode2" key_file=/etc/cluster/fence_xvm.key
pcs constraint location fencedev1 prefers hanode1.mylab.local
pcs constraint location fencedev2 prefers hanode2.mylab.local
pcs constraint list
pcs stonith
pcs status
--to check
pcs stonith fence hanode2.mylab.local   ## use the cluster node name (FQDN as given in 'pcs cluster setup'), not the libvirt domain name
pcs stonith show --full
pcs property --all | grep -i stonith
----------------------------------------------------
host - kvm.mylab.local
guest- hanode1.mylab.local | 192.168.122.83
guest- hanode2.mylab.local | 192.168.122.84
~~~~~~~~~~~~~On KVM Host~~~~~~~~~~~~~
yum install fence-virt fence-virtd fence-virtd-libvirt fence-virtd-multicast fence-virtd-serial
mkdir /etc/cluster
dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=4k count=1
-- hit enter for all in above
firewall-cmd --permanent --add-service=fence_virt
firewall-cmd --permanent --add-port=1229/udp
firewall-cmd --permanent --add-port=1229/tcp
firewall-cmd --reload
systemctl start fence_virtd.service
systemctl enable fence_virtd.service
systemctl status fence_virtd.service
fence_virtd -c   ## interactive config: press Enter to accept defaults, choose backend "libvirt", interface "virbr0"
ssh [email protected] mkdir /etc/cluster
ssh [email protected] mkdir /etc/cluster
scp /etc/cluster/fence_xvm.key [email protected]:/etc/cluster/
scp /etc/cluster/fence_xvm.key [email protected]:/etc/cluster/
fence_xvm -o list
~~~~~~~~~~~on both nodes ~~~~~~~~~~~~~~~
yum install pcs pacemaker corosync fence-agents-virsh fence-virt \
pacemaker-remote fence-agents-all lvm2-cluster resource-agents \
psmisc policycoreutils-python gfs2-utils -y
echo "centos" | passwd hacluster --stdin
systemctl start pcsd.service; systemctl enable pcsd.service
firewall-cmd --permanent --add-service=high-availability
firewall-cmd --permanent --add-port=1229/tcp
firewall-cmd --reload
fence_xvm -o off -H hanode2 ## to check fencing
----Only if selinux was enabled during pcs setup
ls -Z /etc/cluster/fence_xvm.key
restorecon -v /etc/cluster/fence_xvm.key   ## note: the CLI tool is restorecon (selinux_restorecon is the libselinux API name)
~~~~~~~~~~~~~on any one of node~~~~~~~~~~~~
pcs cluster auth hanode1.mylab.local hanode2.mylab.local -u hacluster -p centos
pcs cluster setup --start --name webcluster hanode1.mylab.local hanode2.mylab.local
pcs cluster enable --all
pcs status
crm_mon -r1
pcs cluster status
-- setup fencing part
pcs stonith create fencedev1 fence_xvm pcmk_host_map="hanode1.mylab.local:hanode1 hanode2.mylab.local:hanode2" key_file=/etc/cluster/fence_xvm.key
pcs stonith create fencedev2 fence_xvm pcmk_host_map="hanode1.mylab.local:hanode1 hanode2.mylab.local:hanode2" key_file=/etc/cluster/fence_xvm.key
pcs constraint location fencedev1 prefers hanode1.mylab.local
pcs constraint location fencedev2 prefers hanode2.mylab.local
pcs constraint list
pcs stonith
pcs status
--to check
pcs stonith fence hanode2.mylab.local   ## use the cluster node name (FQDN as given in 'pcs cluster setup'), not the libvirt domain name
pcs stonith show --full
pcs property --all | grep -i stonith
Comments
Post a Comment