HA SQL Server With Pacemaker Docs

This document provides a comprehensive guide for setting up a high-availability SQL Server cluster using Pacemaker and configuring it for database mirroring. It includes prerequisites, user and SSH configurations, installation of necessary packages, and detailed SQL commands for creating certificates, availability groups, and managing firewall settings. The instructions also cover the process of ensuring both servers have identical databases and the steps for establishing secure connections between them.


Prerequisites

You will need SSMS or Azure Data Studio.

Have SQL Server installed on both servers.

On both servers, in MobaXterm, run the following:

useradd <username>
passwd <username>

sudo vi /etc/sudoers # under the root entry, add your user, e.g.

root ALL=(ALL) ALL


<username> ALL=(ALL:ALL) ALL

# save the file

# edit the sshd config file and permit the following

sudo vi /etc/ssh/sshd_config

PermitRootLogin yes
PasswordAuthentication yes # if there is a '#' in front of these lines, delete it.

#save the file and

sudo systemctl restart sshd

# duplicate the Linux tab and log in with your username and password.

# execute the following using your user account on both servers

hostname ## copy the output and save it for the next step

sudo vi /etc/hosts

<ip-address-server1> <hostname_server1> node1


<ip-address-server2> <hostname_server2> node2

#save the file

ssh-keygen # accept all the defaults by pressing Enter until it finishes
ssh-copy-id -i /home/bob/.ssh/id_rsa.pub 130.61.34.94 # copy the public key to server2

ssh node2 # make sure you can login to server2 without any passwords
ssh node1 #from server2

# log in as root on both servers and execute the following:

subscription-manager repos --enable=rhel-8-for-x86_64-highavailability-rpms

yum install pcs pacemaker fence-agents-all

firewall-cmd --permanent --add-service=high-availability


firewall-cmd --add-service=high-availability

passwd hacluster # you must set a password on each node for the user ID hacluster,
# which is the pcs administration account. It is recommended that the password
# for user hacluster be the same on each server.
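
If you want to script this instead of typing the password interactively, a minimal
sketch (the password is a placeholder; passwd --stdin is a Red Hat extension) is:

# run on each node with the same password
echo '<HAClusterPassword>' | sudo passwd --stdin hacluster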
systemctl start pcsd.service
systemctl enable pcsd.service

yum install pcp-zeroconf

pcs host auth node1 node2

pcs cluster setup --force my_cluster node1 node2

pcs cluster start

systemctl enable pacemaker.service

systemctl enable corosync.service

pcs status #corosync, pacemaker and pcsd should all be active and enabled.

# configure fencing
pcs stonith create myapc fence_apc_snmp ipaddr="130.61.170.154;130.61.34.94" pcmk_host_map="mssql01:1;mssql02:2" login="apc" passwd="sqlpass123!"

pcs stonith config myapc

yum install -y iscsi-initiator-utils lvm2

yum install fence-virt fence-virtd-libvirt

firewall-cmd --permanent --add-port=3260/tcp

firewall-cmd --reload

mkdir -p /etc/cluster

dd if=/dev/urandom of=/etc/cluster/fence_xvm.key bs=4k count=1 #on server1

scp /etc/cluster/fence_xvm.key mssql02:/etc/cluster/ #copy the key to server2

dnf install fence-virtd-serial

fence_virtd -c # accept all the defaults by pressing yes or Enter whenever necessary

# on server1 do the following

pcs stonith create fence-node2 fence_xvm pcmk_host_list=node2

pcs stonith create fence-node1 fence_xvm pcmk_host_list=node1

# run the following on both servers


pcs property

pcs property set stonith-enabled=true

systemctl enable fence_virtd


systemctl status fence_virtd.service

pcs stonith status ### it should be in a stopped state

firewall-cmd --add-port=1229/udp --permanent --zone=public

firewall-cmd --add-port=1229/tcp --permanent --zone=public

firewall-cmd --reload

firewall-cmd --list-all --zone=public

yum -y install httpd

firewall-cmd --permanent --add-service=http

firewall-cmd --reload

firewall-cmd --list-all

cd /var/www/html/

vi index.html ## write any info you want e.g welcome to node1

systemctl restart libvirtd.service

systemctl status libvirtd.service

# Open https://130.61.170.154:2224 in your browser (the IP address of one of your
# servers, port 2224). Log in as hacluster and add the existing cluster by giving your
# node names, then refresh the nodes so that the fencing is active.
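
To confirm from the shell that the pcsd web UI is reachable before opening the
browser (same IP as above; -k skips verification of pcsd's self-signed certificate):

curl -k https://130.61.170.154:2224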

sudo /opt/mssql/bin/mssql-conf set hadr.hadrenabled 1


sudo systemctl restart mssql-server

sudo yum install openssh-server


sudo mkdir /var/opt/mssql/certs/

Proceed to Azure Data Studio, connect to both servers, and run the following code
to create certificates for both servers.

FOR SERVER 1

CREATE MASTER KEY ENCRYPTION BY PASSWORD = '<StrongPassword!>';


GO
CREATE CERTIFICATE TEST1_Cert
WITH SUBJECT = 'Availability Group certificate for TEST1';
GO
BACKUP CERTIFICATE TEST1_Cert TO FILE = '/var/opt/mssql/data/TEST1_Cert.cer';
GO
CREATE ENDPOINT AvailabilityGroupEndpoint STATE = STARTED AS TCP (
LISTENER_PORT = 5022,
LISTENER_IP = ALL
)
FOR DATABASE_MIRRORING(AUTHENTICATION = CERTIFICATE TEST1_Cert, ROLE = ALL);
GO

FOR SERVER 2

CREATE MASTER KEY ENCRYPTION BY PASSWORD = '<StrongPassword!>';


GO
CREATE CERTIFICATE TEST2_Cert
WITH SUBJECT = 'Availability Group certificate for TEST2';
GO
BACKUP CERTIFICATE TEST2_Cert TO FILE = '/var/opt/mssql/data/TEST2_Cert.cer';
GO
CREATE ENDPOINT AvailabilityGroupEndpoint STATE = STARTED AS TCP (
LISTENER_PORT = 5022,
LISTENER_IP = ALL
)
FOR DATABASE_MIRRORING(AUTHENTICATION = CERTIFICATE TEST2_Cert, ROLE = ALL);
GO

GO BACK TO MOBAXTERM AND RUN THE FOLLOWING COMMANDS ON BOTH SERVERS

sudo su

cd /var/opt/mssql/data/

ll # verify that the certificates you created exist

# Since we connected to these servers using cloud keys, upload those keys to both
# servers and make a connection from one server, as root, using the following command:

ssh -i /home/cloud-user/ebu-cloud-instance.key cloud-user@130.61.111.68


## from server 2, connect to server 1

# copy the remote server's certificate to its home directory and change its
# ownership

sudo cp /var/opt/mssql/data/TEST1_Cert.cer ~/
sudo chown cloud-user:cloud-user ~/TEST1_Cert.cer
exit ## exit the connection

# now copy that file from the remote home directory to this server using scp

# sample untested scp command: pull TEST1_Cert.cer from the other server's home directory

scp -i /home/cloud-user/ebu-cloud-instance.key cloud-user@130.61.111.68:~/TEST1_Cert.cer ~/

# MAKE SURE THAT BOTH CERTS APPEAR IN /var/opt/mssql/data/

## do this step from the other server (server 1)

ssh -i /home/cloud-user/ebu-cloud-instance.key cloud-user@130.61.30.62


sudo cp /var/opt/mssql/data/TEST2_Cert.cer ~/
sudo chown cloud-user:cloud-user ~/TEST2_Cert.cer
exit
# now copy that file from its home directory to this server using scp command
# MAKE SURE THAT BOTH CERTS APPEAR IN /var/opt/mssql/data/

# on server 1
cp TEST2_Cert.cer /var/opt/mssql/data/
# on server 2
cp TEST1_Cert.cer /var/opt/mssql/data/

Back in Azure Data Studio, make sure you have identical databases on both servers,
with the same data. Back up the database on the first server (right-click on the
database and select the backup option). On the second server, restore the database
in a no-recovery state; the restore script (run it in Azure Data Studio on the
second server) is shown further below.
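
If you prefer to take the backup from the shell rather than the Azure Data Studio
dialog, a minimal sketch using sqlcmd (the sa password and backup file name are
placeholders) is:

# run on server 1; writes a full backup into SQL Server's default data directory
sqlcmd -S localhost -U sa -P '<StrongPassword>' -Q "BACKUP DATABASE testDB1 TO DISK = '/var/opt/mssql/data/testDB1.bak' WITH FORMAT"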

#### restore with the backup file from the first server (copy the database backup
#### file from the 1st server to the 2nd server)

cp testDB1-2024426-11-54-43.bak /home/cloud-user

sudo chown cloud-user:cloud-user testDB1-2024426-11-54-43.bak


Then: download the backup file, upload it to server 2, copy it to the mssql data
directory, and chown it to mssql:

cp testDB1-2024426-11-54-43.bak /var/opt/mssql/data/

chown -R mssql:mssql testDB1-2024426-11-54-43.bak

RESTORE DATABASE testDB1 FROM DISK = '/var/opt/mssql/data/testDB1-2024426-11-54-43.bak' WITH REPLACE, NORECOVERY;

# you can check in MobaXterm, under the /var/opt/mssql/data/ directory, for the
# backup file location
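
For example, to list the backup files from MobaXterm:

ls -l /var/opt/mssql/data/*.bak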

On the source server, in Azure Data Studio, run the following:

CREATE LOGIN test2_Login WITH PASSWORD = '<StrongPassword>';


CREATE USER test2_User FOR LOGIN test2_Login;
GO

CREATE CERTIFICATE test2_Cert AUTHORIZATION test2_User
FROM FILE = '/var/opt/mssql/data/TEST2_Cert.cer';
GO

GRANT CONNECT ON ENDPOINT::AvailabilityGroupEndpoint TO test2_Login;


GO

On the second server:

CREATE LOGIN test1_Login WITH PASSWORD = '<StrongPassword>';


CREATE USER test1_User FOR LOGIN test1_Login;
GO

CREATE CERTIFICATE test1_Cert AUTHORIZATION test1_User
FROM FILE = '/var/opt/mssql/data/TEST1_Cert.cer';
GO

GRANT CONNECT ON ENDPOINT::AvailabilityGroupEndpoint TO test1_Login;


GO

Run the following on the first server.

NOTE: skip this code block (CLUSTER_TYPE = NONE) and use the one with Pacemaker
(CLUSTER_TYPE = EXTERNAL) further below.

CREATE AVAILABILITY GROUP [testDB1]


WITH (CLUSTER_TYPE = NONE)
FOR DATABASE testDB1
REPLICA ON
N'instance-20240426-0858' WITH (
ENDPOINT_URL = N'TCP://130.61.111.68:5022',
FAILOVER_MODE = MANUAL,
AVAILABILITY_MODE = SYNCHRONOUS_COMMIT,
PRIMARY_ROLE(ALLOW_CONNECTIONS = READ_WRITE, READ_ONLY_ROUTING_LIST = (
('instance-2') -- Correct replica name here
)),
SECONDARY_ROLE(ALLOW_CONNECTIONS = ALL, READ_ONLY_ROUTING_URL =
N'TCP://130.61.111.68:1433')
),
N'instance-2' WITH (
ENDPOINT_URL = N'TCP://130.61.30.62:5022',
FAILOVER_MODE = MANUAL,
SEEDING_MODE = AUTOMATIC,
AVAILABILITY_MODE = SYNCHRONOUS_COMMIT,
PRIMARY_ROLE(ALLOW_CONNECTIONS = READ_WRITE, READ_ONLY_ROUTING_LIST = (
('instance-20240426-0858') -- Correct replica name here
)),
SECONDARY_ROLE(ALLOW_CONNECTIONS = ALL, READ_ONLY_ROUTING_URL =
N'TCP://130.61.30.62:1433')
);

-- Add a listener
ALTER AVAILABILITY GROUP [testDB1] ADD LISTENER 'AGListener'
(
WITH IP (('130.61.111.68', '255.255.255.0')), PORT = 1433 -- use the subnet mask of the listener IP
);

Before running this code, go back to bash on the two servers and run the steps
from the online docs:

https://learn.microsoft.com/en-us/sql/linux/sql-server-linux-deploy-pacemaker-cluster?view=sql-server-ver16&tabs=rhel
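
For reference, a minimal sketch of the SQL Server-side steps from that page (the
pacemakerLogin name and both passwords are placeholders; adapt them to your
environment):

# install the SQL Server HA resource agent package (both servers)
sudo yum install mssql-server-ha

# create a SQL Server login for Pacemaker (both servers)
sqlcmd -S localhost -U sa -P '<StrongPassword>' -Q "CREATE LOGIN [pacemakerLogin] WITH PASSWORD = N'<PacemakerPassword>'; GRANT VIEW SERVER STATE TO [pacemakerLogin];"

# save the credentials where the mssql resource agent expects them (both servers)
printf '%s\n%s\n' 'pacemakerLogin' '<PacemakerPassword>' | sudo tee /var/opt/mssql/secrets/passwd > /dev/null
sudo chown root:root /var/opt/mssql/secrets/passwd
sudo chmod 400 /var/opt/mssql/secrets/passwd

# after the availability group has been created (below), grant the Pacemaker login control over it (both servers)
sqlcmd -S localhost -U sa -P '<StrongPassword>' -Q "GRANT ALTER, CONTROL, VIEW DEFINITION ON AVAILABILITY GROUP::[testDB1] TO [pacemakerLogin];"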

##### The code with pacemaker is here ###########


CREATE AVAILABILITY GROUP [testDB1]
WITH (CLUSTER_TYPE = EXTERNAL)
FOR DATABASE testDB1
REPLICA ON
N'instance-20240426-0858' WITH (
ENDPOINT_URL = N'TCP://130.61.111.68:5022',
FAILOVER_MODE = EXTERNAL, -- EXTERNAL is required when the AG is managed by Pacemaker
AVAILABILITY_MODE = SYNCHRONOUS_COMMIT,
PRIMARY_ROLE(ALLOW_CONNECTIONS = READ_WRITE, READ_ONLY_ROUTING_LIST = (
('instance-2') -- Correct replica name here
)),
SECONDARY_ROLE(ALLOW_CONNECTIONS = ALL, READ_ONLY_ROUTING_URL =
N'TCP://130.61.111.68:1433')
),
N'instance-2' WITH (
ENDPOINT_URL = N'TCP://130.61.30.62:5022',
FAILOVER_MODE = EXTERNAL, -- EXTERNAL is required when the AG is managed by Pacemaker
SEEDING_MODE = AUTOMATIC,
AVAILABILITY_MODE = SYNCHRONOUS_COMMIT,
PRIMARY_ROLE(ALLOW_CONNECTIONS = READ_WRITE, READ_ONLY_ROUTING_LIST = (
('instance-20240426-0858') -- Correct replica name here
)),
SECONDARY_ROLE(ALLOW_CONNECTIONS = ALL, READ_ONLY_ROUTING_URL =
N'TCP://130.61.30.62:1433')
);

NOTE: with CLUSTER_TYPE = EXTERNAL there are no ALTER AVAILABILITY GROUP statements
for adding cluster resources, nodes, or relationships; that part of the configuration
is done in Pacemaker itself with pcs (per the Microsoft docs linked above), as
sketched below.

########## On the second server..... do this #################


ALTER AVAILABILITY GROUP [testDB1] JOIN WITH (CLUSTER_TYPE = EXTERNAL);
ALTER DATABASE testDB1 SET HADR AVAILABILITY GROUP = [testDB1];
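
To confirm that the availability group and cluster resources are healthy, a quick
check (using the resource and login names from the sketches above; the sa password
is a placeholder) is:

# cluster view: the ag_cluster resource should show one promoted (primary) node
sudo pcs status

# SQL Server view: replica roles and synchronization state of the availability group
sqlcmd -S localhost -U sa -P '<StrongPassword>' \
  -Q "SELECT r.replica_server_name, rs.role_desc, rs.synchronization_health_desc
      FROM sys.dm_hadr_availability_replica_states rs
      JOIN sys.availability_replicas r ON rs.replica_id = r.replica_id;"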
