There is 3 node cluster, 2 diskful nodes and 1 diskless as a quorum server.
2 diskful nodes directly connected using Nvidia ConnectX-5 100 gb/s NICs.
3 nodes connected using 1gb/s links.
The idea is to use rdma for diskful nodes replication, all other drbd communications via tcp.
Just don’t know how the drbd resource config file should look like.
Could you please help, here is below how I’m trying to make such setup.
[root@qs drbd.d]# cat ha-ha-ha_nfs.res
resource ha-ha-ha_nfs {
disk {
c-plan-ahead 0;
resync-rate 32M;
al-extents 6007;
}
net
{
load-balance-paths yes;
protocol C;
transport tcp;
sndbuf-size 10M;
rcvbuf-size 10M;
max-buffers 80K;
max-epoch-size 20000;
timeout 90;
ping-timeout 10;
ping-int 15;
connect-int 15;
fencing resource-and-stonith;
}
handlers {
fence-peer “/usr/lib/drbd/crm-fence-peer.9.sh”;
after-resync-target “/usr/lib/drbd/crm-unfence-peer.9.sh”;
}
volume 19 {
device /dev/drbd17;
disk /dev/nfs_vg/ha_nfs_internal_lv;
meta-disk internal;
}
volume 20 {
device /dev/drbd18;
disk /dev/nfs_vg/ha_nfs_exports_lv;
meta-disk internal;
}
on memverge {
address 10.72.14.152:7003;
node-id 21;
}
on memverge2 {
address 10.72.14.154:7003;
node-id 22;
}
on qs {
volume 19 {
disk none;
}
volume 20 {
disk none;
}
address 10.72.14.156:7003;
node-id 23;
}
connection-mesh {
hosts memverge memverge2 qs;
}
connection
{
transport rdma;
path
{
host “memverge” address ipv4 192.168.0.6:7900;
host “memverge2” address ipv4 192.168.0.8:7900;
}
path
{
host “memverge” address ipv4 1.1.1.6:7900;
host “memverge2” address ipv4 1.1.1.8:7900;
}
}
connection
{
transport tcp;
path
{
host “memverge” address ipv4 10.72.14.152:7900;
host “qs” address ipv4 10.72.14.156:7901;
}
path
{
host “memverge2” address ipv4 10.72.14.154:7900;
host “qs” address ipv4 10.72.14.156:7901;
}
}
}
[root@qs drbd.d]#