Ok, here is entire file -
[root@memverge anton]# cat /etc/drbd.d/ha-nfs.res
resource ha-nfs {
options {
auto-promote no;
}
handlers {
fence-peer "/usr/lib/drbd/crm-fence-peer.9.sh";
after-resync-target "/usr/lib/drbd/crm-unfence-peer.9.sh";
}
disk {
c-plan-ahead 0;
resync-rate 32M;
al-extents 6007;
}
volume 29 {
device /dev/drbd1;
disk /dev/block_nfs_vg/ha_nfs_internal_lv;
meta-disk internal;
}
volume 30 {
device /dev/drbd2;
disk /dev/block_nfs_vg/ha_nfs_exports_lv;
meta-disk internal;
}
volume 31 {
device /dev/drbd3;
disk /dev/block_nfs_vg/ha_block_exports_lv;
meta-disk internal;
}
on memverge {
address 10.72.14.152:7900;
node-id 27;
}
on memverge2 {
address 10.72.14.154:7900;
node-id 28;
}
connection-mesh {
hosts memverge memverge2;
}
net
{
transport tcp;
protocol C;
sndbuf-size 10M;
rcvbuf-size 10M;
max-buffers 80K;
max-epoch-size 20000;
timeout 90;
ping-timeout 10;
ping-int 15;
connect-int 15;
fencing resource-and-stonith;
}
connection
{
path
{
host memverge address 192.168.0.6:7900;
host memverge2 address 192.168.0.8:7900;
}
path
{
host memverge address 1.1.1.6:7900;
host memverge2 address 1.1.1.8:7900;
}
net
{
transport tcp;
protocol C;
sndbuf-size 10M;
rcvbuf-size 10M;
max-buffers 80K;
max-epoch-size 20000;
timeout 90;
ping-timeout 10;
ping-int 15;
connect-int 15;
fencing resource-and-stonith;
}
}
}
[root@memverge anton]#
[root@memverge anton]# modinfo -d drbd
drbd - Distributed Replicated Block Device v9.2.12
[root@memverge anton]#
[root@memverge anton]# drbdsetup show
resource "ha-nfs" {
options {
auto-promote no;
}
_this_host {
node-id 27;
volume 29 {
device minor 1;
disk "/dev/block_nfs_vg/ha_nfs_internal_lv";
meta-disk internal;
disk {
al-extents 6007;
}
}
volume 30 {
device minor 2;
disk "/dev/block_nfs_vg/ha_nfs_exports_lv";
meta-disk internal;
disk {
al-extents 6007;
}
}
volume 31 {
device minor 3;
}
}
connection {
_peer_node_id 28;
path {
_this_host ipv4 192.168.0.6:7900;
_remote_host ipv4 192.168.0.8:7900;
}
path {
_this_host ipv4 1.1.1.6:7900;
_remote_host ipv4 1.1.1.8:7900;
}
net {
transport “tcp”;
timeout 90; # 1/10 seconds
max-epoch-size 20000;
connect-int 15; # seconds
ping-int 15; # seconds
sndbuf-size 10485760; # bytes
rcvbuf-size 10485760; # bytes
ping-timeout 10; # 1/10 seconds
fencing resource-and-stonith;
max-buffers 81920;
_name “memverge2”;
}
volume 29 {
disk {
resync-rate 32768k; # bytes/second
c-plan-ahead 0; # 1/10 seconds
}
}
volume 30 {
disk {
resync-rate 32768k; # bytes/second
c-plan-ahead 0; # 1/10 seconds
}
}
volume 31 {
disk {
resync-rate 32768k; # bytes/second
c-plan-ahead 0; # 1/10 seconds
}
}
}
}
[root@memverge anton]#
Also, are you getting any messages in your system journal from drbd when you attempt this drbdadm adjust
?
[root@memverge anton]# drbdadm adjust all
No valid meta data found
[root@memverge anton]# dmesg
[ 1040.903634] drbd ha-nfs/31 drbd3 memverge2: pdsk( DUnknown -> Diskless ) repl( Off -> Established ) [peer-state]
[root@memverge anton]#
[root@memverge anton]# drbdadm status
ha-nfs role:Secondary
volume:29 disk:UpToDate
volume:30 disk:UpToDate
volume:31 disk:Diskless
memverge2 role:Secondary
volume:29 peer-disk:UpToDate
volume:30 peer-disk:UpToDate
volume:31 peer-disk:Diskless
Now I have started thinking that maybe for iSCSI I have to create a new DRBD resource file, rather than adding the iSCSI volume to the existing NFS DRBD resource config. However, I would prefer to stay with a single DRBD resource config for both the NFS and iSCSI volumes.
What do you think? What are the best practices if I want to serve both NFS and iSCSI?