Hello
Try your test again, but this time when the one node is in standby, write something to the iSCSI device.
Yes, that works.
[root@memverge anton]# drbdadm status
ha-iscsi role:Primary
volume:31 disk:UpToDate
memverge2 role:Secondary
volume:31 peer-disk:UpToDate
ha-nfs role:Primary
volume:29 disk:UpToDate
volume:30 disk:UpToDate
memverge2 role:Secondary
volume:29 peer-disk:UpToDate
volume:30 peer-disk:UpToDate
[root@memverge anton]#
[root@memverge anton]# pcs node standby memverge
[root@memverge anton]#
[root@memverge anton]# drbdadm status
No currently configured DRBD found.
[root@memverge anton]#
[root@memverge anton]# pcs cluster cib cib.txt
[root@memverge anton]# cat cib.txt |grep uname
node id="27" uname="memverge">
node id="28" uname="memverge2">
expression attribute="#uname" operation="ne" value="memverge2" id="drbd-fence-by-handler-ha-iscsi-expr-28-ha-iscsi-clone"/>
expression attribute="#uname" operation="ne" value="memverge2" id="drbd-fence-by-handler-ha-nfs-expr-28-ha-nfs-clone"/>
node_state id="28" uname="memverge2" in_ccm="1743401842" crmd="1743401842" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
node_state id="27" uname="memverge" in_ccm="1743166191" crmd="1743166191" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
[root@memverge anton]#
[root@memverge2 ~]# drbdadm status
ha-iscsi role:Primary
volume:31 disk:UpToDate
memverge connection:Connecting
ha-nfs role:Primary
volume:29 disk:UpToDate
volume:30 disk:UpToDate
memverge connection:Connecting
[root@memverge2 ~]# dd if=/dev/urandom of=/dev/drbd3 bs=4k count=100000
100000+0 records in
100000+0 records out
409600000 bytes (410 MB, 391 MiB) copied, 0.934165 s, 438 MB/s
[root@memverge2 ~]#
[root@memverge anton]# pcs node unstandby memverge
[root@memverge anton]#
Mar 31 09:32:15 memverge kernel: drbd ha-iscsi/31 drbd3 memverge2: uuid_compare()=target-use-bitmap by rule=bitmap-peer
[root@memverge anton]# pcs cluster cib cib.txt
[root@memverge anton]# cat cib.txt |grep uname
node id="27" uname="memverge">
node id="28" uname="memverge2">
node_state id="28" uname="memverge2" in_ccm="1743401842" crmd="1743401842" crm-debug-origin="do_state_transition" join="member" expected="member">
node_state id="27" uname="memverge" in_ccm="1743166191" crmd="1743166191" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
[root@memverge anton]#
[root@memverge anton]# drbdadm status
ha-iscsi role:Secondary
volume:31 disk:UpToDate
memverge2 role:Primary
volume:31 peer-disk:UpToDate
ha-nfs role:Secondary
volume:29 disk:UpToDate
volume:30 disk:UpToDate
memverge2 role:Primary
volume:29 peer-disk:UpToDate
volume:30 peer-disk:UpToDate
So it may not be a bad idea to update the handlers so that after-resync-target is also triggered for the uuid_compare()="no sync" case.
Anton