FreeBSD: highly available ZFS storage with HAST

Boris HUISGEN
Objective: make a ZFS pool highly available by replicating it to a second server with HAST. Each node carries two spare disks (ada2 and ada3) exported as HAST resources over a dedicated link (em1); the ZFS pool is then built on top of the /dev/hast/ devices, which only exist on the node currently holding the primary role.
Configuration of node1
node1# cat /etc/rc.conf
hostname="node1"
keymap="fr.iso.acc"
zfs_enable="YES"
network_interfaces="lo0 em0 em1"
defaultrouter="192.168.0.254"
ifconfig_em0="inet 192.168.0.101 netmask 255.255.255.0 polling"
ifconfig_em1="inet 192.168.1.101 netmask 255.255.255.0 polling"
sshd_enable="YES"
hastd_enable="YES"
node1# cat /etc/hast.conf
resource disk1 {
    on node1 {
        local /dev/ada2
        remote 192.168.1.102
    }
    on node2 {
        local /dev/ada2
        remote 192.168.1.101
    }
}
resource disk2 {
    on node1 {
        local /dev/ada3
        remote 192.168.1.102
    }
    on node2 {
        local /dev/ada3
        remote 192.168.1.101
    }
}
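The remote addresses point at the 192.168.1.0/24 network carried by em1, which keeps replication traffic on its own link. No replication mode is set, so HAST uses its default, fullsync, as the hastctl status output below confirms. If you prefer it spelled out, hast.conf(5) accepts a replication statement, for instance at global scope (a sketch, not part of the original configuration):

# optional, at the top of /etc/hast.conf
replication fullsync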
Configuration of node2
node2# cat /etc/rc.conf
hostname="node2"
keymap="fr.iso.acc"
zfs_enable="YES"
network_interfaces="lo0 em0 em1"
defaultrouter="192.168.0.254"
ifconfig_em0="inet 192.168.0.102 netmask 255.255.255.0 polling"
ifconfig_em1="inet 192.168.1.102 netmask 255.255.255.0 polling"
sshd_enable="YES"
hastd_enable="YES"
node2# scp node1:/etc/hast.conf /etc/
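An optional check, assuming root ssh access between the nodes, to confirm that both machines use an identical configuration (not part of the original steps):

node2# ssh node1 cat /etc/hast.conf | diff /etc/hast.conf -

No output means the two files match.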
Starting HAST
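The hastctl role commands below talk to the hastd daemon, so it has to be running on both nodes. With hastd_enable="YES" it comes up at boot; if it is not running yet, start it by hand (assuming the standard rc framework):

node1# service hastd start
node2# service hastd start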
node1# hastctl create disk1
node1# hastctl create disk2
node1# hastctl role primary disk1
node1# hastctl role primary disk2
node2# hastctl create disk1
node2# hastctl create disk2
node2# hastctl role secondary disk1
node2# hastctl role secondary disk2
node1# hastctl status
disk1:
 role: primary
 provname: disk1
 localpath: /dev/ada2
 extentsize: 2097152 (2.0MB)
 keepdirty: 64
 remoteaddr: 192.168.1.102
 replication: fullsync
 status: degraded
 dirty: 0 (0B)
 statistics:
 reads: 19
 writes: 0
 deletes: 0
 flushes: 0
 activemap updates: 0
disk2:
 role: primary
 provname: disk2
 localpath: /dev/ada3
 extentsize: 2097152 (2.0MB)
 keepdirty: 64
 remoteaddr: 192.168.1.102
 replication: fullsync
 status: degraded
 dirty: 0 (0B)
 statistics:
 reads: 19
 writes: 0
 deletes: 0
 flushes: 0
 activemap updates: 0
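The degraded status is expected at this point: the resources are not yet fully synchronized with node2. Once both sides are connected and the initial synchronization finishes, hastctl status reports complete.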
node1# ls -l /dev/hast/
total 0
crw-r-----  1 root  operator    0, 106 Nov  1 17:20 disk1
crw-r-----  1 root  operator    0, 108 Nov  1 17:20 disk2
node1# zpool create data mirror /dev/hast/disk1 /dev/hast/disk2
node1# zpool status data
  pool: data
 state: ONLINE
 scan: none requested
config:
    NAME            STATE     READ WRITE CKSUM
    data            ONLINE       0     0     0
      mirror-0      ONLINE       0     0     0
        hast/disk1  ONLINE       0     0     0
        hast/disk2  ONLINE       0     0     0
errors: No known data errors
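Before testing the failover, it is worth writing some data to the pool so there is something to find on the other side (the dataset and file names below are only examples, not part of the original setup):

node1# zfs create data/test
node1# dd if=/dev/zero of=/data/test/testfile bs=1m count=10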
Failover to node2
node1# zpool export data
node1# hastctl role secondary disk1
node1# hastctl role secondary disk2
node2# hastctl role primary disk1
node2# hastctl role primary disk2
node2# zpool import data
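Once the pool is imported on node2, the data written earlier is available again under /data. zpool status and hastctl status can be used to confirm that the pool is ONLINE and that both resources are now primary on node2:

node2# zpool status data
node2# hastctl status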