Archives for October 2012

Xen: backing up a Windows VM with LVM + NTFS cloning

root@xen:~# apt-get install ntfsprogs kpartx

Backup

root@xen:~# lvcreate -s -L1G -nxen_vm_backup /dev/vg/xen_vm
root@xen:~# kpartx -a /dev/vg/xen_vm_backup
root@xen:~# ls -l /dev/mapper/vg-xen_vm_backup*
lrwxrwxrwx 1 root root      7 Oct 24 12:21 /dev/mapper/vg-xen_vm_backup -> ../dm-6
lrwxrwxrwx 1 root root      7 Oct 24 12:22 /dev/mapper/vg-xen_vm_backup1 -> ../dm-9
root@xen:~# ntfsclone -s -o vm-20121024.img /dev/mapper/vg-xen_vm_backup1
root@xen:~# lvdisplay vg/xen_vm > vm-20121024.txt
root@xen:~# kpartx -d /dev/vg/xen_vm_backup
root@xen:~# lvremove vg/xen_vm_backup
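
If the VM's logical volume itself ever needs to be recreated, it is worth saving its partition table next to the ntfsclone image; a minimal sketch using sfdisk, assuming an MBR partition table:

root@xen:~# sfdisk -d /dev/vg/xen_vm > vm-20121024.sfdisk

The dump can be written back with sfdisk /dev/vg/xen_vm < vm-20121024.sfdisk before running kpartx and ntfsclone -r.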

Restore

root@xen:~# kpartx -a /dev/vg/xen_vm
root@xen:~# ntfsclone -r -O /dev/mapper/vg-xen_vm1 vm-20121024.img
root@xen:~# kpartx -d /dev/vg/xen_vm

Csync2: file synchronization for clusters

Csync2 is a cluster synchronization tool. It can be used to keep files on multiple hosts in a cluster in sync. Csync2 can handle complex setups with much more than just 2 hosts, handle file deletions and can detect conflicts. It is expedient for HA-clusters, HPC-clusters, COWs and server farms.

Installation

root@node1:~# apt-get install csync2
root@node1:~# openssl genrsa -out csync2_ssl_key.pem 2048
root@node1:~# openssl req -new -x509 -days 365 -key csync2_ssl_key.pem -out csync2_ssl_cert.pem
root@node2:~# apt-get install csync2
root@node2:~# openssl genrsa -out csync2_ssl_key.pem 2048
root@node2:~# openssl req -new -x509 -days 365 -key csync2_ssl_key.pem -out csync2_ssl_cert.pem
root@node3:~# apt-get install csync2
root@node3:~# openssl genrsa -out csync2_ssl_key.pem 2048
root@node3:~# openssl req -new -x509 -days 365 -key csync2_ssl_key.pem -out csync2_ssl_cert.pem

Configuration

root@node1:~# more /etc/csync2.cfg
#nossl * *;

group web
{
	host node1;
	host node2;
	host node3;

	key /etc/csync2_web.key;

	include /etc/csync2.cfg;
	include /etc/nginx/sites-enabled/;
	include %wwwdir%;
	exclude %wwwdir%/.*/awstats/.*;
	exclude %wwwdir%/.*/logs/.*;
	exclude %wwwdir%/.*/tmp/.*;

	action
	{
		pattern /etc/nginx/sites-enabled/*;
		exec "/etc/init.d/nginx reload";
		logfile "/var/log/csync2/action.log";
		do-local;
	}

	#backup-directory /var/backups/csync2;
	#backup-generations 2;

	auto first;
}

prefix wwwdir
{
	on node1: /var/www;
	on node2: /var/www;
	on node3: /home/www;
}
root@node1:~# csync2 -k /etc/csync2_web.key
root@node1:~# scp /etc/csync2.cfg /etc/csync2_web.key node2:/etc
root@node1:~# scp /etc/csync2.cfg /etc/csync2_web.key node3:/etc

Running the synchronization

root@node1:~# csync2 -rxv
root@node1:~# csync2 -xv
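
To keep the nodes in sync without manual intervention, a common setup is a cron job running csync2 -x on each node; a minimal sketch, the 5-minute interval being an arbitrary choice:

root@node1:~# crontab -l
*/5 * * * * /usr/sbin/csync2 -x >/dev/null 2>&1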

FreeBSD: managing CARP interfaces

List the CARP interfaces belonging to the main carp group:

# ifconfig -g carp

Assign a CARP interface to the carp group:

# ifconfig carp4 group carp

Remove a CARP interface from the carp group:

# ifconfig carp4 -group carp
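
The group is mostly useful for acting on every CARP interface at once, for instance to take them all down during maintenance; a minimal sketch using the default carp group:

# for i in $(ifconfig -g carp); do ifconfig $i down; done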

Debian: iSCSI configuration in target and initiator mode

Installing a target

root@target:~# apt-get install iscsitarget iscsitarget-source
root@target:~# module-assistant auto-install iscsitarget
root@target:~# cat /etc/iet/ietd.conf
Target iqn.2012-10.domain.my.target:data
    Lun 0 Path=/dev/storage/data,Type=blockio
    Alias data
    IncomingUser test blablablaaa
root@target:~# /etc/init.d/iscsitarget start
root@target:~# cat /proc/net/iet/volume
tid:1 name:iqn.2012-10.domain.my.target:data

Installing an initiator

root@init:~# apt-get install open-iscsi
root@init:~# /etc/init.d/open-iscsi start
root@init:~# iscsiadm -m discovery -t sendtargets -p 192.168.1.223
192.168.1.223:3260,1 iqn.2012-10.domain.my.target:data
root@init:~# iscsiadm -m node -T iqn.2012-10.domain.my.target:data -p 192.168.1.223 -o update -n node.session.auth.username -v test
root@init:~# iscsiadm -m node -T iqn.2012-10.domain.my.target:data -p 192.168.1.223 -o update -n node.session.auth.password -v blablablaaa
root@init:~# iscsiadm -m node -T iqn.2012-10.domain.my.target:data -p 192.168.1.223 -l
Logging in to [iface: default, target: iqn.2012-10.domain.my.target:data, portal: 192.168.1.223,3260]
Login to [iface: default, target: iqn.2012-10.domain.my.target:data, portal: 192.168.1.223,3260]: successful
root@init:~# tail -n 10 /var/log/messages
Oct 12 13:58:28 init kernel: [    9.742667] iscsi: registered transport (tcp)
Oct 12 14:03:47 init kernel: [  329.271809] iscsi: registered transport (iser)
Oct 12 14:07:21 init kernel: [  544.033813] scsi3 : iSCSI Initiator over TCP/IP
Oct 12 14:07:23 init kernel: [  545.116336] scsi 3:0:0:0: Direct-Access     IET      VIRTUAL-DISK     0    PQ: 0 ANSI: 4
Oct 12 14:07:23 init kernel: [  545.119073] sd 3:0:0:0: Attached scsi generic sg2 type 0
Oct 12 14:07:23 init kernel: [  545.130552] sd 3:0:0:0: [sdb] 2097152 512-byte logical blocks: (1.07 GB/1.00 GiB)
Oct 12 14:07:23 init kernel: [  545.130985] sd 3:0:0:0: [sdb] Write Protect is off
Oct 12 14:07:23 init kernel: [  545.131804] sd 3:0:0:0: [sdb] Write cache: disabled, read cache: disabled, doesn't support DPO or FUA
Oct 12 14:07:23 init kernel: [  545.134005]  sdb: unknown partition table
Oct 12 14:07:23 init kernel: [  545.145458] sd 3:0:0:0: [sdb] Attached SCSI disk
root@init:~# ls -l /dev/disk/by-path/ | grep sdb
lrwxrwxrwx 1 root root  9 Oct 12 14:07 ip-192.168.1.223:3260-iscsi-iqn.2012-10.domain.my.target:data-lun-0 -> ../../sdb
root@init:~# iscsiadm -m node -T iqn.2012-10.domain.my.target:data -p 192.168.1.223 -o update -n node.conn[0].startup -v automatic
root@init:~# /etc/init.d/open-iscsi restart
root@target:~# cat /proc/net/iet/session
tid:1 name:iqn.2012-10.domain.my.target:data
sid:281474997486080 initiator:iqn.1993-08.org.debian:01:9cfd28ac883
cid:0 ip:192.168.1.221 state:active hd:none dd:none
root@init:~# fdisk -l
root@init:~# fdisk /dev/sdb
root@init:~# mkfs.ext3 /dev/sdb1
root@init:~# echo "/dev/sdb1 /mnt ext3 defaults,_netdev 0 0" >> /etc/fstab
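
To cleanly detach the disk later, unmount it first and close the iSCSI session with a logout; a minimal sketch:

root@init:~# umount /mnt
root@init:~# iscsiadm -m node -T iqn.2012-10.domain.my.target:data -p 192.168.1.223 -u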

Firewall Builder: configuration for a PF firewall cluster

Here is an example of a firewall ruleset, generated with Firewall Builder, for a cluster of two PF firewalls running FreeBSD.

The PF rules used cover nearly all of the available features (a minimal sketch of the connection-limiting part follows the list):

  • IPv4 and IPv6 access filtering
  • network redundancy with CARP + PFSYNC
  • filtering on virtual interfaces (VLAN, TUN)
  • filtering by packet tagging
  • connection limiting with automatic blacklisting
  • load balancing of outgoing connections
  • QoS management
  • FTP handling for internal and external access
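
As an illustration of the connection-limiting item above, here is a minimal PF sketch (not taken from the generated ruleset; $ext_if is an assumed macro) that blacklists abusive SSH clients into a table:

table <bruteforce> persist
block in quick from <bruteforce>
pass in on $ext_if proto tcp to ($ext_if) port ssh flags S/SA keep state \
	(max-src-conn 10, max-src-conn-rate 5/30, overload <bruteforce> flush global)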

Note that I did not use Firewall Builder's cluster support, so the rules are duplicated for each node.

Configuration file (format v5.1): available for download.

DRBD: master/master configuration + OCFS2 cluster filesystem

DRBD replication

root@node1:~# apt-get install drbd8-utils
root@node1:~# cat /etc/drbd.d/global_common.conf
global {
	usage-count no;
}

common {
	protocol C;

	handlers {
		pri-on-incon-degr "/usr/lib/drbd/notify-pri-on-incon-degr.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
		pri-lost-after-sb "/usr/lib/drbd/notify-pri-lost-after-sb.sh; /usr/lib/drbd/notify-emergency-reboot.sh; echo b > /proc/sysrq-trigger ; reboot -f";
		local-io-error "/usr/lib/drbd/notify-io-error.sh; /usr/lib/drbd/notify-emergency-shutdown.sh; echo o > /proc/sysrq-trigger ; halt -f";
	}

	startup {
		wfc-timeout 0;
		degr-wfc-timeout 120;
		become-primary-on both;
	}

	disk {
		on-io-error detach;
	}

	net {
		cram-hmac-alg "sha1";
		shared-secret "blablablaaaaaaaaaaa";
		max-buffers 2048;
		ko-count 4;
		allow-two-primaries;
		after-sb-0pri discard-zero-changes;
		after-sb-1pri discard-secondary;
		after-sb-2pri disconnect;
	}

	syncer {
		rate 40M;
		verify-alg "crc32c";
	}
}
root@node1:~# cat /etc/drbd.d/r0.res
resource r0 {
        on node1 {
                device /dev/drbd0;
                disk /dev/vg/data;
                address 192.168.1.211:7788;
                meta-disk internal;
        }

        on node2 {
                device /dev/drbd0;
                disk /dev/vg/data;
                address 192.168.1.212:7788;
                meta-disk internal;
        }
}
root@node1:~# drbdadm create-md r0
root@node1:~# /etc/init.d/drbd start
root@node1:~# drbdadm syncer r0
root@node1:~# drbdadm connect r0
root@node2:~# apt-get install drbd8-utils
root@node2:~# scp root@node1:/etc/drbd.d/* /etc/drbd.d/
root@node2:~# drbdadm create-md r0
root@node2:~# /etc/init.d/drbd start
root@node2:~# drbdadm syncer r0
root@node2:~# drbdadm connect r0
root@node1:~# drbdadm -- --overwrite-data-of-peer primary r0
root@node1:~# drbdadm primary r0
root@node1:~# cat /proc/drbd
version: 8.3.7 (api:88/proto:86-91)
srcversion: EE47D8BF18AC166BE219757
0: cs:Connected ro:Primary/Primary ds:UpToDate/UpToDate C r----
ns:4300921 nr:3429 dw:110206 dr:8403592 al:39 bm:260 lo:0 pe:0 ua:0 ap:0 ep:1 wo:b oos:0
root@node2:~# drbdadm primary r0
root@node2:~# cat /proc/drbd
version: 8.3.7 (api:88/proto:86-91)
srcversion: EE47D8BF18AC166BE219757
0: cs:Connected ro:Primary/Primary ds:UpToDate/UpToDate C r----
ns:756 nr:825 dw:1581 dr:3131 al:2 bm:1 lo:0 pe:0 ua:0 ap:0 ep:1 wo:b oos:0
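
Since verify-alg is configured in the syncer section, an online verification of the replicated device can be started at any time; a minimal sketch (progress is visible in /proc/drbd):

root@node1:~# drbdadm verify r0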

Setting up the OCFS2 filesystem

root@node1:~# apt-get install ocfs2-tools
root@node1:~# cat /etc/ocfs2/cluster.conf
cluster:
    name = ocfs2
    node_count = 2

node:
    number = 1
    name = node1
    ip_address = 192.168.1.211
    ip_port = 7777
    cluster = ocfs2

node:
    number = 2
    name = node2
    ip_address = 192.168.1.212
    ip_port = 7777
    cluster = ocfs2
root@node1:~# /etc/init.d/ocfs2 start
root@node1:~# /etc/init.d/o2cb start
root@node1:~# mkfs.ocfs2 /dev/drbd0
root@node1:~# mount.ocfs2 /dev/drbd0 /mnt/
root@node2:~# apt-get install ocfs2-tools
root@node2:~# scp root@node1:/etc/ocfs2/* /etc/ocfs2/
root@node2:~# /etc/init.d/ocfs2 start
root@node2:~# /etc/init.d/o2cb start
root@node2:~# mount.ocfs2 /dev/drbd0 /mnt/
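
To mount the volume automatically at boot once drbd and o2cb are up, an fstab entry using _netdev can be added on both nodes; a minimal sketch assuming /mnt as the mount point:

root@node1:~# echo "/dev/drbd0 /mnt ocfs2 _netdev,defaults 0 0" >> /etc/fstab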

Nginx: reverse-proxy configuration

root@nginx1:~# cat /etc/nginx/conf.d/proxy.conf
proxy_redirect            off;
proxy_set_header          Host $host;
proxy_set_header          X-Real-IP $remote_addr;
proxy_set_header          X-Forwarded-For $proxy_add_x_forwarded_for;
client_max_body_size      16m;
client_body_buffer_size   128k;
client_header_buffer_size 64k;
proxy_connect_timeout     10;
proxy_send_timeout        60;
proxy_read_timeout        60;
proxy_buffer_size         16k;
proxy_buffers             32 16k;
proxy_busy_buffers_size   64k;
proxy_cache_key           "$scheme://$host$request_uri";
proxy_cache_path          /var/cache/nginx levels=1:2 keys_zone=cache:10m inactive=7d max_size=512m;
root@nginx1:~# cat /etc/nginx/sites-enabled/monsite
server {
   listen      80;
   listen      [::]:80 default ipv6only=on;
   server_name monsite.fr *.monsite.fr;
   access_log  /var/log/nginx/localhost.access.log;

   location / {
      proxy_pass            http://192.168.10.4/;
      proxy_cache           cache;
      proxy_cache_valid     12h;
      proxy_cache_use_stale error timeout invalid_header updating;
   }
}
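
The proxy_pass above targets a single backend; to spread the load over several backends, an upstream block can be declared and referenced instead of the IP address. A minimal sketch, 192.168.10.5 being a hypothetical second backend:

upstream backend {
   server 192.168.10.4;
   server 192.168.10.5;
}

proxy_pass would then point to http://backend/.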

MySQL: backup script using an LVM snapshot

# cat mysql_backup.sh
#!/usr/bin/perl -w

my $host = 'localhost';
my $dbname = 'mysql';
my $user = 'backup';
my $passwd = 'kV6qcZUbH0';
my $vg = 'vg';
my $lv = 'sql';
my $size = '1G';
my $backupdir = '/home/backup/mysql';

### SCRIPT ###

use POSIX;
use DBI;

my $snapshot = $lv . "_" . strftime("%Y%m%d_%H%M%S", localtime);
print "Creating snapshot $snapshot...\n";

$dbh = DBI->connect("dbi:mysql:$dbname;host=$host", $user, $passwd)
or die "Connection error: $DBI::errstr\n";

$dbh->do("SET SESSION AUTOCOMMIT=0");
$dbh->do("FLUSH TABLES WITH READ LOCK");

system("lvcreate -s -L$size -n$snapshot $vg/$lv");

$sth = $dbh->prepare("SHOW MASTER STATUS");
$sth->execute();
while ($row = $sth->fetchrow_hashref) {
    foreach $col (keys %{$row}) {
        print "$col: " . $$row{$col} . "\n";
    }
}

$dbh->do("UNLOCK TABLES");

system("mkdir -p $backupdir/tmp");
system("mount /dev/mapper/$vg-$snapshot $backupdir/tmp");
system("tar czf $snapshot.tar.gz -C $backupdir/tmp .");
system("umount /dev/mapper/$vg-$snapshot");
system("rm -fr $backupdir/tmp");
system("lvremove -f $vg/$snapshot");

print "Done.\n";

MySQL: backup script using a ZFS snapshot

# cat mysql_backup.sh
#!/usr/bin/perl -w

my $host = 'localhost';
my $dbname = 'mysql';
my $user = 'backup';
my $passwd = 'WD8Y2YOraM';
my $pool = 'rpool/sql';
my $backupdir = '/home/backup/mysql';

### SCRIPT ###

use POSIX;
use DBI;

my $snapshot = strftime("%Y%m%d_%H%M%S", localtime);
print "Creating snapshot $snapshot...\n";

$dbh = DBI->connect("dbi:mysql:$dbname;host=$host", $user, $passwd)
   or die "Connection error: $DBI::errstr\n";

$dbh->do("SET SESSION AUTOCOMMIT=0");
$dbh->do("FLUSH TABLES WITH READ LOCK");
system("zfs snapshot -r $pool\@$snapshot");
$sth = $dbh->prepare("SHOW MASTER STATUS");
$sth->execute();
while ($row = $sth->fetchrow_hashref) {
    foreach $col (keys %{$row}) {
        print "$col: " . $$row{$col} . "\n";
    }
}

$dbh->do("UNLOCK TABLES");

system("zfs send -vR $pool\@$snapshot | gzip > $backup_dir/$snapshot.gz");
system("zfs destroy -r $pool\@$snapshot");

print "Done.\n"

FreeBSD: clean ZFS install with Boot Environment (BE) support

Boot Environments (BE) are a feature backported from Solaris along with the ZFS port to FreeBSD. Their purpose is to keep different versions of the root filesystem so that the system is guaranteed to boot safely after a maintenance operation such as an installation, an upgrade or simply a reconfiguration. The feature relies entirely on ZFS snapshots.

Technically, the ZFS root pool must follow a layout of the form <pool name>/ROOT/<BE name>. For a pool named rpool and a BE named default, the root filesystem is therefore rpool/ROOT/default. BEs are then managed with the beadm command, which is used to create them and to select the BE to boot from at the next startup.

This article describes how to install and use a system with BE support.

Installing the system

The whole procedure is performed from the FreeBSD CD in live mode.

# gpart create -s gpt ada0
# gpart add -b 34 -s 94 -t freebsd-boot ada0
# gpart add -s 4G -t freebsd-swap -l swap0 ada0
# gpart add -t freebsd-zfs -l disk0 ada0
# gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 ada0
# gpart create -s gpt ada1
# gpart add -b 34 -s 94 -t freebsd-boot ada1
# gpart add -s 4G -t freebsd-swap -l swap1 ada1
# gpart add -t freebsd-zfs -l disk1 ada1
# gpart bootcode -b /boot/pmbr -p /boot/gptzfsboot -i 1 ada1
# kldload geom_mirror.ko
# gmirror label -F -h -b round-robin swap /dev/gpt/swap0 /dev/gpt/swap1
# kldload opensolaris.ko
# kldload zfs.ko
# gnop create -S 4096 /dev/gpt/disk0
# gnop create -S 4096 /dev/gpt/disk1
# zpool create -o altroot=/mnt -o cachefile=/var/tmp/zpool.cache rpool mirror /dev/gpt/disk0.nop /dev/gpt/disk1.nop
# zpool export rpool
# gnop destroy /dev/gpt/disk0.nop
# gnop destroy /dev/gpt/disk1.nop
# zpool import -o altroot=/mnt -o cachefile=/var/tmp/zpool.cache rpool
# zfs set checksum=fletcher4 rpool
# zfs set atime=off rpool
# zfs create rpool/ROOT
# zfs create -o mountpoint=/ rpool/ROOT/default
# zfs set freebsd:boot-environment=1 rpool/ROOT/default
# zpool set bootfs=rpool/ROOT/default rpool
# zfs create -o mountpoint=/usr/local rpool/local
# zfs create -o mountpoint=/var rpool/var
# zfs create -o compression=lzjb -o exec=off -o setuid=off rpool/var/crash
# zfs create -o exec=off -o setuid=off rpool/var/db
# zfs create -o compression=lzjb -o exec=on -o setuid=off rpool/var/db/pkg
# zfs create -o exec=off -o setuid=off rpool/var/empty
# zfs create -o compression=lzjb -o exec=off -o setuid=off rpool/var/log
# zfs create -o compression=gzip -o exec=off -o setuid=off rpool/var/mail
# zfs create -o exec=off -o setuid=off rpool/var/run
# zfs create -o compression=lzjb -o exec=on -o setuid=off rpool/var/tmp
# zfs create -o mountpoint=/tmp -o compression=on -o exec=on -o setuid=off rpool/tmp
# zfs create -o mountpoint=/home rpool/home
# chmod 1777 /mnt/tmp
# chmod 1777 /mnt/var/tmp
# sh
# cd /usr/freebsd-dist
# export DESTDIR=/mnt
# for file in base.txz lib32.txz kernel.txz doc.txz;
> do (cat $file | tar --unlink -xpJf - -C ${DESTDIR:-/}); done
# zfs set readonly=on rpool/var/empty
# cp /var/tmp/zpool.cache /mnt/boot/zfs/zpool.cache
# ee /mnt/etc/fstab
/dev/mirror/swap none swap sw 0 0
# ee /mnt/boot/loader.conf
geom_mirror_load="YES"
zfs_load="YES"
vfs.root.mountfrom="zfs:rpool/ROOT/default"
# ee /mnt/etc/rc.conf
hostname="freebsd9"
keymap="fr.iso.acc"
zfs_enable="YES"

network_interfaces="lo0 em0"
defaultrouter="192.168.0.1"
ipv6_defaultrouter="fdcb:9921:3552:afd7::1"
ifconfig_em0="inet 192.168.0.2 netmask 255.255.255.0 polling"
ifconfig_em0_ipv6="inet fdcb:9921:3552:afd7::2 prefixlen 64"

clear_tmp_enable="YES"
tcp_drop_synfin="YES"

moused_enable="NO"
sendmail_enable="NONE"
sshd_enable="YES"
syslogd_flags="-ss"
# ee /mnt/etc/resolv.conf
nameserver 192.168.0.254
search my.domain
# echo 'daily_status_gmirror_enable="YES"' >> /mnt/etc/periodic.conf
# echo 'daily_status_zfs_enable="YES"' >> /mnt/etc/periodic.conf
# echo 'daily_status_smart_devices="/dev/ada0 /dev/ada1"' >> /mnt/etc/periodic.conf
# echo 'daily_status_mail_rejects_enable="NO"' >> /mnt/etc/periodic.conf
# echo 'daily_status_include_submit_mailq="NO"' >> /mnt/etc/periodic.conf
# echo 'daily_submit_queuerun="NO"' >> /mnt/etc/periodic.conf
# echo 'WRKDIRPREFIX=/usr/obj' >> /mnt/etc/make.conf
# chroot /mnt
# passwd
# tzsetup
# cd /etc/mail
# nano aliases
# make aliases
# exit
# zfs umount -a
# zfs set mountpoint=/rpool rpool
# zfs set mountpoint=/rpool/ROOT rpool/ROOT
# zfs set mountpoint=legacy rpool/ROOT/default
# shutdown -r now

Booting the system

# zfs list
NAME                 USED  AVAIL  REFER  MOUNTPOINT
rpool                687M  14.7G   152K  /rpool
rpool/ROOT           683M  14.7G   144K  /rpool/ROOT
rpool/ROOT/default   683M  14.7G   683M  legacy
rpool/home           144K  14.7G   144K  /home
rpool/local          144K  14.7G   144K  /usr/local
rpool/tmp            176K  14.7G   176K  /tmp
rpool/var           1.89M  14.7G   564K  /var
rpool/var/crash      148K  14.7G   148K  /var/crash
rpool/var/db         388K  14.7G   244K  /var/db
rpool/var/db/pkg     144K  14.7G   144K  /var/db/pkg
rpool/var/empty      144K  14.7G   144K  /var/empty
rpool/var/log        188K  14.7G   188K  /var/log
rpool/var/mail       144K  14.7G   144K  /var/mail
rpool/var/run        204K  14.7G   204K  /var/run
rpool/var/tmp        152K  14.7G   152K  /var/tmp
# zfs snapshot rpool/ROOT/default@clean

Creating a new environment

Start by installing the beadm command:

# cd /usr/ports/sysutils/beadm
# make install

We can then check that the default BE is properly detected:

# beadm list
BE      Active Mountpoint  Space Created
default NR     /            1.8G 2012-10-08 04:33

Creating a BE only requires specifying its name:

# beadm create upgrade
Created successfully
# beadm list
BE      Active Mountpoint  Space Created
default NR     /            1.8G 2012-04-19 15:05
upgrade -      -          136.0K 2012-10-08 06:25

It is also possible to clone an existing BE by specifying its name.

At the ZFS pool level, the BE shows up as a new clone:

# zfs list
NAME                 USED  AVAIL  REFER  MOUNTPOINT
rpool               1.83G  13.6G   152K  /rpool
rpool/ROOT          1.68G  13.6G   144K  /rpool/ROOT
rpool/ROOT/default  1.68G  13.6G  1.68G  legacy
rpool/ROOT/upgrade   144K  13.6G  1.68G  legacy
rpool/home           144K  13.6G   144K  /home
rpool/local         4.14M  13.6G  4.14M  /usr/local
rpool/tmp            184K  13.6G   184K  /tmp
rpool/var            146M  13.6G   564K  /var
rpool/var/crash      148K  13.6G   148K  /var/crash
rpool/var/db         145M  13.6G   144M  /var/db
rpool/var/db/pkg     196K  13.6G   196K  /var/db/pkg
rpool/var/empty      144K  13.6G   144K  /var/empty
rpool/var/log        192K  13.6G   192K  /var/log
rpool/var/mail       144K  13.6G   144K  /var/mail
rpool/var/run        204K  13.6G   204K  /var/run
rpool/var/tmp        152K  13.6G   152K  /var/tmp

All that remains is to activate this BE and reboot the system:

# beadm activate upgrade
Activated successfully
# shutdown -r now

From now on, every operation affects the upgrade BE. If anything goes wrong, reactivating the default BE will bring the system back up.
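
A minimal sketch of that rollback, with cleanup of the abandoned BE afterwards:

# beadm activate default
# shutdown -r now
# beadm destroy upgrade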
