Archives pour décembre, 2014

Docker : cleanup des images orphelines d’un registre privé

#!/bin/bash
# Trace and delete orphan images from a private Docker registry (v1 on-disk
# layout: <base>/repositories and <base>/images).
# Usage: $0 <registry base dir>
set -euo pipefail

shopt -s nullglob

# ${1:-}: with `set -u` a bare $1 would abort with "unbound variable"
# before the usage check below ever ran.
base_dir=${1:-}
if [ -z "$base_dir" ] ; then
    echo "$0 <DIR>" >&2
    exit 1
fi

readonly output_dir=$(mktemp -d -t trace-images-XXXX)
# Explicit check: `readonly jq=$(command -v jq)` would mask the lookup's
# exit status, so a missing jq would only fail much later.
jq=$(command -v jq) || { echo "$0: jq is required" >&2; exit 1; }
readonly jq

readonly repository_dir=$base_dir/repositories
readonly image_dir=$base_dir/images

# Work files: every image hash / hashes referenced by a tag / the orphans.
readonly all_images=$output_dir/all
readonly used_images=$output_dir/used
readonly unused_images=$output_dir/unused

# Print the ancestry of an image (one ancestor hash per line), read from
# the image's `ancestry` JSON file.
# Arguments: $1 - image hash
image_history() {
    # NB: the original `local readonly image_hash=$1` declared a local
    # literally named "readonly"; `local -r` is the correct spelling.
    local -r image_hash=$1

    # `jq -r` emits raw (unquoted) strings, replacing the old
    # `jq '.[]' | tr -d '"'` pipeline.
    "$jq" -r '.[]' "$image_dir/$image_hash/ancestry"
}

echo "Collecting orphan images at $repository_dir"
for library in "$repository_dir"/*; do
    # Progress goes to stderr: the loop's stdout feeds the pipeline below,
    # so the original `echo "Library …"` (no >&2) leaked library names
    # into $used_images and silently corrupted the orphan computation.
    echo "Library $(basename "$library")" >&2

    for repo in "$library"/*; do
        echo "- $(basename "$repo")" >&2

        for tag in "$repo"/tag_*; do
            echo "  + $(basename "$tag")" >&2

            tagged_image=$(cat "$tag")
            image_history "$tagged_image"
        done
    done
done | sort -u > "$used_images"

# Directory names under images/ are the image hashes, one per line.
ls "$image_dir" > "$all_images"

# Everything not referenced by any tag's ancestry is an orphan. grep exits
# non-zero when nothing matches, hence `|| true` under `set -e`.
grep -v -F -f "$used_images" "$all_images" > "$unused_images" || true

# `wc -l < file` prints only the count — no filename to strip with awk;
# tr removes the padding some wc implementations add.
readonly all_image_count=$(wc -l < "$all_images" | tr -d '[:blank:]')
readonly used_image_count=$(wc -l < "$used_images" | tr -d '[:blank:]')
readonly unused_image_count=$(wc -l < "$unused_images" | tr -d '[:blank:]')

# Human-readable total size of the orphan layers ("0" when there are none).
# The `cd … &&` guard stops du from running on the wrong directory.
readonly unused_image_size=$( if [ "$unused_image_count" -gt 0 ] ; then
    # shellcheck disable=SC2046 — word-splitting of the hash list is intended
    cd "$image_dir" && du -hc $(cat "$unused_images") | tail -n1 | cut -f1
else echo 0; fi
    )

if [ "$unused_image_count" -le 0 ] ; then
    echo "No unused images, exiting"
    exit 0
fi

# printf instead of the non-portable `echo -e`.
printf '\nTrimming _index_images...\n'

# Flatten the orphan list into `"hash1" "hash2" …` on a single line so
# `jq -s` below can slurp it as a stream of JSON strings.
readonly unused_images_flatten=$output_dir/unused.flatten
sed -e 's/\(.*\)/"\1" /' "$unused_images" | tr -d '\n' > "$unused_images_flatten"

for library in "$repository_dir"/*; do
    echo "Library $(basename "$library")" >&2

    for repo in "$library"/*; do
        repo_name=$(basename "$repo")
        echo " Repo $repo_name" >&2
        mkdir -p "$output_dir/$repo_name"
        # Keep a pretty-printed backup of the index before rewriting it.
        "$jq" '.' "$repo/_index_images" > "$output_dir/$repo_name/_index_images.old"
        # Subtract every {id: <orphan hash>} entry from the index array.
        "$jq" -s '.[0] - [ .[1:][] | {id: .} ]' \
            "$repo/_index_images" "$unused_images_flatten" \
            > "$output_dir/$repo_name/_index_images"
        cp "$output_dir/$repo_name/_index_images" "$repo/_index_images"
    done
done

echo ""
echo "These are unused images:"
cat "$unused_images"
echo "${all_image_count} images, ${used_image_count} used, ${unused_image_count} unused, size ${unused_image_size}"

echo ""
echo "Cleaning unused images"
# `--` guards against a hash that could be mistaken for an rm option.
xargs -I{} rm -rf -- "$image_dir/{}" < "$unused_images"

Docker : lancer un shell dans un container

Depuis Docker version 1.3 :

# docker exec -ti container_name /bin/bash

CoreOS : fix crash du network stack sous Xen 4.2

Dec 16 11:30:08 ip-172-10-4-2.ap-southeast-1.compute.internal kernel: xen_netfront: xennet: skb rides the rocket: 19 slots, 65226 bytes
# ethtool -K eth0 sg off

Amazon EC2 : fix de configuration Xen avec instance HVM

# dmesg
[335773.344080] xen:balloon: Cannot add additional memory (-17)
[335805.413385] xen:balloon: Cannot add additional memory (-17)
[335837.536078] xen:balloon: Cannot add additional memory (-17)
 
# cat /sys/devices/system/xen_memory/xen_memory0/info/current_kb > /sys/devices/system/xen_memory/xen_memory0/target_kb

CoreOS : cloud config pour instance EC2 avec stockage local Docker + swap

Le fichier cloud-config ci-dessous initialise une instance EC2 CoreOS avec les options suivantes :

  • paramétrage des timeout fleet et etcd
  • swap d’1 Go sur le disque local (/dev/xvdb1)
  • partition BTRFS pour les containers Docker sur disque local (/dev/xvdb2)
  • désactivation du reboot automatique par sécurité (locksmithd)
#cloud-config

coreos:
  update:
    # Quoted on purpose: YAML 1.1 parses a bare `off` as boolean false,
    # while coreos-cloudinit expects the literal string "off".
    reboot-strategy: "off"
  etcd:
    # generate a new token for each unique cluster from https://discovery.etcd.io/new
    discovery: https://discovery.etcd.io/7c024472504cf1cef428ccc8af8e63f0
    # multi-region and multi-cloud deployments need to use $public_ipv4
    addr: $private_ipv4:4001
    peer-addr: $private_ipv4:7001
    # Raised timeouts/intervals for higher-latency cloud networking.
    peer-election-timeout: 6000
    peer-heartbeat-interval: 1500
    snapshot: true
    snapshot-count: 100
  fleet:
    public-ip: $public_ipv4
    metadata: region=eu-west
    etcd-request-timeout: 1500
  units:
    - name: etcd.service
      command: start
    - name: fleet.service
      command: start
    - name: format-ephemeral.service
      command: start
      content: |
        [Unit]
        Description=Formats the ephemeral drive
        [Service]
        Type=oneshot
        RemainAfterExit=yes
        # Wipe, then partition: 1 GB swap (xvdb1) + rest as btrfs (xvdb2).
        ExecStartPre=/usr/sbin/wipefs -f /dev/xvdb
        ExecStartPre=/usr/bin/sh -c "(/usr/bin/echo ',1024,S' && /usr/bin/echo ';')|/usr/sbin/sfdisk /dev/xvdb -uM"
        ExecStartPre=/usr/sbin/mkswap -f /dev/xvdb1
        ExecStartPre=/usr/sbin/swapon /dev/xvdb1
        ExecStart=/usr/sbin/mkfs.btrfs -f /dev/xvdb2
    - name: var-lib-docker.mount
      command: start
      content: |
        [Unit]
        Description=Mount ephemeral to /var/lib/docker
        Requires=format-ephemeral.service
        After=format-ephemeral.service
        Before=docker.service
        [Mount]
        What=/dev/xvdb2
        Where=/var/lib/docker
        Type=btrfs

CoreOS : désactivation des mises à jour automatiques

# systemctl stop update-engine.service
# systemctl mask update-engine.service

Btrfs : No space is left on device

# btrfs fi balance start -dusage=5 /
Done, had to relocate 5 out of 32 chunks

En cas d’opération longue, le statut peut être vérifié par la commande :

# btrfs balance status /
Balance on '/' is running
1 out of about 7 chunks balanced (2 considered),  86% left

Linux : modifier l’IP de sortie en configuration multi-alias

# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether 00:1e:c9:ae:fc:6f brd ff:ff:ff:ff:ff:ff
inet 172.16.170.4/16 brd 172.29.255.255 scope global eth0:1
inet 172.16.170.5/16 brd 172.29.255.255 scope global secondary eth0:2
inet 172.16.170.6/16 brd 172.29.255.255 scope global secondary eth0:3
inet 172.16.170.7/16 brd 172.29.255.255 scope global secondary eth0:4
inet 172.16.0.3/16 brd 172.29.255.255 scope global secondary eth0
inet6 fe80::21e:c9ff:feae:fc6f/64 scope link
valid_lft forever preferred_lft forever
# ip route
default via 172.16.0.1 dev eth0
172.16.0.0/16 dev eth0  proto kernel  scope link  src 172.16.170.4
# ip route get 8.8.8.8
8.8.8.8 via 172.16.0.1 dev eth0  src 172.16.170.4
cache
# ip route replace default via 172.16.0.1 src 172.16.0.3
# ip route get 8.8.8.8
8.8.8.8 via 172.16.0.1 dev eth0  src 172.16.0.3
cache

Amazon Route 53 : alias sur le root domain

route53_root_alias

CloudFlare : redirection du root domain vers un CNAME

Dans le cas d’un hébergement web de type cloud, il est impossible de déclarer un CNAME au niveau du root domain (alias @).

Avec  CloudFlare, la solution est de déclarer l’alias @ avec une IP publique (quelle qu’elle soit) afin qu’il soit possible d’activer la mise en cache (qui sera donc obligatoire) et d’ajouter les pages rules suivantes :

  • Forward http://monsite.fr/* to http://www.monsite.fr/$1
  • Forward http://monsite.fr to http://www.monsite.fr/

Les reverse proxy Nginx de CloudFlare renverront ainsi une redirection 302 jusqu’au navigateur client.

Haut de page