#!/usr/bin/env bash
#
# Install sanoid from the distribution repositories and deploy:
#   * an /etc/cron.d entry for sanoid (one for syncoid is prepared but disabled),
#   * /etc/sanoid/sanoid.conf (datasets + retention templates),
#   * pre/post snapshot helper scripts under /usr/local/bin.
#
# set -x

sudo apt install -y \
  sanoid
  # Build deps, only needed for the (commented-out) package build at the bottom:
  # git \
  # dh-make \
  # libconfig-inifiles-perl \

echo "sanoid provides: sanoid, syncoid, findoid in /usr/sbin"
echo "defaults are in /usr/share/sanoid/sanoid.defaults.conf"
echo "override them in /etc/sanoid/sanoid.conf"
echo "Installed files:"
cat <<-EOF
syncoid /usr/sbin/
sanoid /usr/sbin/
findoid /usr/sbin/
sanoid.defaults.conf /usr/share/sanoid/
debian/sanoid-prune.service /lib/systemd/system
CHANGELIST /usr/share/doc/sanoid/changelog
EOF

# Both sanoid and syncoid are oneshot processes so it makes little sense to
# provide an init file, cron is just fine. In this case the systemd file is there
# because systemd decided to manage cron tasks.
#
# Cronjob for non-systemd systems: every 15 minutes.
# If you require a different interval, you will need to disable the
# timer or the cron job according to your system configuration.

# Print the /etc/cron.d entry for sanoid to stdout.
# The job is a no-op on systemd hosts (guarded by the /run/systemd/system check)
# so the systemd timer and the cron job cannot both fire.
conf_print_sanoid_cron() {
  cat <<-EOF
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/15 * * * * root [ -f /etc/sanoid/sanoid.conf ] && if [ ! -d /run/systemd/system ]; then TZ=UTC /usr/sbin/sanoid --cron --quiet; fi
EOF
}
conf_print_sanoid_cron | sudo tee /etc/cron.d/sanoid

# Do the same for syncoid for backups.
# NOTE(review): unlike sanoid, syncoid takes explicit SOURCE/TARGET arguments
# and (presumably) does not read /etc/sanoid/syncoid.conf — verify the command
# line below against syncoid's documentation before enabling the install line.
conf_print_syncoid_cron() {
  cat <<-EOF
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/15 * * * * root [ -f /etc/sanoid/syncoid.conf ] && if [ ! -d /run/systemd/system ]; then TZ=UTC /usr/sbin/syncoid --cron --quiet; fi
EOF
}
# conf_print_syncoid_cron | sudo tee /etc/cron.d/syncoid

# Copied from /usr/share/doc/sanoid/example.conf
sudo mkdir -p /etc/sanoid/

# Print the full /etc/sanoid/sanoid.conf to stdout:
# per-dataset sections first, retention templates at the bottom.
conf_print_sanoid() {
  cat <<-EOF
#-- Organisational datasets
# DATA
[rpool/DATA]
    use_template = ignore
    recursive = yes
    process_children_only = yes
# ROOT
[rpool/ROOT]
    use_template = ignore
    recursive = yes
    process_children_only = yes
#-- END

# These datasets contain the docker zfs backing store "graph" (layers).
# Just restore broken container with docker-compose down && docker-compose up
[rpool/docker]
    use_template = ignore
    recursive = yes

# Docker persistent data
[rpool/DATA/docker-volumes]
    use_template = docker-persistent
    recursive = yes

[rpool/archive]
    use_template = ignore
    recursive = yes

[rpool/swap]
    use_template = ignore
    recursive = yes

[rpool/tmp]
    use_template = ignore
    recursive = yes

[rpool/ROOT/devuan-1]
    use_template = root
    recursive = yes
# rpool/ROOT/devuan-1/opt
# rpool/ROOT/devuan-1/usr
# rpool/ROOT/devuan-1/usr_local
# rpool/ROOT/devuan-1/usr_share
# rpool/ROOT/devuan-1/var
# rpool/ROOT/devuan-1/var_lib

# Specific override for Virtual Machines to use scripts
[rpool/ROOT/devuan-1/var_lib_virt]
    use_template = root
    recursive = yes
    pre_snapshot_script = /usr/local/bin/sanoid_virt-freeze-all.sh
    post_snapshot_script = /usr/local/bin/sanoid_virt-thaw-all.sh

# -- User Data --
[rpool/home]
    use_template = production
    recursive = yes

[rpool/space]
    use_template = production

#############################
# templates below this line #
#############################
[template_production]
    frequently = 0
    hourly = 36
    daily = 30
    monthly = 3
    yearly = 0
    autosnap = yes
    autoprune = yes

[template_root]
    # Root changes fast; shorter history often suffices
    hourly = 24
    daily = 7
    monthly = 1
    yearly = 0
    autosnap = yes
    autoprune = yes

[template_ignore]
    autoprune = no
    autosnap = no
    monitor = no

##############################
# Docker Persistent Template #
##############################
[template_docker-persistent]
    # Frequent snapshots for active databases/configs
    frequently = 0
    hourly = 24
    daily = 7
    monthly = 1
    yearly = 0
    # Safety checks
    autosnap = yes
    autoprune = yes
    # Don't take a snapshot if the dataset hasn't changed
    # (Saves metadata overhead)
    # skip_hourless = yes
    pre_snapshot_script = /usr/local/bin/sanoid_zfs-skip-empty.sh
#-- END

[template_backup]
    autoprune = yes
    frequently = 0
    hourly = 30
    daily = 90
    monthly = 12
    yearly = 0
    ### don't take new snapshots - snapshots on backup
    ### datasets are replicated in from source, not
    ### generated locally
    autosnap = no
    ### monitor hourlies and dailies, but don't warn or
    ### crit until they're over 48h old, since replication
    ### is typically daily only
    hourly_warn = 2880
    hourly_crit = 3600
    daily_warn = 48
    daily_crit = 60
#-- END

[template_hotspare]
    autoprune = yes
    frequently = 0
    hourly = 30
    daily = 90
    weekly = 4
    monthly = 3
    yearly = 0
    ### don't take new snapshots - snapshots on backup
    ### datasets are replicated in from source, not
    ### generated locally
    autosnap = no
    ### monitor hourlies and dailies, but don't warn or
    ### crit until they're over 4h old, since replication
    ### is typically hourly only
    hourly_warn = 4h
    hourly_crit = 6h
    daily_warn = 2d
    daily_crit = 4d
EOF
}
conf_print_sanoid | sudo tee /etc/sanoid/sanoid.conf

########################
# Pre-snapshot scripts #
########################

# In ZFS, even if no data has changed, creating a snapshot still consumes a
# small amount of space for metadata and adds an entry to the ZFS history.
# If you have hundreds of datasets being snapshotted every 15 minutes, this
# "metadata bloat" can make commands like zfs list -t snapshot feel sluggish
# over time. If you think this is an issue for you, use zfs-skip-empty.sh as
# a pre_snapshot_script.

# Print a pre-snapshot threshold script to stdout: it exits non-zero (skip)
# when the dataset's `written` property is below a human-readable threshold.
conf_print_skip_empty() {
  cat <<'EOF'
#!/bin/bash
# Usage: ./sanoid-threshold.sh <dataset> <threshold>
# or
# Add this to your /etc/sanoid.conf to fire this script.
# [tank/important_data]
#     use_template = production
#     # Only snapshot if more than 5MB changed
#     pre_snapshot_script = /usr/local/bin/sanoid-threshold.sh 5M

DATASET=$1
RAW_THRESHOLD=$2

# Convert a human-readable size ("5M", "1.5g", "512") to bytes on stdout.
# Unit matching is case-insensitive; a bare number is taken as bytes.
convert_to_bytes() {
    local number=$(echo "$1" | grep -oE '^[0-9.]+')
    # -i (case-insensitive), NOT -I (binary-file suppression): lowercase units
    # like "5m" must match, otherwise they silently fall through to bytes.
    local unit=$(echo "$1" | grep -oi '[KMGTP]' | head -n1 | tr '[:lower:]' '[:upper:]')
    case "$unit" in
        K) awk "BEGIN { printf \"%.0f\", $number * 1024 }" ;;
        M) awk "BEGIN { printf \"%.0f\", $number * 1024^2 }" ;;
        G) awk "BEGIN { printf \"%.0f\", $number * 1024^3 }" ;;
        T) awk "BEGIN { printf \"%.0f\", $number * 1024^4 }" ;;
        P) awk "BEGIN { printf \"%.0f\", $number * 1024^5 }" ;;
        *) printf "%.0f" "$number" ;;
    esac
}

if [[ -z "$DATASET" || -z "$RAW_THRESHOLD" ]]; then
    logger -t sanoid "Threshold Error: Usage: $0 <dataset> <threshold>"
    exit 1
fi

if ! zfs list -H "$DATASET" >/dev/null 2>&1; then
    logger -t sanoid "Threshold Error: Dataset $DATASET not found."
    exit 1
fi

THRESHOLD=$(convert_to_bytes "$RAW_THRESHOLD")
WRITTEN_BYTES=$(zfs get -Hp -o value written "$DATASET")

if [[ "$WRITTEN_BYTES" -lt "$THRESHOLD" ]]; then
    WRITTEN_HUMAN=$(zfs get -H -o value written "$DATASET")
    # Optional: Comment out the logger below if your logs get too noisy
    logger -t sanoid "Skipping $DATASET: Written $WRITTEN_HUMAN < Threshold $RAW_THRESHOLD."
    exit 1
fi

exit 0
EOF
}
# BUGFIX: this used to call the non-existent conf_print_skip_hourless, which
# installed an empty file; and chmod needs sudo since tee ran as root.
conf_print_skip_empty | sudo tee /usr/local/bin/sanoid_zfs-skip-empty.sh
sudo chmod +x /usr/local/bin/sanoid_zfs-skip-empty.sh

# VM Consistency (The "Freeze/Thaw" Logic)
# The inclusion of virt-freeze-all.sh and virt-thaw-all.sh to ensure data integrity:
# * Pre-snapshot: virsh domfsfreeze tells the Guest OS (via qemu-guest-agent) to flush its write buffers and temporarily pause I/O.
# * Snapshot: Sanoid takes an atomic ZFS snapshot.
# * Post-snapshot: virsh domfsthaw resumes I/O.
# Result: You get an "application-consistent" backup rather than a "crash-consistent" one.
# Give the invoking user sudo access to virsh, or make it part of the libvirt group.
# qemu-guest-agent must be running in the vm

# Print the single-VM freeze script to stdout (freeze one named domain).
conf_print_virt_freeze() {
  cat <<'EOF'
#!/bin/sh
# /usr/local/bin/virt-freeze.sh
VM_NAME="$1"

if [ -z "$VM_NAME" ]; then
    echo "Usage: $0 <vm_name>"
    exit 1
fi

# Check if the VM is running
STATE=$(/usr/bin/virsh domstate "$VM_NAME" 2>/dev/null)
if [ "$STATE" != "running" ]; then
    echo "VM $VM_NAME is not running or does not exist. Skipping freeze."
    exit 0
fi

echo "Freezing filesystems for $VM_NAME..."
# domfsfreeze returns the number of frozen filesystems on success
if /usr/bin/virsh domfsfreeze "$VM_NAME" > /dev/null; then
    echo "Successfully frozen $VM_NAME."
else
    echo "Error: Failed to freeze $VM_NAME. Ensure qemu-guest-agent is active."
    exit 1
fi
EOF
}
conf_print_virt_freeze | sudo tee /usr/local/bin/sanoid_virt-freeze.sh
sudo chmod +x /usr/local/bin/sanoid_virt-freeze.sh
#---

# Print the single-VM thaw script to stdout (thaw one named domain).
conf_print_virt_unfreeze() {
  cat <<'EOF'
#!/bin/sh
# /usr/local/bin/virt-unfreeze.sh
VM_NAME="$1"

if [ -z "$VM_NAME" ]; then
    echo "Usage: $0 <vm_name>"
    exit 1
fi

# Check if the VM is running
STATE=$(/usr/bin/virsh domstate "$VM_NAME" 2>/dev/null)
if [ "$STATE" != "running" ]; then
    echo "VM $VM_NAME is not running. Skipping unfreeze."
    exit 0
fi

echo "Thawing filesystems for $VM_NAME..."
# domfsthaw returns the number of thawed filesystems on success
if /usr/bin/virsh domfsthaw "$VM_NAME" > /dev/null; then
    echo "Successfully thawed $VM_NAME."
else
    echo "Error: Failed to thaw $VM_NAME."
    exit 1
fi
EOF
}
conf_print_virt_unfreeze | sudo tee /usr/local/bin/sanoid_virt-unfreeze.sh
sudo chmod +x /usr/local/bin/sanoid_virt-unfreeze.sh
#---

# Print the thaw-all script to stdout: thaw every running domain.
# Used as sanoid's post_snapshot_script for the VM dataset.
conf_print_virt_thaw_all() {
  cat <<'EOF'
#!/bin/bash
# /usr/local/bin/virt-thaw-all.sh

# 1. Get running VM names, filtering out empty lines with awk
RUNNING_VMS=$(/usr/bin/virsh list --state-running --name | awk 'NF')

if [ -z "$RUNNING_VMS" ]; then
    echo "No running VMs found."
    exit 0
fi

# 2. Loop through each running VM
for VM_NAME in $RUNNING_VMS; do
    echo "Processing $VM_NAME..."
    # Use the native thaw command.
    # It handles the guest agent communication for you.
    # Capture stderr from the single invocation (a retry just to grab the
    # error message could report a different error than the one that occurred).
    if ERROR=$(/usr/bin/virsh domfsthaw "$VM_NAME" 2>&1 >/dev/null); then
        echo "Successfully thawed $VM_NAME."
    else
        echo "Error thawing $VM_NAME: $ERROR"
    fi
done

echo "Finished processing all VMs."
EOF
}
conf_print_virt_thaw_all | sudo tee /usr/local/bin/sanoid_virt-thaw-all.sh
sudo chmod +x /usr/local/bin/sanoid_virt-thaw-all.sh
#---

# Print the freeze-all script to stdout: freeze every running domain.
# Used as sanoid's pre_snapshot_script for the VM dataset.
# (Renamed from conf_print_virt_freeze-all: hyphens in function names are a
# non-portable bashism and inconsistent with every sibling function here.)
conf_print_virt_freeze_all() {
  cat <<'EOF'
#!/bin/bash

# 1. Get running VM names, filtering out empty lines
RUNNING_VMS=$(/usr/bin/virsh list --state-running --name | awk 'NF')

if [ -z "$RUNNING_VMS" ]; then
    echo "No running VMs found."
    exit 0
fi

# 2. Loop through each running VM
for VM_NAME in $RUNNING_VMS; do
    echo "Attempting to freeze $VM_NAME..."
    # Using the native virsh command is generally cleaner
    # It returns the number of frozen filesystems on success
    if /usr/bin/virsh domfsfreeze "$VM_NAME" > /dev/null 2>&1; then
        echo "Successfully frozen $VM_NAME."
    else
        echo "Error: Could not freeze $VM_NAME. Check if QEMU Guest Agent is running."
    fi
done

echo "Finished processing all VMs."
EOF
}
conf_print_virt_freeze_all | sudo tee /usr/local/bin/sanoid_virt-freeze-all.sh
sudo chmod +x /usr/local/bin/sanoid_virt-freeze-all.sh
#---

# exit 0
#
# # build a debian package from the git repo
# USER="jimsalterjrs"
# PROJECT="sanoid"
# LICENSE="mit"
# SECTION="admin"
# HOMEPAGE="https://openoid.net"
# BUILD_HOME="/var/tmp/sanoid-git_build"
# #VERSION="2.10.0"
# PACKAGE="sanoid"
# VERSION=$(curl "https://api.github.com/repos/${USER}/${PROJECT}/tags?per_page=5" | jq -r '.[0] | .name')
# VERSION=${VERSION:1} # strip the preceding 'v'
# DEBEMAIL="person@company.tld"
# DEBFULLNAME="Testy McTester"
# DESCRIPTION="Policy-driven snapshot management tool for ZFS filesystems."
# LONG_DESCRIPTION="Create, thin, and monitor snapshots and pool health from a single configuration file."
# # RSS_FEED="https://github.com/${USER}/${PROJECT}/releases.atom" # # xdg-open ${RSS_FEED} # echo ${RSS_FEED} # # # Make the BUILD_HOME directory and clone upstream # mkdir -p ${BUILD_HOME} # cd ${BUILD_HOME} || exit # # git clone http://github.com/${USER}/${PROJECT}.git ${PACKAGE}-${VERSION} # # cd ${PACKAGE}-${VERSION} || exit # # mkdir -p m4 # # cat <.git/gbp.conf # [DEFAULT] # # this is the upstream-branch: # upstream-branch=master # EOF # # # debianize the BUILD_HOME directory # echo ${DEBFULLNAME} # dh_make \ # --createorig \ # --single \ # --native \ # --copyright ${LICENSE} \ # --yes # # cp README.rst debian/README # rm debian/README.source # # # Customize the debian directory values # #sed -i 's,^\(Description: \).*,\1'${DESCRIPTION}',' debian/control # sed -i "s,^\(Description: \).*,\1${DESCRIPTION}," debian/control # sed -i 's,^\(Section: \).*,\1'${SECTION}',' debian/control # sed -i "s,^\(Maintainer: \).*,\1'${DEBFULLNAME}\ \<${DEBEMAIL}\>'," debian/control # sed -i 's,^\(Homepage: \).*,\1'${HOMEPAGE}',' debian/control # sed -i "s/.*insert\ long.*/${LONG_DESCRIPTION}/" debian/control # # echo **************** # cat debian/control # echo **************** # # BUILD_OPTIONS="" # # cat <debian/rules # #!/usr/bin/make -f # %: # dh \$@ # # override_dh_auto_configure: # dh_auto_configure -- ${BUILD_OPTIONS} # EOF # # cat <<'EOF' >debian/watch # version=4 # opts=filenamemangle=s/.+\/v?(\d\S+)\.tar\.gz/${PROJECT}-$1\.tar\.gz/ \ # http://github.com/${USER}/${PROJECT}/tags .*/v?(\d\S+)\.tar\.gz # EOF # # cp -a packages/debian/* debian # # dpkg-buildpackage -us -uc -b # # echo ${PACKAGE}_${VERSION}_amd64.deb # # sudo dpkg -i ../${PACKAGE}_${VERSION}_amd64.deb