From 93c81d4e7826f4d4814026c9a5a9532bebe2be0e Mon Sep 17 00:00:00 2001 From: cyteen Date: Wed, 4 Mar 2026 15:04:38 +0000 Subject: [PATCH] Small layout and syntax fixes. --- 020_sanoid.sh | 378 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 259 insertions(+), 119 deletions(-) diff --git a/020_sanoid.sh b/020_sanoid.sh index 4983442..18fc54f 100755 --- a/020_sanoid.sh +++ b/020_sanoid.sh @@ -2,7 +2,7 @@ # set -x sudo apt install -y \ - sanoid + sanoid # git \ # dh-make \ # libconfig-inifiles-perl \ @@ -24,66 +24,73 @@ EOF # Cronjob for non-systemd systems: every 15 minutes. # If you require a different interval, you will need to disable the # timer or the cron job according to your system configuration. -conf_print_syncoid_cron() { +conf_print_sanoid_cron() { cat <<-EOF - PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin + PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - */15 * * * * root [ -f /etc/sanoid/sanoid.conf ] && if [ ! -d /run/systemd/system ]; then TZ=UTC /usr/sbin/sanoid --cron --quiet; fi - EOF + */15 * * * * root [ -f /etc/sanoid/sanoid.conf ] && if [ ! -d /run/systemd/system ]; then TZ=UTC /usr/sbin/sanoid --cron --quiet; fi +EOF } -conf_print_sanoid_cron > /etc/cron.d/sanoid +conf_print_sanoid_cron | sudo tee /etc/cron.d/sanoid # Do the same for syncoid for backups. conf_print_syncoid_cron() { cat <<-EOF - PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin + PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin - */15 * * * * root [ -f /etc/sanoid/syncoid.conf ] && if [ ! -d /run/systemd/system ]; then TZ=UTC /usr/sbin/sanoid --cron --quiet; fi - EOF + */15 * * * * root [ -f /etc/sanoid/syncoid.conf ] && if [ ! 
-d /run/systemd/system ]; then TZ=UTC /usr/sbin/syncoid --cron --quiet; fi +EOF } -conf_print_syncoid_cron > /etc/cron.d/syncoid +conf_print_syncoid_cron | sudo tee /etc/cron.d/syncoid + +# Copied from /usr/share/doc/sanoid/example.conf +sudo mkdir -p /etc/sanoid/ -# Copied from /usr/shar/doc/sanoid/example.conf conf_print_sanoid() { - cat <<-EOF | sudo tee /etc/sanoid/sanoid.conf - ## name your backup modules with the path to their ZFS dataset - no leading slash. - #[zpoolname/datasetname] - # # pick one or more templates - they're defined (and editable) below. Comma separated, processed in order. - # # in this example, template_demo's daily value overrides template_production's daily value. - # use_template = production,demo - # - # # if you want to, you can override settings in the template directly inside module definitions like this. - # # in this example, we override the template to only keep 12 hourly and 1 monthly snapshot for this dataset. - # hourly = 12 - # monthly = 1 - # - ## you can also handle datasets recursively. - #[zpoolname/parent] - # use_template = production - # recursive = yes - # # if you want sanoid to manage the child datasets but leave this one alone, set process_children_only. - # process_children_only = yes - # - ## you can selectively override settings for child datasets which already fall under a recursive definition. - #[zpoolname/parent/child] - # # child datasets already initialized won't be wiped out, so if you use a new template, it will - # # only override the values already set by the parent template, not replace it completely. - # use_template = demo + cat <<-EOF + [rpool/docker] + use_template = ignore + recursive = yes + + [rpool/archive] + use_template = ignore + recursive = yes + + [rpool/tmp] + use_template = ignore + recursive = yes - # you can also handle datasets recursively in an atomic way without the possibility to override settings for child datasets. 
- [zpoolname/parent2] + [rpool/ROOT/devuan-1] + use_template = root + recursive = yes + + # rpool/ROOT/devuan-1/opt + # rpool/ROOT/devuan-1/usr + # rpool/ROOT/devuan-1/usr_local + # rpool/ROOT/devuan-1/usr_share + # rpool/ROOT/devuan-1/var + # rpool/ROOT/devuan-1/var_lib + + [rpool/ROOT/devuan-1/var_lib_virt] + use_template = root + # Custom script to freeze VMs before snapshotting + pre_snapshot_script = /usr/local/bin/virt-freeze-all.sh + post_snapshot_script = /usr/local/bin/virt-thaw-all.sh + + [rpool/home] use_template = production - recursive = zfs - + recursive = yes + [rpool/space] + use_template = production ############################# # templates below this line # ############################# - # name your templates template_templatename. you can create your own, and use them in your module definitions above. - + # name your templates template_templatename. you can create your own, and + # use them in your module definitions above. [template_demo] daily = 60 @@ -156,13 +163,22 @@ conf_print_sanoid() { ### limit allowed execution time of scripts before continuing (<= 0: infinite) script_timeout = 5 + [template_root] + # Root changes fast; shorter history often suffices + hourly = 24 + daily = 7 + monthly = 1 + yearly = 0 + autosnap = yes + autoprune = yes + [template_ignore] autoprune = no autosnap = no monitor = no - EOF +EOF } -conf_print_sanoid > /etc/sanoid/sanoid.conf +conf_print_sanoid | sudo tee /etc/sanoid/sanoid.conf # Both sanoid and synmcoid are oneshot processes so it makes little sense to # provide an init file, cron is just fine. 
In this case the systemd file is there @@ -212,13 +228,12 @@ conf_print_sanoid_init() { force_reload() { stop start - } case "$1" in start) start - ;; + ;; stop) stop ;; @@ -234,15 +249,16 @@ conf_print_sanoid_init() { echo "$Usage: $prog {start|stop|force-reload|restart}" exit 2 esac - EOF +EOF } -# conf_print_sanoid-init > /etc/init.d/sanoid +conf_print_sanoid_init | sudo tee /etc/init.d/sanoid +sudo chmod +x /etc/init.d/sanoid # Generated using: # https://raw.githubusercontent.com/akhilvij/systemd-to-sysvinit-converter/master/converter.py # python2 converter /usr/src/sanoid-2.2.0/sanoid-prune.service > sanoid-prune conf_print_sanoid-prune_init() { - cat <<-'EOF' | sudo tee /etc/init.d/sanoid=prune + cat <<-'EOF' #!/bin/sh ### BEGIN INIT INFO # Provides: sanoid-prune @@ -305,93 +321,217 @@ conf_print_sanoid-prune_init() { echo "$Usage: $prog {start|stop|force-reload|restart}" exit 2 esac - EOF +EOF } -# conf_print_sanoid-prune_init > /etc/init.d/sanoid-prune +conf_print_sanoid-prune_init | sudo tee /etc/init.d/sanoid-prune +sudo chmod +x /etc/init.d/sanoid-prune -exit 0 +# Give sudo access to virsh or is part of the libvirt group. +# qemu-guest-agent must be running in the vm +conf_print_virt_freeze() { + cat <<'EOF' +#!/bin/sh +# /usr/local/bin/virt-freeze.sh -# build a debian package from the git repo -USER="jimsalterjrs" -PROJECT="sanoid" -LICENSE="mit" -SECTION="admin" -HOMEPAGE="https://openoid.net" -BUILD_HOME="/var/tmp/sanoid-git_build" -#VERSION="2.10.0" -PACKAGE="sanoid" -VERSION=$(curl "https://api.github.com/repos/${USER}/${PROJECT}/tags?per_page=5" | jq -r '.[0] | .name') -VERSION=${VERSION:1} # strip the preceding 'v' -DEBEMAIL="person@company.tld" -DEBFULLNAME="Testy McTester" -DESCRIPTION="Policy-driven snapshot management tool for ZFS filesystems." -LONG_DESCRIPTION="Create, thin, and monitor snapshots and pool health from a single configuration file." 
+VM_NAME="$1" -RSS_FEED="https://github.com/${USER}/${PROJECT}/releases.atom" -# xdg-open ${RSS_FEED} -echo ${RSS_FEED} +if [ -z "$VM_NAME" ]; then + echo "Usage: $0 <vm_name>" + exit 1 +fi -# Make the BUILD_HOME directory and clone upstream -mkdir -p ${BUILD_HOME} -cd ${BUILD_HOME} || exit +# Check if the VM is running +STATE=$(virsh domstate "$VM_NAME") +if [ "$STATE" != "running" ]; then + echo "VM $VM_NAME is not running. Skipping freeze." + exit 0 +fi -git clone http://github.com/${USER}/${PROJECT}.git ${PACKAGE}-${VERSION} - -cd ${PACKAGE}-${VERSION} || exit - -mkdir -p m4 - -cat <.git/gbp.conf -[DEFAULT] -# this is the upstream-branch: -upstream-branch=master +echo "Freezing filesystems for $VM_NAME..." +virsh qemu-agent-command "$VM_NAME" '{"execute":"guest-fsfreeze-freeze"}' EOF +} +conf_print_virt_freeze | sudo tee /usr/local/bin/virt-freeze.sh +sudo chmod +x /usr/local/bin/virt-freeze.sh -# debianize the BUILD_HOME directory -echo ${DEBFULLNAME} -dh_make \ - --createorig \ - --single \ - --native \ - --copyright ${LICENSE} \ - --yes +conf_print_virt_unfreeze() { + cat <<'EOF' +#!/bin/sh +# /usr/local/bin/virt-unfreeze.sh -cp README.rst debian/README -rm debian/README.source +VM_NAME="$1" -# Customize the debian directory values -#sed -i 's,^\(Description: \).*,\1'${DESCRIPTION}',' debian/control -sed -i "s,^\(Description: \).*,\1${DESCRIPTION}," debian/control -sed -i 's,^\(Section: \).*,\1'${SECTION}',' debian/control -sed -i "s,^\(Maintainer: \).*,\1'${DEBFULLNAME}\ \<${DEBEMAIL}\>'," debian/control -sed -i 's,^\(Homepage: \).*,\1'${HOMEPAGE}',' debian/control -sed -i "s/.*insert\ long.*/${LONG_DESCRIPTION}/" debian/control +if [ -z "$VM_NAME" ]; then + echo "Usage: $0 <vm_name>" + exit 1 +fi -echo **************** -cat debian/control -echo **************** +# Check if the VM is running +STATE=$(virsh domstate "$VM_NAME") +if [ "$STATE" != "running" ]; then + echo "VM $VM_NAME is not running. Skipping unfreeze." 
+ exit 0 +fi -BUILD_OPTIONS="" - -cat <debian/rules -#!/usr/bin/make -f -%: - dh \$@ - -override_dh_auto_configure: - dh_auto_configure -- ${BUILD_OPTIONS} +echo "Thawing filesystems for $VM_NAME..." +virsh qemu-agent-command "$VM_NAME" '{"execute":"guest-fsfreeze-thaw"}' EOF +} +conf_print_virt_unfreeze | sudo tee /usr/local/bin/virt-unfreeze.sh +sudo chmod +x /usr/local/bin/virt-unfreeze.sh -cat <debian/watch -version=4 -opts=filenamemangle=s/.+\/v?(\d\S+)\.tar\.gz/${PROJECT}-$1\.tar\.gz/ \ - http://github.com/${USER}/${PROJECT}/tags .*/v?(\d\S+)\.tar\.gz +conf_print_virt_thaw-all() { + cat <<'EOF' +#!/bin/sh +# /usr/local/bin/virt-thaw-all.sh + +# 1. Get a list of all running VM names +RUNNING_VMS=$(virsh list --state-running --name) + +if [ -z "$RUNNING_VMS" ]; then + echo "No running VMs found." + exit 0 +fi + +# 2. Loop through each running VM +for VM_NAME in $RUNNING_VMS; do + echo "Processing $VM_NAME..." + + # Execute the thaw command via QEMU guest agent + # Note: This requires the guest-agent to be installed/running inside the VM + RESPONSE=$(virsh qemu-agent-command "$VM_NAME" '{"execute":"guest-fsfreeze-thaw"}' 2>&1) + + if [ $? -eq 0 ]; then + echo "Successfully thawed $VM_NAME." + else + echo "Error thawing $VM_NAME: $RESPONSE" + fi +done + +echo "Finished processing all VMs." EOF +} +conf_print_virt_thaw-all | sudo tee /usr/local/bin/virt-thaw-all.sh +sudo chmod +x /usr/local/bin/virt-thaw-all.sh -cp -a packages/debian/* debian +conf_print_virt_freeze-all() { + cat <<'EOF' +#!/bin/sh +# /usr/local/bin/virt-freeze-all.sh -dpkg-buildpackage -us -uc -b +# 1. Get a list of all running VM names +RUNNING_VMS=$(virsh list --state-running --name) -echo ${PACKAGE}_${VERSION}_amd64.deb +if [ -z "$RUNNING_VMS" ]; then + echo "No running VMs found." + exit 0 +fi -sudo dpkg -i ../${PACKAGE}_${VERSION}_amd64.deb +# 2. Loop through each running VM +for VM_NAME in $RUNNING_VMS; do + echo "Processing $VM_NAME..." 
+ + # Execute the freeze command via QEMU guest agent + # This flushes disk buffers and halts I/O + RESPONSE=$(virsh qemu-agent-command "$VM_NAME" '{"execute":"guest-fsfreeze-freeze"}' 2>&1) + + if [ $? -eq 0 ]; then + # The response usually contains the number of filesystems frozen + echo "Successfully frozen $VM_NAME. Details: $RESPONSE" + else + echo "Error freezing $VM_NAME: $RESPONSE" + fi +done + +echo "Finished processing all VMs." +EOF +} +conf_print_virt_freeze-all | sudo tee /usr/local/bin/virt-freeze-all.sh +sudo chmod +x /usr/local/bin/virt-freeze-all.sh + +# exit 0 +# +# # build a debian package from the git repo +# USER="jimsalterjrs" +# PROJECT="sanoid" +# LICENSE="mit" +# SECTION="admin" +# HOMEPAGE="https://openoid.net" +# BUILD_HOME="/var/tmp/sanoid-git_build" +# #VERSION="2.10.0" +# PACKAGE="sanoid" +# VERSION=$(curl "https://api.github.com/repos/${USER}/${PROJECT}/tags?per_page=5" | jq -r '.[0] | .name') +# VERSION=${VERSION:1} # strip the preceding 'v' +# DEBEMAIL="person@company.tld" +# DEBFULLNAME="Testy McTester" +# DESCRIPTION="Policy-driven snapshot management tool for ZFS filesystems." +# LONG_DESCRIPTION="Create, thin, and monitor snapshots and pool health from a single configuration file." 
+# +# RSS_FEED="https://github.com/${USER}/${PROJECT}/releases.atom" +# # xdg-open ${RSS_FEED} +# echo ${RSS_FEED} +# +# # Make the BUILD_HOME directory and clone upstream +# mkdir -p ${BUILD_HOME} +# cd ${BUILD_HOME} || exit +# +# git clone http://github.com/${USER}/${PROJECT}.git ${PACKAGE}-${VERSION} +# +# cd ${PACKAGE}-${VERSION} || exit +# +# mkdir -p m4 +# +# cat <.git/gbp.conf +# [DEFAULT] +# # this is the upstream-branch: +# upstream-branch=master +# EOF +# +# # debianize the BUILD_HOME directory +# echo ${DEBFULLNAME} +# dh_make \ +# --createorig \ +# --single \ +# --native \ +# --copyright ${LICENSE} \ +# --yes +# +# cp README.rst debian/README +# rm debian/README.source +# +# # Customize the debian directory values +# #sed -i 's,^\(Description: \).*,\1'${DESCRIPTION}',' debian/control +# sed -i "s,^\(Description: \).*,\1${DESCRIPTION}," debian/control +# sed -i 's,^\(Section: \).*,\1'${SECTION}',' debian/control +# sed -i "s,^\(Maintainer: \).*,\1'${DEBFULLNAME}\ \<${DEBEMAIL}\>'," debian/control +# sed -i 's,^\(Homepage: \).*,\1'${HOMEPAGE}',' debian/control +# sed -i "s/.*insert\ long.*/${LONG_DESCRIPTION}/" debian/control +# +# echo **************** +# cat debian/control +# echo **************** +# +# BUILD_OPTIONS="" +# +# cat <debian/rules +# #!/usr/bin/make -f +# %: +# dh \$@ +# +# override_dh_auto_configure: +# dh_auto_configure -- ${BUILD_OPTIONS} +# EOF +# +# cat <<'EOF' >debian/watch +# version=4 +# opts=filenamemangle=s/.+\/v?(\d\S+)\.tar\.gz/${PROJECT}-$1\.tar\.gz/ \ +# http://github.com/${USER}/${PROJECT}/tags .*/v?(\d\S+)\.tar\.gz +# EOF +# +# cp -a packages/debian/* debian +# +# dpkg-buildpackage -us -uc -b +# +# echo ${PACKAGE}_${VERSION}_amd64.deb +# +# sudo dpkg -i ../${PACKAGE}_${VERSION}_amd64.deb