automate/020_sanoid.sh

#!/usr/bin/env bash
# set -x
sudo apt install -y \
sanoid
# git \
# dh-make \
# libconfig-inifiles-perl \
echo "sanoid provides: sanoid, syncoid, findoid in /usr/sbin"
echo "defaults are in /usr/share/sanoid/sanoid.defaults.conf"
echo "override them in /etc/sanoid/sanoid.conf"
echo "Installed files:"
cat <<-EOF
syncoid /usr/sbin/
sanoid /usr/sbin/
findoid /usr/sbin/
sanoid.defaults.conf /usr/share/sanoid/
debian/sanoid-prune.service /lib/systemd/system
CHANGELIST /usr/share/doc/sanoid/changelog
EOF
# Cronjob for non-systemd systems: every 15 minutes.
# If you require a different interval, you will need to disable the
# timer or the cron job according to your system configuration.
conf_print_sanoid_cron() {
cat <<-EOF
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/15 * * * * root [ -f /etc/sanoid/sanoid.conf ] && if [ ! -d /run/systemd/system ]; then TZ=UTC /usr/sbin/sanoid --cron --quiet; fi
EOF
}
conf_print_sanoid_cron | sudo tee /etc/cron.d/sanoid
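# On a systemd host the [ ! -d /run/systemd/system ] guard above keeps this
# cron entry inert and the packaged timer does the scheduling. If you prefer
# cron there as well, a sketch (unit name assumed from the Debian package;
# check with 'systemctl list-timers' first):
# sudo systemctl disable --now sanoid.timer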
# Do the same for syncoid for backups.
conf_print_syncoid_cron() {
cat <<-EOF
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
*/15 * * * * root [ -f /etc/sanoid/syncoid.conf ] && if [ ! -d /run/systemd/system ]; then TZ=UTC /usr/sbin/syncoid --cron --quiet; fi
EOF
}
# conf_print_syncoid_cron | sudo tee /etc/cron.d/syncoid
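# Note: unlike sanoid, syncoid has no config file of its own; it takes an
# explicit source and target on the command line, so a real backup job looks
# more like the sketch below (dataset, user and host names are placeholders):
# 0 * * * * root /usr/sbin/syncoid --recursive --no-sync-snap rpool/home backupuser@backuphost:backuppool/home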
# Copied from /usr/share/doc/sanoid/example.conf
sudo mkdir -p /etc/sanoid/
conf_print_sanoid() {
cat <<-EOF
[rpool/docker]
use_template = ignore
[rpool/archive]
use_template = ignore
[rpool/swap]
use_template = ignore
[rpool/tmp]
use_template = ignore
[rpool/ROOT/devuan-1]
use_template = root
recursive = yes
# rpool/ROOT/devuan-1/opt
# rpool/ROOT/devuan-1/usr
# rpool/ROOT/devuan-1/usr_local
# rpool/ROOT/devuan-1/usr_share
# rpool/ROOT/devuan-1/var
# rpool/ROOT/devuan-1/var_lib
# Specific override for Virtual Machines to use scripts
[rpool/ROOT/devuan-1/var_lib_virt]
use_template = root
recursive = no
pre_snapshot_script = /usr/local/bin/virt-freeze-all.sh
post_snapshot_script = /usr/local/bin/virt-thaw-all.sh
# -- User Data --
[rpool/home]
use_template = production
recursive = yes
[rpool/space]
use_template = production
#############################
# templates below this line #
#############################
[template_production]
frequently = 0
hourly = 36
daily = 30
monthly = 3
yearly = 0
autosnap = yes
autoprune = yes
[template_root]
# Root changes fast; shorter history often suffices
hourly = 24
daily = 7
monthly = 1
yearly = 0
autosnap = yes
autoprune = yes
[template_ignore]
autoprune = no
autosnap = no
monitor = no
[template_backup]
autoprune = yes
frequently = 0
hourly = 30
daily = 90
monthly = 12
yearly = 0
### don't take new snapshots - snapshots on backup
### datasets are replicated in from source, not
### generated locally
autosnap = no
### monitor hourlies and dailies, but don't warn or
### crit until they're over 48h old, since replication
### is typically daily only
hourly_warn = 2880
hourly_crit = 3600
daily_warn = 48
daily_crit = 60
[template_hotspare]
autoprune = yes
frequently = 0
hourly = 30
daily = 90
monthly = 3
yearly = 0
### don't take new snapshots - snapshots on backup
### datasets are replicated in from source, not
### generated locally
autosnap = no
### monitor hourlies and dailies, but don't warn or
### crit until they're over 4h old, since replication
### is typically hourly only
hourly_warn = 4h
hourly_crit = 6h
daily_warn = 2d
daily_crit = 4d
[template_scripts]
### information about the snapshot will be supplied as environment variables,
### see the README.md file for details about what is passed when.
### run script before snapshot
pre_snapshot_script = /path/to/script.sh
### run script after snapshot
post_snapshot_script = /path/to/script.sh
### run script before pruning snapshot
pre_pruning_script = /path/to/script.sh
### run script after pruning snapshot
pruning_script = /path/to/script.sh
### don't take an inconsistent snapshot (skip if pre script fails)
#no_inconsistent_snapshot = yes
### run post_snapshot_script when pre_snapshot_script is failing
#force_post_snapshot_script = yes
### limit allowed execution time of scripts before continuing (<= 0: infinite)
script_timeout = 5
EOF
}
conf_print_sanoid | sudo tee /etc/sanoid/sanoid.conf
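# Optional sanity check before the first cron run: --readonly simulates a run
# against the new config without creating or destroying anything, and
# --monitor-snapshots prints a Nagios-style summary of snapshot coverage.
# sudo sanoid --take-snapshots --verbose --readonly
# sudo sanoid --monitor-snapshots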
# Both sanoid and syncoid are oneshot processes, so an init script adds little
# value; cron is fine. The upstream systemd unit only exists because systemd
# distributions schedule these runs with timer units instead of cron.
# Generated using:
# https://raw.githubusercontent.com/akhilvij/systemd-to-sysvinit-converter/master/converter.py
# python2 converter /usr/src/sanoid-2.2.0/sanoid.service > sanoid
conf_print_sanoid_init() {
cat <<-'EOF'
#!/bin/sh
### BEGIN INIT INFO
# Provides: sanoid
# Required-Start: $syslog $local_fs $remote_fs
# Required-Stop: $syslog $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Snapshot ZFS filesystems
### END INIT INFO
. /lib/lsb/init-functions
prog=sanoid
PIDFILE=/var/run/$prog.pid
DESC="Snapshot ZFS filesystems"
start() {
log_daemon_msg "Starting $DESC" "$prog"
if start_daemon -p $PIDFILE /usr/sbin/sanoid --take-snapshots --verbose; then
log_end_msg 0
else
log_end_msg 1
exit 1
fi
exit 0
}
stop() {
log_daemon_msg "Stopping $DESC" "$prog"
if killproc -p $PIDFILE /usr/sbin/sanoid; then
log_end_msg 0
else
log_end_msg 1
exit 1
fi
}
force_reload() {
stop
start
}
case "$1" in
start)
start
;;
stop)
stop
;;
force-reload)
force_reload
;;
restart)
stop
start
;;
*)
echo "$Usage: $prog {start|stop|force-reload|restart}"
exit 2
esac
EOF
}
# Sanoid doesn't run as a daemon; it runs via cron.
# conf_print_sanoid_init | sudo tee /etc/init.d/sanoid
# sudo chmod +x /etc/init.d/sanoid
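# If you did want the sysvinit script instead of cron, register it with the
# usual Debian tooling after installing it:
# sudo update-rc.d sanoid defaults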
# Generated using:
# https://raw.githubusercontent.com/akhilvij/systemd-to-sysvinit-converter/master/converter.py
# python2 converter /usr/src/sanoid-2.2.0/sanoid-prune.service > sanoid-prune
conf_print_sanoid-prune_init() {
cat <<-'EOF'
#!/bin/sh
### BEGIN INIT INFO
# Provides: sanoid-prune
# Required-Start: $syslog $local_fs $remote_fs
# Required-Stop: $syslog $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Prune ZFS snapshots
### END INIT INFO
. /lib/lsb/init-functions
prog=sanoid-prune
PIDFILE=/var/run/$prog.pid
DESC="Prune ZFS snapshots"
start() {
log_daemon_msg "Starting $DESC" "$prog"
if start_daemon -p $PIDFILE /usr/sbin/sanoid --prune-snapshots --verbose; then
log_end_msg 0
else
log_end_msg 1
exit 1
fi
exit 0
}
stop() {
log_daemon_msg "Stopping $DESC" "$prog"
if killproc -p $PIDFILE /usr/sbin/sanoid; then
log_end_msg 0
else
log_end_msg 1
exit 1
fi
}
force_reload() {
stop
start
}
case "$1" in
start)
start
;;
stop)
stop
;;
force-reload)
force_reload
;;
restart)
stop
start
;;
*)
echo "$Usage: $prog {start|stop|force-reload|restart}"
exit 2
esac
EOF
}
# Sanoid doesn't run as a daemon; it runs via cron.
# conf_print_sanoid-prune_init | sudo tee /etc/init.d/sanoid-prune
# sudo chmod +x /etc/init.d/sanoid-prune
# The invoking user needs sudo access to virsh or membership in the libvirt group.
# qemu-guest-agent must be running inside each VM.
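# For reference (not run here; <vm-name> is a placeholder): give the invoking
# user libvirt access and confirm the guest agent answers before relying on
# freeze/thaw.
# sudo usermod -aG libvirt "$USER"
# virsh qemu-agent-command <vm-name> '{"execute":"guest-ping"}'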
conf_print_virt_freeze() {
cat <<'EOF'
#!/bin/sh
# /usr/local/bin/virt-freeze.sh
VM_NAME="$1"
if [ -z "$VM_NAME" ]; then
echo "Usage: $0 <domain_name>"
exit 1
fi
# Check if the VM is running
STATE=$(virsh domstate "$VM_NAME" 2>/dev/null)
if [ "$STATE" != "running" ]; then
echo "VM $VM_NAME is not running or does not exist. Skipping freeze."
exit 0
fi
echo "Freezing filesystems for $VM_NAME..."
# domfsfreeze returns the number of frozen filesystems on success
if virsh domfsfreeze "$VM_NAME" > /dev/null; then
echo "Successfully frozen $VM_NAME."
else
echo "Error: Failed to freeze $VM_NAME. Ensure qemu-guest-agent is active."
exit 1
fi
EOF
}
conf_print_virt_freeze | sudo tee /usr/local/bin/virt-freeze.sh
sudo chmod +x /usr/local/bin/virt-freeze.sh
#---
conf_print_virt_unfreeze() {
cat <<'EOF'
#!/bin/sh
# /usr/local/bin/virt-unfreeze.sh
VM_NAME="$1"
if [ -z "$VM_NAME" ]; then
echo "Usage: $0 <domain_name>"
exit 1
fi
# Check if the VM is running
STATE=$(virsh domstate "$VM_NAME" 2>/dev/null)
if [ "$STATE" != "running" ]; then
echo "VM $VM_NAME is not running. Skipping unfreeze."
exit 0
fi
echo "Thawing filesystems for $VM_NAME..."
# domfsthaw returns the number of thawed filesystems on success
if virsh domfsthaw "$VM_NAME" > /dev/null; then
echo "Successfully thawed $VM_NAME."
else
echo "Error: Failed to thaw $VM_NAME."
exit 1
fi
EOF
}
conf_print_virt_unfreeze | sudo tee /usr/local/bin/virt-unfreeze.sh
sudo chmod +x /usr/local/bin/virt-unfreeze.sh
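# Manual usage of the per-VM hooks (the VM name is a placeholder):
# sudo /usr/local/bin/virt-freeze.sh <vm-name>
# ...take the snapshot...
# sudo /usr/local/bin/virt-unfreeze.sh <vm-name>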
#---
conf_print_virt_thaw_all() {
cat <<'EOF'
#!/bin/bash
# /usr/local/bin/virt-thaw-all.sh
# 1. Get running VM names, filtering out empty lines with awk
RUNNING_VMS=$(virsh list --state-running --name | awk 'NF')
if [ -z "$RUNNING_VMS" ]; then
echo "No running VMs found."
exit 0
fi
# 2. Loop through each running VM
for VM_NAME in $RUNNING_VMS; do
echo "Processing $VM_NAME..."
# Use the native thaw command; it handles the guest agent communication.
# Capture the output once so a failure message comes from the same attempt.
if OUTPUT=$(virsh domfsthaw "$VM_NAME" 2>&1); then
echo "Successfully thawed $VM_NAME."
else
echo "Error thawing $VM_NAME: $OUTPUT"
fi
done
echo "Finished processing all VMs."
EOF
}
conf_print_virt_thaw_all | sudo tee /usr/local/bin/virt-thaw-all.sh
sudo chmod +x /usr/local/bin/virt-thaw-all.sh
#---
conf_print_virt_freeze-all() {
cat <<'EOF'
#!/bin/bash
# 1. Get running VM names, filtering out empty lines
RUNNING_VMS=$(virsh list --state-running --name | awk 'NF')
if [ -z "$RUNNING_VMS" ]; then
echo "No running VMs found."
exit 0
fi
# 2. Loop through each running VM
for VM_NAME in $RUNNING_VMS; do
echo "Attempting to freeze $VM_NAME..."
# Using the native virsh command is generally cleaner
# It returns the number of frozen filesystems on success
if virsh domfsfreeze "$VM_NAME" > /dev/null 2>&1; then
echo "Successfully frozen $VM_NAME."
else
echo "Error: Could not freeze $VM_NAME. Check if QEMU Guest Agent is running."
fi
done
echo "Finished processing all VMs."
EOF
}
conf_print_virt_freeze-all | sudo tee /usr/local/bin/virt-freeze-all.sh
sudo chmod +x /usr/local/bin/virt-freeze-all.sh
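# Optional end-to-end test of the hook pair sanoid calls for the VM dataset
# (dataset name matches the sanoid.conf written above):
# sudo /usr/local/bin/virt-freeze-all.sh
# sudo zfs snapshot rpool/ROOT/devuan-1/var_lib_virt@freeze-test
# sudo /usr/local/bin/virt-thaw-all.sh
# sudo zfs destroy rpool/ROOT/devuan-1/var_lib_virt@freeze-test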
#---
# exit 0
#
# # build a debian package from the git repo
# USER="jimsalterjrs"
# PROJECT="sanoid"
# LICENSE="mit"
# SECTION="admin"
# HOMEPAGE="https://openoid.net"
# BUILD_HOME="/var/tmp/sanoid-git_build"
# #VERSION="2.10.0"
# PACKAGE="sanoid"
# VERSION=$(curl "https://api.github.com/repos/${USER}/${PROJECT}/tags?per_page=5" | jq -r '.[0] | .name')
# VERSION=${VERSION:1} # strip the preceding 'v'
# DEBEMAIL="person@company.tld"
# DEBFULLNAME="Testy McTester"
# DESCRIPTION="Policy-driven snapshot management tool for ZFS filesystems."
# LONG_DESCRIPTION="Create, thin, and monitor snapshots and pool health from a single configuration file."
#
# RSS_FEED="https://github.com/${USER}/${PROJECT}/releases.atom"
# # xdg-open ${RSS_FEED}
# echo ${RSS_FEED}
#
# # Make the BUILD_HOME directory and clone upstream
# mkdir -p ${BUILD_HOME}
# cd ${BUILD_HOME} || exit
#
# git clone http://github.com/${USER}/${PROJECT}.git ${PACKAGE}-${VERSION}
#
# cd ${PACKAGE}-${VERSION} || exit
#
# mkdir -p m4
#
# cat <<EOF >.git/gbp.conf
# [DEFAULT]
# # this is the upstream-branch:
# upstream-branch=master
# EOF
#
# # debianize the BUILD_HOME directory
# echo ${DEBFULLNAME}
# dh_make \
# --createorig \
# --single \
# --native \
# --copyright ${LICENSE} \
# --yes
#
# cp README.rst debian/README
# rm debian/README.source
#
# # Customize the debian directory values
# #sed -i 's,^\(Description: \).*,\1'${DESCRIPTION}',' debian/control
# sed -i "s,^\(Description: \).*,\1${DESCRIPTION}," debian/control
# sed -i 's,^\(Section: \).*,\1'${SECTION}',' debian/control
# sed -i "s,^\(Maintainer: \).*,\1'${DEBFULLNAME}\ \<${DEBEMAIL}\>'," debian/control
# sed -i 's,^\(Homepage: \).*,\1'${HOMEPAGE}',' debian/control
# sed -i "s/.*insert\ long.*/${LONG_DESCRIPTION}/" debian/control
#
# echo ****************
# cat debian/control
# echo ****************
#
# BUILD_OPTIONS=""
#
# cat <<EOF >debian/rules
# #!/usr/bin/make -f
# %:
# dh \$@
#
# override_dh_auto_configure:
# dh_auto_configure -- ${BUILD_OPTIONS}
# EOF
#
# cat <<'EOF' >debian/watch
# version=4
# opts=filenamemangle=s/.+\/v?(\d\S+)\.tar\.gz/${PROJECT}-$1\.tar\.gz/ \
# http://github.com/${USER}/${PROJECT}/tags .*/v?(\d\S+)\.tar\.gz
# EOF
#
# cp -a packages/debian/* debian
#
# dpkg-buildpackage -us -uc -b
#
# echo ${PACKAGE}_${VERSION}_amd64.deb
#
# sudo dpkg -i ../${PACKAGE}_${VERSION}_amd64.deb