Updated and improved, now autosnapshots according to policy via cron.

This commit is contained in:
cyteen 2026-03-11 01:46:57 +00:00
parent 04d2d424af
commit 60e606d4b5
1 changed files with 75 additions and 59 deletions

View File

@ -41,7 +41,7 @@ conf_print_syncoid_cron() {
*/15 * * * * root [ -f /etc/sanoid/syncoid.conf ] && if [ ! -d /run/systemd/system ]; then TZ=UTC /usr/sbin/syncoid --cron --quiet; fi
EOF
}
conf_print_syncoid_cron | sudo tee /etc/cron.d/syncoid
# conf_print_syncoid_cron | sudo tee /etc/cron.d/syncoid
# Copied from /usr/share/doc/sanoid/example.conf
sudo mkdir -p /etc/sanoid/
@ -50,16 +50,15 @@ conf_print_sanoid() {
cat <<-EOF
[rpool/docker]
use_template = ignore
recursive = yes
[rpool/archive]
use_template = ignore
recursive = yes
[rpool/swap]
use_template = ignore
[rpool/tmp]
use_template = ignore
recursive = yes
[rpool/ROOT/devuan-1]
use_template = root
@ -72,12 +71,14 @@ conf_print_sanoid() {
# rpool/ROOT/devuan-1/var
# rpool/ROOT/devuan-1/var_lib
# Specific override for Virtual Machines to use scripts
[rpool/ROOT/devuan-1/var_lib_virt]
use_template = root
# Custom script to freeze VMs before snapshotting
recursive = no
pre_snapshot_script = /usr/local/bin/virt-freeze-all.sh
post_snapshot_script = /usr/local/bin/virt-thaw-all.sh
# -- User Data --
[rpool/home]
use_template = production
recursive = yes
@ -89,11 +90,6 @@ conf_print_sanoid() {
# templates below this line #
#############################
# name your templates template_templatename. you can create your own, and
# use them in your module definitions above.
[template_demo]
daily = 60
[template_production]
frequently = 0
hourly = 36
@ -103,7 +99,21 @@ conf_print_sanoid() {
autosnap = yes
autoprune = yes
[template_backup]
[template_root]
# Root changes fast; shorter history often suffices
hourly = 24
daily = 7
monthly = 1
yearly = 0
autosnap = yes
autoprune = yes
[template_ignore]
autoprune = no
autosnap = no
monitor = no
[template_backup]
autoprune = yes
frequently = 0
hourly = 30
@ -162,20 +172,6 @@ conf_print_sanoid() {
#force_post_snapshot_script = yes
### limit allowed execution time of scripts before continuing (<= 0: infinite)
script_timeout = 5
[template_root]
# Root changes fast; shorter history often suffices
hourly = 24
daily = 7
monthly = 1
yearly = 0
autosnap = yes
autoprune = yes
[template_ignore]
autoprune = no
autosnap = no
monitor = no
EOF
}
conf_print_sanoid | sudo tee /etc/sanoid/sanoid.conf
@ -193,6 +189,8 @@ conf_print_sanoid_init() {
# Provides: sanoid
# Required-Start: $syslog $local_fs $remote_fs
# Required-Stop: $syslog $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Snapshot ZFS filesystems
### END INIT INFO
@ -251,8 +249,9 @@ conf_print_sanoid_init() {
esac
EOF
}
conf_print_sanoid_init | sudo tee /etc/init.d/sanoid
sudo chmod +x /etc/init.d/sanoid
# Sanoid doesn't run as a daemon; it runs via cron
# conf_print_sanoid_init | sudo tee /etc/init.d/sanoid
# sudo chmod +x /etc/init.d/sanoid
# Generated using:
# https://raw.githubusercontent.com/akhilvij/systemd-to-sysvinit-converter/master/converter.py
@ -323,8 +322,9 @@ conf_print_sanoid-prune_init() {
esac
EOF
}
conf_print_sanoid-prune_init | sudo tee /etc/init.d/sanoid-prune
sudo chmod +x /etc/init.d/sanoid-prune
# Sanoid doesn't run as a daemon; it runs via cron
# conf_print_sanoid-prune_init | sudo tee /etc/init.d/sanoid-prune
# sudo chmod +x /etc/init.d/sanoid-prune
# Give sudo access to virsh or is part of the libvirt group.
# qemu-guest-agent must be running in the vm
@ -341,19 +341,27 @@ if [ -z "$VM_NAME" ]; then
fi
# Check if the VM is running
STATE=$(virsh domstate "$VM_NAME")
STATE=$(virsh domstate "$VM_NAME" 2>/dev/null)
if [ "$STATE" != "running" ]; then
echo "VM $VM_NAME is not running. Skipping freeze."
echo "VM $VM_NAME is not running or does not exist. Skipping freeze."
exit 0
fi
echo "Freezing filesystems for $VM_NAME..."
virsh qemu-agent-command "$VM_NAME" '{"execute":"guest-fsfreeze-freeze"}'
# domfsfreeze returns the number of frozen filesystems on success
if virsh domfsfreeze "$VM_NAME" > /dev/null; then
echo "Successfully frozen $VM_NAME."
else
echo "Error: Failed to freeze $VM_NAME. Ensure qemu-guest-agent is active."
exit 1
fi
EOF
}
conf_print_virt_freeze | sudo tee /usr/local/bin/virt-freeze.sh
sudo chmod +x /usr/local/bin/virt-freeze.sh
#---
conf_print_virt_unfreeze() {
cat <<'EOF'
#!/bin/sh
@ -367,26 +375,34 @@ if [ -z "$VM_NAME" ]; then
fi
# Check if the VM is running
STATE=$(virsh domstate "$VM_NAME")
STATE=$(virsh domstate "$VM_NAME" 2>/dev/null)
if [ "$STATE" != "running" ]; then
echo "VM $VM_NAME is not running. Skipping unfreeze."
exit 0
fi
echo "Thawing filesystems for $VM_NAME..."
virsh qemu-agent-command "$VM_NAME" '{"execute":"guest-fsfreeze-thaw"}'
# domfsthaw returns the number of thawed filesystems on success
if virsh domfsthaw "$VM_NAME" > /dev/null; then
echo "Successfully thawed $VM_NAME."
else
echo "Error: Failed to thaw $VM_NAME."
exit 1
fi
EOF
}
conf_print_virt_unfreeze | sudo tee /usr/local/bin/virt-unfreeze.sh
sudo chmod +x /usr/local/bin/virt-unfreeze.sh
conf_print_virt_thaw-all() {
#---
conf_print_virt_thaw_all() {
cat <<'EOF'
#!/bin/sh
#!/bin/bash
# /usr/local/bin/virt-thaw-all.sh
# 1. Get a list of all running VM names
RUNNING_VMS=$(virsh list --state-running --name)
# 1. Get running VM names, filtering out empty lines with awk
RUNNING_VMS=$(virsh list --state-running --name | awk 'NF')
if [ -z "$RUNNING_VMS" ]; then
echo "No running VMs found."
@ -397,30 +413,31 @@ fi
for VM_NAME in $RUNNING_VMS; do
echo "Processing $VM_NAME..."
# Execute the thaw command via QEMU guest agent
# Note: This requires the guest-agent to be installed/running inside the VM
RESPONSE=$(virsh qemu-agent-command "$VM_NAME" '{"execute":"guest-fsfreeze-thaw"}' 2>&1)
if [ $? -eq 0 ]; then
# Use the native thaw command.
# It handles the guest agent communication for you.
if virsh domfsthaw "$VM_NAME" > /dev/null 2>&1; then
echo "Successfully thawed $VM_NAME."
else
echo "Error thawing $VM_NAME: $RESPONSE"
# If native fails, we capture the error for the user
ERROR=$(virsh domfsthaw "$VM_NAME" 2>&1)
echo "Error thawing $VM_NAME: $ERROR"
fi
done
echo "Finished processing all VMs."
EOF
}
conf_print_virt_thaw-all | sudo tee /usr/local/bin/virt-thaw-all.sh
conf_print_virt_thaw_all | sudo tee /usr/local/bin/virt-thaw-all.sh
sudo chmod +x /usr/local/bin/virt-thaw-all.sh
#---
conf_print_virt_freeze-all() {
cat <<'EOF'
#!/bin/sh
# /usr/local/bin/virt-freeze-all.sh
#!/bin/bash
# 1. Get a list of all running VM names
RUNNING_VMS=$(virsh list --state-running --name)
# 1. Get running VM names, filtering out empty lines
RUNNING_VMS=$(virsh list --state-running --name | awk 'NF')
if [ -z "$RUNNING_VMS" ]; then
echo "No running VMs found."
@ -429,17 +446,14 @@ fi
# 2. Loop through each running VM
for VM_NAME in $RUNNING_VMS; do
echo "Processing $VM_NAME..."
echo "Attempting to freeze $VM_NAME..."
# Execute the freeze command via QEMU guest agent
# This flushes disk buffers and halts I/O
RESPONSE=$(virsh qemu-agent-command "$VM_NAME" '{"execute":"guest-fsfreeze-freeze"}' 2>&1)
if [ $? -eq 0 ]; then
# The response usually contains the number of filesystems frozen
echo "Successfully frozen $VM_NAME. Details: $RESPONSE"
# Using the native virsh command is generally cleaner
# It returns the number of frozen filesystems on success
if virsh domfsfreeze "$VM_NAME" > /dev/null 2>&1; then
echo "Successfully frozen $VM_NAME."
else
echo "Error freezing $VM_NAME: $RESPONSE"
echo "Error: Could not freeze $VM_NAME. Check if QEMU Guest Agent is running."
fi
done
@ -449,6 +463,8 @@ EOF
conf_print_virt_freeze-all | sudo tee /usr/local/bin/virt-freeze-all.sh
sudo chmod +x /usr/local/bin/virt-freeze-all.sh
#---
# exit 0
#
# # build a debian package from the git repo