diff --git a/.gitignore b/.gitignore
index 1d26785..09d5c8d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1 @@
 *_pass.txt
-*.env
diff --git a/backups/create_vaultwarden_backup.sh b/backups/create_vaultwarden_backup.sh
new file mode 100755
index 0000000..652da10
--- /dev/null
+++ b/backups/create_vaultwarden_backup.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+# Backup script for Vaultwarden in a Kubernetes cluster
+
+BACKUP_DEST='/mnt/PRIVATE_DOCS/BACKUPS/vaultwarden'
+PASSFILE='./vaultwarden_pass.txt'
+
+# Create filename for the database backup
+database_backupfile="vaultwarden-sqlbkp_$(date +'%Y%m%d').bak"
+
+# Retrieve container names
+base_container="$( docker ps --format '{{.Names}}' | grep vaultwarden_vaultwarden )"
+database_container="$( docker ps --format '{{.Names}}' | grep vaultwarden-postgresql_vaultwarden-postgresql )"
+
+# Abort entire script if any command fails
+set -e
+
+# Database backup
+>&2 echo 'Backing up database'
+internal_database_backupfile="/tmp/${database_backupfile}"
+# Create backup file in docker container
+docker exec --env-file "${PASSFILE}" "${database_container}" pg_dump 'vaultwarden' -cwv -h 'localhost' -U 'vaultwarden' -f "${internal_database_backupfile}"
+# Copy backup outside container
+docker cp "${database_container}":"${internal_database_backupfile}" "${BACKUP_DEST}"
+
+# Files backup
+for file in 'attachments' 'sends' 'config.json' 'rsa_key.pem' 'rsa_key.pub.pem'; do
+    >&2 printf 'Copying %s\n' "${file}"
+    docker cp -a "${base_container}":"/data/${file}" "${BACKUP_DEST}"
+done
+
+# Backup cleanup
+# Only keep 30 days of backups, seems about right.
+>&2 echo 'Cleaning up old database backups'
+find "${BACKUP_DEST}" -name '*sqlbkp*' -type f -mtime +30 -print -delete
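+
+# Example restore (a sketch, not run by this script; assumes the same containers
+# as above and a real backup date filled in for YYYYMMDD):
+#   docker exec -i --env-file "${PASSFILE}" "${database_container}" \
+#     psql -U 'vaultwarden' -d 'vaultwarden' < "${BACKUP_DEST}/vaultwarden-sqlbkp_YYYYMMDD.bak"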
+
+>&2 echo 'Done'
diff --git a/scripts/backup-zfs-dataset.sh b/scripts/backup-zfs-dataset.sh
index 806d918..92d31ae 100755
--- a/scripts/backup-zfs-dataset.sh
+++ b/scripts/backup-zfs-dataset.sh
@@ -18,7 +18,7 @@ usage() {
 while [[ $# -gt 0 ]]; do
     case "${1}" in
         -c | --compression_level)
-            if ! [[ "${2}" =~ [[:digit:]] ]]; then
+            if ! [[ "${2}" =~ ^-[[:digit:]]$ ]]; then
                 >&2 printf "Error: Invalid compression level: '%s'\n" "${2}"
                 usage
             fi
@@ -73,16 +73,12 @@ elif ! [ -d "${destination}" ]; then
     usage
 fi
 
-# Set defaults
-compression_level="${compression_level:=1}"
-max_size="${max_size:=2G}"
-
 # Working snapshots
 
 # Find snapshots
 snapshot_location="/mnt/${dataset}/.zfs/snapshot"
-latest_auto="$( find "${snapshot_location}"/* -maxdepth 0 -name 'auto*' -type d | sort -n | tail -n1 | xargs -n1 basename )"
-latest_manual="$( find "${snapshot_location}"/* -maxdepth 0 -name 'manual*' -type d | sort -n | tail -n1 | xargs -n1 basename )"
+latest_auto="$( find "${snapshot_location}"/* -maxdepth 0 -name 'auto*' -type d | sort -n | tail -n1 | xargs -n1 basename 2> /dev/null )"
+latest_manual="$( find "${snapshot_location}"/* -maxdepth 0 -name 'manual*' -type d | sort -n | tail -n1 | xargs -n1 basename 2> /dev/null )"
 
 # Check snapshots existence
 if ! [ -n "${latest_manual}" ]; then
@@ -94,7 +90,7 @@ if ! [ -n "${latest_auto}" ]; then
     exit 2
 fi
 
-printf "Latest manual snapshot: %s\nLatest auto snapshot: %s\n" "${latest_manual}" "${latest_auto}"
+printf "Latest auto snapshot: %s\nLatest manual snapshot: %s\n" "${latest_auto}" "${latest_manual}"
 
 # Abort entire script if anything fails.
 set -e
@@ -103,20 +99,21 @@ set -e
 
 # Base backup.
 output_filename="${destination}/${latest_manual}.gz"
-existing_backup="$( find "${destination}" -type f -name "${latest_manual}.gz.part.[a-z][a-z]" -print -quit )"
-if [ -z ${existing_backup} ]; then
+existing_backup="$( find "${destination}" -type f -name "${latest_manual}.gz.part.[a-z][a-z]" -print )" # No -quit: we need every part file, not just the first.
+if [ -z "${existing_backup}" ]; then
     printf "Info: If you've manually created a new snapshot, you might want to remove the old backups.\n"
     printf "Latest manual snapshot was not yet backed up, backing up now.\n"
     sudo zfs send --verbose "${dataset}@${latest_manual}" \
-        | gzip "-${compression_level}" --verbose --rsyncable \
-        | split - --verbose -b "${max_size}" "${output_filename}.part."
+        | gzip "${compression_level:=-1}" --verbose --rsyncable \
+        | split - --verbose -b "${max_size:=2G}" "${output_filename}.part."
     printf "Written manual backup to: %s\n" "${output_filename}"
 elif [ "${force_create_manual_backup}" ]; then
+    # TODO What if the new backup is smaller than the previous one?
     printf "Removing previous backup files.\n"
-    find "${destination}" -type f -name "${latest_manual}.gz.part.[a-z][a-z]" -print -delete
+    xargs rm <<< "${existing_backup}"
     printf "Backing up manual snapshot.\n"
     sudo zfs send --verbose "${dataset}@${latest_manual}" \
-        | gzip "-${compression_level}" --verbose --rsyncable \
-        | split - --verbose -b "${max_size}" "${output_filename}.part."
+        | gzip "${compression_level:=-1}" --verbose --rsyncable \
+        | split - --verbose -b "${max_size:=2G}" "${output_filename}.part."
     printf "Written manual backup to: %s\n" "${output_filename}"
 else
@@ -126,9 +123,12 @@ fi
 # Incremental backup.
 printf "Creating incremental backup between %s and %s\n" "${latest_manual}" "${latest_auto}"
 output_filename="${destination}/${latest_manual}-${latest_auto}.gz"
-sudo zfs send --verbose -i "@${latest_manual}" "${dataset}@${latest_auto}" \
-    | gzip "-${compression_level}" --verbose \
-    | split - --verbose -b "${max_size}" "${output_filename}.part."
+sudo zfs send -i "@${latest_manual}" "${dataset}@${latest_auto}" \
+    | gzip "${compression_level:=-1}" --verbose \
+    | split - --verbose -b "${max_size:=2G}" "${output_filename}.part."
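+# Example restore of the full chain (a sketch; 'tank/restore' is a placeholder dataset):
+#   cat "${destination}/${latest_manual}.gz.part."* | gunzip | sudo zfs receive tank/restore
+#   cat "${output_filename}.part."* | gunzip | sudo zfs receive tank/restore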
 printf "Written incremental backup to: %s\n" "${output_filename}"
 
 # TODO Cleanup
diff --git a/scripts/nextcloud/backup-database.sh b/scripts/nextcloud/backup-database.sh
index 6105468..cd4a934 100755
--- a/scripts/nextcloud/backup-database.sh
+++ b/scripts/nextcloud/backup-database.sh
@@ -25,7 +25,6 @@ while getopts ":e:" option; do
             ;;
     esac
 done
-shift $(( OPTIND - 1 ))
 
 # Check arguments.
 
@@ -60,11 +59,11 @@ docker exec "${base_container}" php occ maintenance:mode --on
 
 # Database backup
 echo 'Backing up database'
 host_database_backupfile="${destination}/${database_backupfile}"
-docker exec --env-file "${env_file:=.env}" "${database_container}" pg_dump 'nextcloud' -cwv -h 'localhost' -U 'nextcloud' > "${host_database_backupfile}"
+docker exec --env-file "${env_file:=./nextcloud.env}" "${database_container}" pg_dump 'nextcloud' -cwv -h 'localhost' -U 'nextcloud' > "${host_database_backupfile}"
 
 # Files backup
 for file in 'config' 'themes'; do
-    printf "Copying %s\n" "${file}"
+    printf 'Copying %s\n' "${file}"
     docker cp -a "${base_container}":"/var/www/html/${file}" "${destination}"
 done
@@ -73,7 +72,7 @@ docker exec "${base_container}" php occ maintenance:mode --off
 
 # Backup cleanup
 # Only keep 30 days of backups
-printf "Clean up old database backups.\n"
+printf 'Cleaning up old database backups\n'
 find "${destination}" -name '*sqlbkp*' -type f -mtime +30 -print -delete
 
-printf "Done\n"
+printf 'Done\n'
diff --git a/scripts/vaultwarden/backup_database.sh b/scripts/vaultwarden/backup_database.sh
deleted file mode 100755
index 0b0c640..0000000
--- a/scripts/vaultwarden/backup_database.sh
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/bin/bash
-# Backup Vaultwarden database in a Kubernetes environment.
-# Usage: backup-database [OPTIONS] <destination>
-
-usage() {
-    >&2 printf "Usage: %s <destination>\n" "${0}"
-    >&2 printf "Options:\n"
-    >&2 printf "\t-e <env_file>\t Specify the environment file to use\n"
-    exit "${1:-1}"
-}
-
-# Get options
-
-while getopts ":e:" option; do
-    case "${option}" in
-        e)
-            if ! [ -f "${OPTARG}" ]; then
-                >&2 printf "Error: Specified environment file does not exist: '%s'.\n" "${OPTARG}"
-            elif ! [ -r "${OPTARG}" ]; then
-                >&2 printf "Error: Specified environment file is not readable: '%s'.\n" "${OPTARG}"
-            fi
-            env_file="${OPTARG}"
-            ;;
-        *)
-            >&2 printf "Error: Invalid option: '%s'.\n" "${option}"
-            usage
-            ;;
-    esac
-done
-shift $(( OPTIND - 1 ))
-
-# Check arguments
-
-if [ $# -ne 1 ]; then
-    >&2 printf "Error: You need to specify a destination.\n"
-    usage
-elif ! [ -d "${1}" ]; then
-    >&2 printf "Error: Specified destination does not exist or is not readable: '%s'.\n" "${1}"
-    usage
-else
-    destination="${1}"
-fi
-
-# Retrieve container names
-base_container="$( docker ps --format '{{.Names}}' | grep -E 'vaultwarden-2_vaultwarden-2-[0-9a-z]{10}-[0-9a-z]{5}' )"
-database_container="$( docker ps --format '{{.Names}}' | grep postgres_vaultwarden-2-cnpg-main-1 )"
-
-if ! [[ -n "${base_container}" && -n "${database_container}" ]]; then
-    >&2 printf "Error: Not all containers could be found.\n"
-    exit 2
-fi
-
-# Abort entire script if any command fails
-set -e
-
-# Database backup
-
-# Filename for database backup
-database_backupfile="vaultwarden-sqlbkp_$(date +'%Y%m%d').bak"
-host_database_backupfile="${destination}/${database_backupfile}"
-
-# Create backup file in docker container
-echo 'Backing up database'
-docker exec --env-file "${env_file:=.env}" "${database_container}" pg_dump 'vaultwarden' -cwv -h 'localhost' -U 'vaultwarden' > "${host_database_backupfile}"
-
-# Files backup
-for file in 'attachments' 'sends' 'rsa_key.pem' 'rsa_key.pub.pem'; do # 'config.json'
-    printf 'Copying %s\n' "${file}"
-    docker cp -a "${base_container}":"/data/${file}" "${destination}"
-done
-
-# Backup cleanup
-# Only keep 30 days of backups, seems about right.
-echo 'Cleaning up old database backups'
-find "${destination}" -name '*sqlbkp*' -type f -mtime +30 -print -delete
-
-echo 'Done'