Add autobackup functionality to CLI
Adds autobackup (integrated, managed VM backups with automatic remote filesystem mounting, including backup expiry/removal and automatic full/incremental selection, independent of the manual "pvc vm backup" commands) to the CLI client. This is a fairly large feature to handle entirely inside the CLI client, but it was chosen as the only real place for it aside from an external script. There are several major restrictions on this command, mainly that it must be run from the primary coordinator using the "local" connection, and that it must be run as "root". The command is designed to be run from a cron job or systemd timer installed by pvc-ansible when the appropriate group_vars are enabled, and otherwise left untouched.
parent e63d8e59e9
commit 5954feaa31
@@ -0,0 +1,147 @@
---
# Root level configuration key
autobackup:

  # Backup root path on the node, used as the remote mountpoint
  # Must be an absolute path beginning with '/'
  # If remote_mount is enabled, the remote mount will be mounted on this directory
  # If remote_mount is enabled, it is recommended to use a path under `/tmp` for this
  # If remote_mount is disabled, a real filesystem must be mounted here (PVC system volumes are small!)
  backup_root_path: "/tmp/backups"

  # Suffix to the backup root path, used to allow multiple PVC systems to write to a single root path
  # Must begin with '/'; leave empty to use the backup root path directly
  # Note that most remote mount options can fake this if needed, but provided to ensure local compatibility
  backup_root_suffix: "/mycluster"

  # VM tag(s) to back up
  # Only VMs with at least one of the given tag(s) will be backed up; all others will be skipped
  backup_tags:
    - "backup"
    - "mytag"

  # Backup schedule: when and what format to take backups
  backup_schedule:
    full_interval: 7   # Number of total backups between full backups; others are incremental
                       # > If this number is 1, every backup will be a full backup and no incremental
                       #   backups will be taken
                       # > If this number is 2, every second backup will be a full backup, etc.
    full_retention: 2  # Keep this many full backups; the oldest will be deleted when a new one is
                       # taken, along with all child incremental backups of that backup
                       # > Should usually be at least 2 when using incrementals (full_interval > 1) to
                       #   avoid there being too few backups after cleanup from a new full backup

  # Remote mount settings for backup root path
  # If remote mount support is disabled, it is up to the administrator to ensure that the backup root
  # path is created and a valid destination filesystem is mounted on it
  remote_mount:
    enabled: no   # Enable automatic remote mount/unmount support
    type: sshfs   # Set the type of remote mount; optional if remote_mount is disabled
                  # > Supported values are: sshfs, nfs, cifs (i.e. SMB), cephfs, and s3fs
                  # > WARNING: s3fs has serious known bugs that we don't work around; avoid it if possible

  # Remote mount configurations, per-type; you only need to specify the type(s) you plan to use, but all
  # are given here for completeness as examples
  # > NOTE: This key (and all its children) is optional if remote mounting is not enabled
  remote_mount_config:

    # SSHFS specific options
    # > NOTE: This SSHFS implementation does not support password authentication; keys MUST be used
    sshfs:
      # Remote username
      user: username
      # Remote hostname
      host: hostname
      # Remote path
      path: /srv/vm_backups
      # Required command; checked for before mounting, errors if missing
      command: /usr/bin/sshfs
      # Options to pass to the mount command (joined; each requires "-o"!)
      # See the command manual page for more options
      options:
        - "-o IdentityFile=/srv/pvc_autobackup.id_ed25519"  # Identity (SSH key) file, required!
        - "-o port=22"              # Port number
        - "-o reconnect"            # Enable reconnection
        - "-o default_permissions"  # Enable local permission checking
        - "-o compression=no"       # Disable compression; testing shows that compression slows things
                                    # down a fair bit (1m40.435s vs 0m22.253s for 750MB on 10GbE net)
        - "-o sshfs_sync"           # Enable sync; ensures consistent writes with an acceptable performance
                                    # overhead (0m22.253s vs 0m17.453s for 750MB on 10GbE net)
      # Mount command, populated at import time
      mount_cmd: "{command} {sshfs_user}@{sshfs_host}:{sshfs_path} {backup_root_path} {sshfs_options}"
      # Unmount command, populated at import time
      unmount_cmd: "fusermount3 -u {backup_root_path}"

    # NFS specific options
    nfs:
      # Remote hostname
      host: hostname
      # Remote path
      path: /srv/vm_backups
      # Required command; checked for before mounting, errors if missing
      command: /usr/sbin/mount.nfs
      # Options to pass to the mount command (joined and passed to "-o")
      # See the command manual page for more options
      options:
        - "nfsvers=3"  # Use a specific NFS version
      # Mount command, populated at import time
      mount_cmd: "{command} -o {nfs_options} {nfs_host}:{nfs_path} {backup_root_path}"
      # Unmount command, populated at import time
      unmount_cmd: "umount {backup_root_path}"

    # CIFS specific options
    cifs:
      # Remote hostname
      host: hostname
      # Remote path (be sure to include the leading '/'!)
      path: /srv/vm_backups
      # Required command; checked for before mounting, errors if missing
      command: /usr/sbin/mount.cifs
      # Options to pass to the mount command (joined and passed to "-o")
      # See the command manual page for more options
      options:
        - "credentials=/srv/backup_vms.cifs_credentials"  # Specify a credentials file
        - "guest"                                         # Use guest access, alternative to the above
      # Mount command, populated at import time
      mount_cmd: "{command} -o {cifs_options} //{cifs_host}{cifs_path} {backup_root_path}"
      # Unmount command, populated at import time
      unmount_cmd: "umount {backup_root_path}"

    # CephFS specific options
    cephfs:
      # Monitor address/hostname list
      monitors:
        - mon1
      # CephFS path; at least "/" is always required
      path: "/mysubdir"
      # Required command; checked for before mounting, errors if missing
      command: /usr/sbin/mount.ceph
      # Options to pass to the mount command (joined and passed to "-o")
      # See the command manual page for more options
      options:
        - "secretfile=/srv/backup_vms.cephfs_secret"  # Specify a cephx secret file
        - "conf=/srv/backup_vms.ceph.conf"            # Specify a nonstandard ceph.conf file
      # Mount command, populated at import time
      mount_cmd: "{command} {cephfs_monitors}:{cephfs_path} {backup_root_path} -o {cephfs_options}"
      # Unmount command, populated at import time
      unmount_cmd: "umount {backup_root_path}"

    # S3FS specific options
    s3fs:
      # S3 bucket
      bucket: mybucket
      # S3 bucket (sub)path, including the leading ':' if used!
      # Leave empty for no (sub)path
      path: ":/mypath"
      # Required command; checked for before mounting, errors if missing
      command: /usr/bin/s3fs
      # Options to pass to the mount command (joined; each requires "-o"!)
      # See the command manual page for more options
      options:
        - "-o passwd_file=/srv/backup_vms.s3fs_credentials"  # Specify a password file
        - "-o host=https://s3.amazonaws.com"                 # Specify an alternate host
        - "-o endpoint=us-east-1"                            # Specify an alternate endpoint/region
      # Mount command, populated at import time
      mount_cmd: "{command} {s3fs_bucket}{s3fs_path} {backup_root_path} {s3fs_options}"
      # Unmount command, populated at import time
      unmount_cmd: "fusermount3 -u {backup_root_path}"

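As a worked illustration of the backup_schedule semantics above, the following standalone Python sketch (not part of this commit, and simplified from the real vm_autobackup logic) simulates which backups remain tracked after each run, using the sample values full_interval: 7 and full_retention: 2:

# Standalone sketch, not part of this commit: simulates the full/incremental selection
# and retention cleanup of "pvc vm autobackup" for one VM. The "datestring" field is
# just the run number here, for readability.
full_interval = 7   # every 7th backup is a full backup
full_retention = 2  # keep the two newest full backups (plus their incrementals)

tracked_backups = []  # newest first, as kept in the per-VM autobackup state file

for run in range(1, 22):
    # Decide the type of this run's backup, mirroring vm_autobackup
    full_backups = [b for b in tracked_backups if b["type"] == "full"]
    if full_backups and tracked_backups.index(full_backups[0]) < full_interval - 1:
        backup = {"datestring": run, "type": "incremental", "parent": full_backups[0]["datestring"]}
    else:
        backup = {"datestring": run, "type": "full", "parent": None}
    tracked_backups.insert(0, backup)

    # Expire full backups beyond the retention count, plus their child incrementals
    fulls_seen = 0
    marked = []
    for b in tracked_backups:
        if b["type"] == "full":
            fulls_seen += 1
            if fulls_seen > full_retention:
                marked.append(b)
    marked += [
        b for b in tracked_backups
        if b["type"] == "incremental" and b["parent"] in [m["datestring"] for m in marked]
    ]
    for b in marked:
        tracked_backups.remove(b)

    print(run, [(b["datestring"], b["type"]) for b in tracked_backups])

With a daily timer, this works out to a full backup roughly once a week and roughly two weeks of backups retained at any given time; only the run count matters, not the wall-clock interval between runs.
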
@@ -51,7 +51,9 @@ import click
###############################################################################


CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"], max_content_width=120)
CONTEXT_SETTINGS = dict(
    help_option_names=["-h", "--help"], max_content_width=MAX_CONTENT_WIDTH
)
IS_COMPLETION = True if environ.get("_PVC_COMPLETE", "") == "complete" else False

CLI_CONFIG = dict()

@@ -1734,6 +1736,64 @@ def cli_vm_backup_remove(domain, backup_datestring, backup_path):
    finish(retcode, retmsg)


###############################################################################
# > pvc vm autobackup
###############################################################################
@click.command(
    name="autobackup", short_help="Perform automatic virtual machine backups."
)
@connection_req
@click.option(
    "-f",
    "--configuration",
    "autobackup_cfgfile",
    envvar="PVC_AUTOBACKUP_CFGFILE",
    default=DEFAULT_AUTOBACKUP_FILENAME,
    show_default=True,
    help="Override default config file location.",
)
@click.option(
    "--force-full",
    "force_full_flag",
    default=False,
    is_flag=True,
    help="Force all backups to be full backups this run.",
)
def cli_vm_autobackup(autobackup_cfgfile, force_full_flag):
    """
    Perform automated backups of VMs, with integrated cleanup and full/incremental scheduling.

    This command enables automatic backup of PVC VMs at the block level, leveraging the various "pvc vm backup"
    functions with an internal retention and cleanup system as well as determination of full vs. incremental
    backups at different intervals. VMs are selected based on configured VM tags. The destination storage
    may either be local, or provided by a remote filesystem which is automatically mounted and unmounted during
    the backup run.

    NOTE: This command performs its tasks in a local context. It MUST be run from the cluster's active primary
    coordinator using the "local" connection only; if either is not correct, the command will error.

    NOTE: This command should be run as the same user as the API daemon, usually "root" with "sudo -E" or in
    a cronjob as "root", to ensure permissions are correct on the backup files. Failure to do so will still take
    the backup, but the state update write will likely fail and the backup will become untracked. The command
    will prompt for confirmation if it is found not to be running as "root", and this cannot be bypassed.

    This command should be run from cron or a timer at a regular interval (e.g. daily, hourly, etc.) which defines
    how often backups are taken. Backup format (full/incremental) and retention are based only on the number of
    recorded backups, not on the time interval between them. Backups taken manually outside of the "autobackup"
    command are not counted towards the format or retention of autobackups.

    The PVC_AUTOBACKUP_CFGFILE envvar or "-f"/"--configuration" option can be used to override the default
    configuration file path if required by a particular run. For full details of the possible options, please
    see the example configuration file at "/usr/share/pvc/autobackup.sample.yaml".

    The "--force-full" option can be used to force all configured VMs to perform a "full" level backup this run,
    which can help synchronize the backups of existing VMs with new ones.
    """

    # All work here is done in the helper function for portability; we don't even use "finish"
    vm_autobackup(CLI_CONFIG, autobackup_cfgfile, force_full_flag)


###############################################################################
# > pvc vm tag
###############################################################################

@@ -5807,6 +5867,7 @@ cli_vm_backup.add_command(cli_vm_backup_create)
cli_vm_backup.add_command(cli_vm_backup_restore)
cli_vm_backup.add_command(cli_vm_backup_remove)
cli_vm.add_command(cli_vm_backup)
cli_vm.add_command(cli_vm_autobackup)
cli_vm_tag.add_command(cli_vm_tag_get)
cli_vm_tag.add_command(cli_vm_tag_add)
cli_vm_tag.add_command(cli_vm_tag_remove)

@@ -20,25 +20,33 @@
###############################################################################

from click import echo as click_echo
from click import progressbar
from click import progressbar, confirm
from datetime import datetime
from distutils.util import strtobool
from getpass import getuser
from json import load as jload
from json import dump as jdump
from os import chmod, environ, getpid, path
from os import chmod, environ, getpid, path, makedirs
from re import findall
from socket import gethostname
from subprocess import run, PIPE
from sys import argv
from syslog import syslog, openlog, closelog, LOG_AUTH
from time import sleep
from yaml import load as yload
from yaml import BaseLoader
from yaml import BaseLoader, SafeLoader

import pvc.lib.provisioner
import pvc.lib.vm
import pvc.lib.node


DEFAULT_STORE_DATA = {"cfgfile": "/etc/pvc/pvcapid.yaml"}
DEFAULT_STORE_FILENAME = "pvc.json"
DEFAULT_API_PREFIX = "/api/v1"
DEFAULT_NODE_HOSTNAME = gethostname().split(".")[0]
DEFAULT_AUTOBACKUP_FILENAME = "/etc/pvc/autobackup.yaml"
MAX_CONTENT_WIDTH = 120


def echo(config, message, newline=True, stderr=False):

@@ -238,3 +246,405 @@ def wait_for_provisioner(CLI_CONFIG, task_id):
    retdata = task_status.get("state") + ": " + task_status.get("status")

    return retdata


def get_autobackup_config(CLI_CONFIG, cfgfile):
    try:
        config = dict()
        with open(cfgfile) as fh:
            backup_config = yload(fh, Loader=SafeLoader)["autobackup"]

        config["backup_root_path"] = backup_config["backup_root_path"]
        config["backup_root_suffix"] = backup_config["backup_root_suffix"]
        config["backup_tags"] = backup_config["backup_tags"]
        config["backup_schedule"] = backup_config["backup_schedule"]
        config["remote_mount_enabled"] = backup_config["remote_mount"]["enabled"]
        if config["remote_mount_enabled"]:
            config["remote_mount_type"] = backup_config["remote_mount"]["type"]
        else:
            config["remote_mount_type"] = None

        if config["remote_mount_type"] == "sshfs":
            config["check_command"] = backup_config["remote_mount_config"]["sshfs"][
                "command"
            ]
            config["remote_mount_cmd"] = backup_config["remote_mount_config"]["sshfs"][
                "mount_cmd"
            ].format(
                command=backup_config["remote_mount_config"]["sshfs"]["command"],
                sshfs_user=backup_config["remote_mount_config"]["sshfs"]["user"],
                sshfs_host=backup_config["remote_mount_config"]["sshfs"]["host"],
                sshfs_path=backup_config["remote_mount_config"]["sshfs"]["path"],
                sshfs_options=" ".join(
                    backup_config["remote_mount_config"]["sshfs"]["options"]
                ),
                backup_root_path=backup_config["backup_root_path"],
            )
            config["remote_unmount_cmd"] = backup_config["remote_mount_config"][
                "sshfs"
            ]["unmount_cmd"].format(
                backup_root_path=backup_config["backup_root_path"],
            )
        elif config["remote_mount_type"] == "nfs":
            config["check_command"] = backup_config["remote_mount_config"]["nfs"][
                "command"
            ]
            config["remote_mount_cmd"] = backup_config["remote_mount_config"]["nfs"][
                "mount_cmd"
            ].format(
                command=backup_config["remote_mount_config"]["nfs"]["command"],
                nfs_host=backup_config["remote_mount_config"]["nfs"]["host"],
                nfs_path=backup_config["remote_mount_config"]["nfs"]["path"],
                nfs_options=",".join(
                    backup_config["remote_mount_config"]["nfs"]["options"]
                ),
                backup_root_path=backup_config["backup_root_path"],
            )
            config["remote_unmount_cmd"] = backup_config["remote_mount_config"]["nfs"][
                "unmount_cmd"
            ].format(
                backup_root_path=backup_config["backup_root_path"],
            )
        elif config["remote_mount_type"] == "cifs":
            config["check_command"] = backup_config["remote_mount_config"]["cifs"][
                "command"
            ]
            config["remote_mount_cmd"] = backup_config["remote_mount_config"]["cifs"][
                "mount_cmd"
            ].format(
                command=backup_config["remote_mount_config"]["cifs"]["command"],
                cifs_host=backup_config["remote_mount_config"]["cifs"]["host"],
                cifs_path=backup_config["remote_mount_config"]["cifs"]["path"],
                cifs_options=",".join(
                    backup_config["remote_mount_config"]["cifs"]["options"]
                ),
                backup_root_path=backup_config["backup_root_path"],
            )
            config["remote_unmount_cmd"] = backup_config["remote_mount_config"]["cifs"][
                "unmount_cmd"
            ].format(
                backup_root_path=backup_config["backup_root_path"],
            )
        elif config["remote_mount_type"] == "s3fs":
            config["check_command"] = backup_config["remote_mount_config"]["s3fs"][
                "command"
            ]
            config["remote_mount_cmd"] = backup_config["remote_mount_config"]["s3fs"][
                "mount_cmd"
            ].format(
                command=backup_config["remote_mount_config"]["s3fs"]["command"],
                s3fs_bucket=backup_config["remote_mount_config"]["s3fs"]["bucket"],
                s3fs_path=backup_config["remote_mount_config"]["s3fs"]["path"],
                s3fs_options=" ".join(
                    backup_config["remote_mount_config"]["s3fs"]["options"]
                ),
                backup_root_path=backup_config["backup_root_path"],
            )
            config["remote_unmount_cmd"] = backup_config["remote_mount_config"]["s3fs"][
                "unmount_cmd"
            ].format(
                backup_root_path=backup_config["backup_root_path"],
            )
        elif config["remote_mount_type"] == "cephfs":
            config["check_command"] = backup_config["remote_mount_config"]["cephfs"][
                "command"
            ]
            config["remote_mount_cmd"] = backup_config["remote_mount_config"]["cephfs"][
                "mount_cmd"
            ].format(
                command=backup_config["remote_mount_config"]["cephfs"]["command"],
                cephfs_monitors=",".join(
                    backup_config["remote_mount_config"]["cephfs"]["monitors"]
                ),
                cephfs_path=backup_config["remote_mount_config"]["cephfs"]["path"],
                cephfs_options=",".join(
                    backup_config["remote_mount_config"]["cephfs"]["options"]
                ),
                backup_root_path=backup_config["backup_root_path"],
            )
            config["remote_unmount_cmd"] = backup_config["remote_mount_config"][
                "cephfs"
            ]["unmount_cmd"].format(
                backup_root_path=backup_config["backup_root_path"],
            )
        else:
            config["remote_mount_cmd"] = None
            config["remote_unmount_cmd"] = None

    except FileNotFoundError:
        echo(CLI_CONFIG, "ERROR: Specified backup configuration does not exist!")
        exit(1)
    except KeyError as e:
        echo(CLI_CONFIG, f"ERROR: Backup configuration is invalid: {e}")
        exit(1)

    return config

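For clarity, the following sketch (not part of the commit) shows roughly what get_autobackup_config() returns for the sample configuration file above, assuming remote_mount were enabled with type sshfs (the shipped sample has enabled: no):

# Illustrative sketch only; values follow autobackup.sample.yaml above,
# assuming remote_mount.enabled were set to "yes" with type "sshfs".
example_config = {
    "backup_root_path": "/tmp/backups",
    "backup_root_suffix": "/mycluster",
    "backup_tags": ["backup", "mytag"],
    "backup_schedule": {"full_interval": 7, "full_retention": 2},
    "remote_mount_enabled": True,
    "remote_mount_type": "sshfs",
    "check_command": "/usr/bin/sshfs",
    # The mount/unmount templates are fully expanded at config load time:
    "remote_mount_cmd": (
        "/usr/bin/sshfs username@hostname:/srv/vm_backups /tmp/backups "
        "-o IdentityFile=/srv/pvc_autobackup.id_ed25519 -o port=22 -o reconnect "
        "-o default_permissions -o compression=no -o sshfs_sync"
    ),
    "remote_unmount_cmd": "fusermount3 -u /tmp/backups",
}
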

def vm_autobackup(
    CLI_CONFIG, autobackup_cfgfile=DEFAULT_AUTOBACKUP_FILENAME, force_full_flag=False
):
    """
    Perform automatic backups of VMs based on an external config file.
    """

    # Validate that we are running on the current primary coordinator of the 'local' cluster connection
    real_connection = CLI_CONFIG["connection"]
    CLI_CONFIG["connection"] = "local"
    retcode, retdata = pvc.lib.node.node_info(CLI_CONFIG, DEFAULT_NODE_HOSTNAME)
    if not retcode or retdata.get("coordinator_state") != "primary":
        echo(
            CLI_CONFIG,
            f"ERROR: Current host is not the primary coordinator of the local cluster; got connection '{real_connection}', host '{DEFAULT_NODE_HOSTNAME}'.",
        )
        echo(
            CLI_CONFIG,
            "Autobackup MUST be run from the cluster active primary coordinator using the 'local' connection. See '-h'/'--help' for details.",
        )
        exit(1)

    # Ensure we're running as root, or show a warning & confirmation
    if getuser() != "root":
        confirm(
            "WARNING: You are not running this command as 'root'. This command should be run under the same user as the API daemon, which is usually 'root'. Are you sure you want to continue?",
            prompt_suffix=" ",
            abort=True,
        )

    # Load our YAML config
    autobackup_config = get_autobackup_config(CLI_CONFIG, autobackup_cfgfile)

    # Get a list of all VMs on the cluster
    # We don't do tag filtering here, because we could match an arbitrary number of tags; instead, we
    # parse the list after
    retcode, retdata = pvc.lib.vm.vm_list(CLI_CONFIG, None, None, None, None, None)
    if not retcode:
        echo(CLI_CONFIG, f"ERROR: Failed to fetch VM list: {retdata}")
        exit(1)
    cluster_vms = retdata

    # Parse the list to match tags; too complex for list comprehension alas
    backup_vms = list()
    for vm in cluster_vms:
        vm_tag_names = [t["name"] for t in vm["tags"]]
        matching_tags = (
            True
            if len(
                set(vm_tag_names).intersection(set(autobackup_config["backup_tags"]))
            )
            > 0
            else False
        )
        if matching_tags:
            backup_vms.append(vm["name"])

    if len(backup_vms) < 1:
        echo(CLI_CONFIG, "Found no suitable VMs for autobackup.")
        exit(0)

    # Pretty print the names of the VMs we'll back up (to stderr)
    maxnamelen = max([len(n) for n in backup_vms]) + 2
    cols = 1
    while (cols * maxnamelen + maxnamelen + 2) <= MAX_CONTENT_WIDTH:
        cols += 1
    rows = len(backup_vms) // cols
    vm_list_rows = list()
    for row in range(0, rows + 1):
        row_start = row * cols
        row_end = (row * cols) + cols
        row_str = ""
        for x in range(row_start, row_end):
            if x < len(backup_vms):
                row_str += "{:<{}}".format(backup_vms[x], maxnamelen)
        vm_list_rows.append(row_str)

    echo(CLI_CONFIG, f"Found {len(backup_vms)} suitable VM(s) for autobackup.")
    echo(CLI_CONFIG, "Full VM list:", stderr=True)
    echo(CLI_CONFIG, " {}".format("\n ".join(vm_list_rows)), stderr=True)
    echo(CLI_CONFIG, "", stderr=True)

    if autobackup_config["remote_mount_cmd"] is not None:
        # Validate that the required mount command exists
        if not path.exists(autobackup_config["check_command"]):
            echo(
                CLI_CONFIG,
                f"ERROR: Failed to find required command {autobackup_config['check_command']}; ensure it is installed.",
            )
            exit(1)

        # Try to mount the remote mount
        echo(
            CLI_CONFIG,
            f"Mounting remote {autobackup_config['remote_mount_type']} filesystem on {autobackup_config['backup_root_path']}... ",
            newline=False,
        )
        tstart = datetime.now()
        ret = run(
            autobackup_config["remote_mount_cmd"].split(),
            stdout=PIPE,
            stderr=PIPE,
        )
        tend = datetime.now()
        ttot = tend - tstart
        if ret.returncode != 0:
            echo(
                CLI_CONFIG,
                f"failed. [{ttot.seconds}s]",
            )
            echo(CLI_CONFIG, f"Exiting; command reports: {ret.stderr.decode().strip()}")
            exit(1)
        else:
            echo(CLI_CONFIG, f"done. [{ttot.seconds}s]")

    # For each VM, perform the backup
    for vm in backup_vms:
        backup_suffixed_path = f"{autobackup_config['backup_root_path']}{autobackup_config['backup_root_suffix']}"
        if not path.exists(backup_suffixed_path):
            makedirs(backup_suffixed_path)

        backup_path = f"{backup_suffixed_path}/{vm}"
        autobackup_state_file = f"{backup_path}/.autobackup.json"
        if not path.exists(backup_path) or not path.exists(autobackup_state_file):
            # There are no existing backups, so the tracked list starts empty
            state_data = dict()
            tracked_backups = list()
        else:
            with open(autobackup_state_file) as fh:
                state_data = jload(fh)
            tracked_backups = state_data["tracked_backups"]

        full_interval = autobackup_config["backup_schedule"]["full_interval"]
        full_retention = autobackup_config["backup_schedule"]["full_retention"]

        full_backups = [b for b in tracked_backups if b["type"] == "full"]
        if len(full_backups) > 0:
            last_full_backup = full_backups[0]
            last_full_backup_idx = tracked_backups.index(last_full_backup)
            if force_full_flag:
                this_backup_type = "forced-full"
                this_backup_incremental_parent = None
                this_backup_retain_snapshot = True
            elif last_full_backup_idx >= full_interval - 1:
                this_backup_type = "full"
                this_backup_incremental_parent = None
                this_backup_retain_snapshot = True
            else:
                this_backup_type = "incremental"
                this_backup_incremental_parent = last_full_backup["datestring"]
                this_backup_retain_snapshot = False
        else:
            # The very first backup must be full to start the tree
            this_backup_type = "full"
            this_backup_incremental_parent = None
            this_backup_retain_snapshot = True

        # Perform the backup
        echo(
            CLI_CONFIG,
            f"Backing up VM '{vm}' ({this_backup_type})... ",
            newline=False,
        )
        tstart = datetime.now()
        retcode, retdata = pvc.lib.vm.vm_backup(
            CLI_CONFIG,
            vm,
            backup_suffixed_path,
            incremental_parent=this_backup_incremental_parent,
            retain_snapshot=this_backup_retain_snapshot,
        )
        tend = datetime.now()
        ttot = tend - tstart
        if not retcode:
            echo(CLI_CONFIG, f"failed. [{ttot.seconds}s]")
            echo(CLI_CONFIG, f"Skipping cleanups; command reports: {retdata}")
            continue
        else:
            backup_datestring = findall(r"[0-9]{14}", retdata)[0]
            echo(
                CLI_CONFIG,
                f"done. Backup '{backup_datestring}' created. [{ttot.seconds}s]",
            )

        # Read backup file to get details
        backup_json_file = f"{backup_path}/{backup_datestring}/pvcbackup.json"
        with open(backup_json_file) as fh:
            backup_json = jload(fh)
        backup = {
            "datestring": backup_json["datestring"],
            "type": backup_json["type"],
            "parent": backup_json["incremental_parent"],
            "retained_snapshot": backup_json["retained_snapshot"],
        }
        tracked_backups.insert(0, backup)

        # Delete any full backups that are expired
        marked_for_deletion = list()
        found_full_count = 0
        for backup in tracked_backups:
            if backup["type"] == "full":
                found_full_count += 1
                if found_full_count > full_retention:
                    marked_for_deletion.append(backup)

        # Delete any incremental backups that depend on marked parents
        for backup in tracked_backups:
            if backup["type"] == "incremental" and backup["parent"] in [
                b["datestring"] for b in marked_for_deletion
            ]:
                marked_for_deletion.append(backup)

        # Execute deletes
        for backup_to_delete in marked_for_deletion:
            echo(
                CLI_CONFIG,
                f"Removing old VM '{vm}' backup '{backup_to_delete['datestring']}' ({backup_to_delete['type']})... ",
                newline=False,
            )
            tstart = datetime.now()
            retcode, retdata = pvc.lib.vm.vm_remove_backup(
                CLI_CONFIG,
                vm,
                backup_suffixed_path,
                backup_to_delete["datestring"],
            )
            tend = datetime.now()
            ttot = tend - tstart
            if not retcode:
                echo(CLI_CONFIG, f"failed. [{ttot.seconds}s]")
                echo(
                    CLI_CONFIG,
                    f"Skipping removal from tracked backups; command reports: {retdata}",
                )
                continue
            else:
                tracked_backups.remove(backup_to_delete)
                echo(CLI_CONFIG, f"done. [{ttot.seconds}s]")

        # Update tracked state information
        state_data["tracked_backups"] = tracked_backups
        with open(autobackup_state_file, "w") as fh:
            jdump(state_data, fh)

    # Try to unmount the remote mount
    if autobackup_config["remote_unmount_cmd"] is not None:
        echo(
            CLI_CONFIG,
            f"Unmounting remote {autobackup_config['remote_mount_type']} filesystem from {autobackup_config['backup_root_path']}... ",
            newline=False,
        )
        tstart = datetime.now()
        ret = run(
            autobackup_config["remote_unmount_cmd"].split(),
            stdout=PIPE,
            stderr=PIPE,
        )
        tend = datetime.now()
        ttot = tend - tstart
        if ret.returncode != 0:
            echo(CLI_CONFIG, f"failed. [{ttot.seconds}s]")
            echo(
                CLI_CONFIG,
                f"Continuing; command reports: {ret.stderr.decode().strip()}",
            )
        else:
            echo(CLI_CONFIG, f"done. [{ttot.seconds}s]")

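For reference, the following is a sketch (values are illustrative, not from a real run) of the per-VM state this function maintains, written with json.dump to the .autobackup.json file under the VM's directory in the suffixed backup root path:

# Illustrative sketch only: the tracked-backup state kept per VM by vm_autobackup.
# Entries are newest-first; "parent" ties an incremental backup to its full backup.
# The 14-digit datestring format shown here is an assumption for illustration.
example_state = {
    "tracked_backups": [
        {
            "datestring": "20231102020204",
            "type": "incremental",
            "parent": "20231101020159",
            "retained_snapshot": False,
        },
        {
            "datestring": "20231101020159",
            "type": "full",
            "parent": None,
            "retained_snapshot": True,
        },
    ]
}
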
@@ -0,0 +1 @@
client-cli/autobackup.sample.yaml usr/share/pvc