Use consistent path name and format

Joshua Boniface 2023-10-24 01:20:44 -04:00
parent 63d0a85e29
commit c87736eb0a
5 changed files with 186 additions and 110 deletions


@@ -2298,7 +2298,7 @@ class API_VM_Backup(Resource):
     @RequestParser(
         [
             {
-                "name": "target_path",
+                "name": "backup_path",
                 "required": True,
                 "helptext": "A local filesystem path on the primary coordinator must be specified",
             },
@@ -2321,7 +2321,7 @@ class API_VM_Backup(Resource):
           - vm
         parameters:
           - in: query
-            name: target_path
+            name: backup_path
             type: string
             required: true
             description: A local filesystem path on the primary coordinator to store the backup
@@ -2353,17 +2353,17 @@ class API_VM_Backup(Resource):
               type: object
               id: Message
         """
-        target_path = reqargs.get("target_path", None)
+        backup_path = reqargs.get("backup_path", None)
         incremental_parent = reqargs.get("incremental_parent", None)
         retain_snapshot = bool(strtobool(reqargs.get("retain_snapshot", "false")))
         return api_helper.vm_backup(
-            vm, target_path, incremental_parent, retain_snapshot
+            vm, backup_path, incremental_parent, retain_snapshot
         )

     @RequestParser(
         [
             {
-                "name": "target_path",
+                "name": "backup_path",
                 "required": True,
                 "helptext": "A local filesystem path on the primary coordinator must be specified",
             },
@@ -2383,7 +2383,7 @@ class API_VM_Backup(Resource):
           - vm
         parameters:
           - in: query
-            name: target_path
+            name: backup_path
             type: string
             required: true
             description: A local filesystem path on the primary coordinator where the backup is stored
@@ -2409,11 +2409,9 @@ class API_VM_Backup(Resource):
               type: object
               id: Message
         """
-        target_path = reqargs.get("target_path", None)
+        backup_path = reqargs.get("backup_path", None)
         backup_datestring = reqargs.get("backup_datestring", None)
-        return api_helper.vm_remove_backup(
-            vm, target_path, backup_datestring
-        )
+        return api_helper.vm_remove_backup(vm, backup_path, backup_datestring)


 api.add_resource(API_VM_Backup, "/vm/<vm>/backup")
@@ -2424,7 +2422,7 @@ class API_VM_Restore(Resource):
     @RequestParser(
         [
             {
-                "name": "target_path",
+                "name": "backup_path",
                 "required": True,
                 "helptext": "A local filesystem path on the primary coordinator must be specified",
             },
@@ -2448,7 +2446,7 @@ class API_VM_Restore(Resource):
           - vm
         parameters:
           - in: query
-            name: target_path
+            name: backup_path
             type: string
             required: true
             description: A local filesystem path on the primary coordinator where the backup is stored
@@ -2480,11 +2478,11 @@ class API_VM_Restore(Resource):
               type: object
               id: Message
         """
-        target_path = reqargs.get("target_path", None)
+        backup_path = reqargs.get("backup_path", None)
         backup_datestring = reqargs.get("backup_datestring", None)
         retain_snapshot = bool(strtobool(reqargs.get("retain_snapshot", "true")))
         return api_helper.vm_restore(
-            vm, target_path, backup_datestring, retain_snapshot
+            vm, backup_path, backup_datestring, retain_snapshot
         )
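
Note: after this change the backup endpoints expect backup_path (formerly target_path) as the query parameter name. A minimal sketch of exercising the renamed parameter directly over HTTP; the base URL, API key header value, VM name, path, and datestring below are all placeholders, not taken from this commit:

import requests

API_BASE = "http://pvc.local:7370/api/v1"  # placeholder base URL; use your cluster's API address
HEADERS = {"X-Api-Key": "secret"}          # placeholder; match your cluster's authentication settings

# Create a full backup of VM "web01", retaining the snapshot for later incrementals
resp = requests.post(
    f"{API_BASE}/vm/web01/backup",
    headers=HEADERS,
    params={
        "backup_path": "/srv/vm-backups",  # renamed from "target_path" in this commit
        "retain_snapshot": "true",
    },
)
print(resp.json().get("message"))

# Remove a specific backup (and any retained snapshots) by its datestring
resp = requests.delete(
    f"{API_BASE}/vm/web01/backup",
    headers=HEADERS,
    params={
        "backup_path": "/srv/vm-backups",
        "backup_datestring": "20231024012044",  # example datestring
    },
)
print(resp.json().get("message"))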


@@ -474,7 +474,7 @@ def vm_define(
 def vm_backup(
     zkhandler,
     domain,
-    target_path,
+    backup_path,
     incremental_parent=None,
     retain_snapshot=False,
 ):
@@ -484,7 +484,7 @@ def vm_backup(
     retflag, retdata = pvc_vm.backup_vm(
         zkhandler,
         domain,
-        target_path,
+        backup_path,
         incremental_parent,
         retain_snapshot,
     )
@@ -528,7 +528,7 @@ def vm_remove_backup(
 def vm_restore(
     zkhandler,
     domain,
-    target_path,
+    backup_path,
     datestring,
     retain_snapshot=False,
 ):
@@ -538,7 +538,7 @@ def vm_restore(
     retflag, retdata = pvc_vm.restore_vm(
         zkhandler,
         domain,
-        target_path,
+        backup_path,
         datestring,
         retain_snapshot,
     )


@@ -1611,7 +1611,7 @@ def cli_vm_backup():
 @click.command(name="create", short_help="Create a backup of a virtual machine.")
 @connection_req
 @click.argument("domain")
-@click.argument("target_path")
+@click.argument("backup_path")
 @click.option(
     "-i",
     "--incremental",
@@ -1627,11 +1627,11 @@ def cli_vm_backup():
     default=False,
     help="Retain volume snapshot for future incremental use (full only).",
 )
-def cli_vm_backup_create(domain, target_path, incremental_parent, retain_snapshot):
+def cli_vm_backup_create(domain, backup_path, incremental_parent, retain_snapshot):
     """
-    Create a backup of virtual machine DOMAIN to TARGET_PATH on the cluster primary coordinator. DOMAIN may be a UUID or name.
+    Create a backup of virtual machine DOMAIN to BACKUP_PATH on the cluster primary coordinator. DOMAIN may be a UUID or name.

-    TARGET_PATH must be a valid absolute directory path on the cluster "primary" coordinator (see "pvc node list") allowing writes from the API daemon (normally running as "root"). The TARGET_PATH should be a large storage volume, ideally a remotely mounted filesystem (e.g. NFS, SSHFS, etc.) or non-Ceph-backed disk; PVC does not handle this path, that is up to the administrator to configure and manage.
+    BACKUP_PATH must be a valid absolute directory path on the cluster "primary" coordinator (see "pvc node list") allowing writes from the API daemon (normally running as "root"). The BACKUP_PATH should be a large storage volume, ideally a remotely mounted filesystem (e.g. NFS, SSHFS, etc.) or non-Ceph-backed disk; PVC does not handle this path, that is up to the administrator to configure and manage.

     The backup will export the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes, using a datestring formatted backup name (i.e. YYYYMMDDHHMMSS).
@@ -1639,7 +1639,7 @@ def cli_vm_backup_create(domain, target_path, incremental_parent, retain_snapshot):
     Incremental snapshots are possible by specifying the "-i"/"--incremental" option along with a source backup datestring. The snapshots from that source backup must have been retained using the "-r"/"--retain-snapshots" option. Retaining snapshots of incremental backups is not supported as incremental backups cannot be chained.

-    Full backup volume images are sparse-allocated, however it is recommended for safety to consider their maximum allocated size when allocated space for the TARGET_PATH. Incremental volume images are generally small but are dependent entirely on the rate of data change in each volume.
+    Full backup volume images are sparse-allocated, however it is recommended for safety to consider their maximum allocated size when allocated space for the BACKUP_PATH. Incremental volume images are generally small but are dependent entirely on the rate of data change in each volume.
     """
     echo(
@@ -1648,7 +1648,7 @@ def cli_vm_backup_create(domain, target_path, incremental_parent, retain_snapshot):
         newline=False,
     )
     retcode, retmsg = pvc.lib.vm.vm_backup(
-        CLI_CONFIG, domain, target_path, incremental_parent, retain_snapshot
+        CLI_CONFIG, domain, backup_path, incremental_parent, retain_snapshot
     )
     if retcode:
         echo(CLI_CONFIG, "done.")
@@ -1664,7 +1664,7 @@ def cli_vm_backup_create(domain, target_path, incremental_parent, retain_snapshot):
 @connection_req
 @click.argument("domain")
 @click.argument("backup_datestring")
-@click.argument("target_path")
+@click.argument("backup_path")
 @click.option(
     "-r/-R",
     "--retain-snapshot/--remove-snapshot",
@@ -1673,11 +1673,11 @@ def cli_vm_backup_create(domain, target_path, incremental_parent, retain_snapshot):
     default=True,
     help="Retain or remove restored (parent, if incremental) snapshot.",
 )
-def cli_vm_backup_restore(domain, backup_datestring, target_path, retain_snapshot):
+def cli_vm_backup_restore(domain, backup_datestring, backup_path, retain_snapshot):
     """
-    Restore the backup BACKUP_DATESTRING of virtual machine DOMAIN stored in TARGET_PATH on the cluster primary coordinator. DOMAIN may be a UUID or name.
+    Restore the backup BACKUP_DATESTRING of virtual machine DOMAIN stored in BACKUP_PATH on the cluster primary coordinator. DOMAIN may be a UUID or name.

-    TARGET_PATH must be a valid absolute directory path on the cluster "primary" coordinator (see "pvc node list") allowing reads from the API daemon (normally running as "root"). The TARGET_PATH should be a large storage volume, ideally a remotely mounted filesystem (e.g. NFS, SSHFS, etc.) or non-Ceph-backed disk; PVC does not handle this path, that is up to the administrator to configure and manage.
+    BACKUP_PATH must be a valid absolute directory path on the cluster "primary" coordinator (see "pvc node list") allowing reads from the API daemon (normally running as "root"). The BACKUP_PATH should be a large storage volume, ideally a remotely mounted filesystem (e.g. NFS, SSHFS, etc.) or non-Ceph-backed disk; PVC does not handle this path, that is up to the administrator to configure and manage.

     The restore will import the VM configuration, metainfo, and the point-in-time snapshot of all attached RBD volumes. Incremental backups will be automatically handled.
@@ -1694,7 +1694,7 @@ def cli_vm_backup_restore(domain, backup_datestring, target_path, retain_snapshot):
         newline=False,
     )
     retcode, retmsg = pvc.lib.vm.vm_restore(
-        CLI_CONFIG, domain, target_path, backup_datestring, retain_snapshot
+        CLI_CONFIG, domain, backup_path, backup_datestring, retain_snapshot
     )
     if retcode:
         echo(CLI_CONFIG, "done.")
@@ -1710,10 +1710,10 @@ def cli_vm_backup_restore(domain, backup_datestring, target_path, retain_snapshot):
 @connection_req
 @click.argument("domain")
 @click.argument("backup_datestring")
-@click.argument("target_path")
-def cli_vm_backup_remove(domain, backup_datestring, target_path):
+@click.argument("backup_path")
+def cli_vm_backup_remove(domain, backup_datestring, backup_path):
     """
-    Remove the backup BACKUP_DATESTRING, including snapshots, of virtual machine DOMAIN stored in TARGET_PATH on the cluster primary coordinator. DOMAIN may be a UUID or name.
+    Remove the backup BACKUP_DATESTRING, including snapshots, of virtual machine DOMAIN stored in BACKUP_PATH on the cluster primary coordinator. DOMAIN may be a UUID or name.

     WARNING: Removing an incremental parent will invalidate any existing incremental backups based on that backup.
     """
@@ -1724,7 +1724,7 @@ def cli_vm_backup_remove(domain, backup_datestring, target_path):
         newline=False,
     )
     retcode, retmsg = pvc.lib.vm.vm_remove_backup(
-        CLI_CONFIG, domain, target_path, backup_datestring
+        CLI_CONFIG, domain, backup_path, backup_datestring
     )
     if retcode:
         echo(CLI_CONFIG, "done.")
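
The BACKUP_PATH described in these help texts ends up holding one subdirectory per VM, with a datestring-named metadata file and disk-export directory per backup run. A rough sketch of that naming, assuming the layout used by backup_vm later in this commit (all values are examples):

from datetime import datetime

backup_path = "/srv/vm-backups"  # example BACKUP_PATH argument
domain = "web01"                 # example DOMAIN
datestring = datetime.now().strftime("%Y%m%d%H%M%S")  # YYYYMMDDHHMMSS, as described above

# Expected artifacts for one backup run of this VM:
metadata_file = f"{backup_path}/{domain}/{domain}.{datestring}.pvcbackup"  # backup metadata
disks_dir = f"{backup_path}/{domain}/{domain}.{datestring}.pvcdisks"       # exported RBD images
print(metadata_file)
print(disks_dir)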


@@ -433,16 +433,16 @@ def vm_locks(config, vm):
     return retstatus, response.json().get("message", "")


-def vm_backup(config, vm, target_path, incremental_parent=None, retain_snapshot=False):
+def vm_backup(config, vm, backup_path, incremental_parent=None, retain_snapshot=False):
     """
     Create a backup of {vm} and its volumes to a local primary coordinator filesystem path

     API endpoint: POST /vm/{vm}/backup
-    API arguments: target_path={target_path}, incremental_parent={incremental_parent}, retain_snapshot={retain_snapshot}
+    API arguments: backup_path={backup_path}, incremental_parent={incremental_parent}, retain_snapshot={retain_snapshot}
     API schema: {"message":"{data}"}
     """
     params = {
-        "target_path": target_path,
+        "backup_path": backup_path,
         "incremental_parent": incremental_parent,
         "retain_snapshot": retain_snapshot,
     }
@@ -454,19 +454,21 @@ def vm_backup(config, vm, target_path, incremental_parent=None, retain_snapshot=False):
     return True, response.json().get("message", "")


-def vm_remove_backup(config, vm, target_path, backup_datestring):
+def vm_remove_backup(config, vm, backup_path, backup_datestring):
     """
     Remove a backup of {vm}, including snapshots, from a local primary coordinator filesystem path

     API endpoint: DELETE /vm/{vm}/backup
-    API arguments: target_path={target_path}, backup_datestring={backup_datestring}
+    API arguments: backup_path={backup_path}, backup_datestring={backup_datestring}
     API schema: {"message":"{data}"}
     """
     params = {
-        "target_path": target_path,
+        "backup_path": backup_path,
         "backup_datestring": backup_datestring,
     }
-    response = call_api(config, "delete", "/vm/{vm}/backup".format(vm=vm), params=params)
+    response = call_api(
+        config, "delete", "/vm/{vm}/backup".format(vm=vm), params=params
+    )

     if response.status_code != 200:
         return False, response.json().get("message", "")
@@ -474,16 +476,16 @@ def vm_remove_backup(config, vm, target_path, backup_datestring):
     return True, response.json().get("message", "")


-def vm_restore(config, vm, target_path, backup_datestring, retain_snapshot=False):
+def vm_restore(config, vm, backup_path, backup_datestring, retain_snapshot=False):
     """
     Restore a backup of {vm} and its volumes from a local primary coordinator filesystem path

     API endpoint: POST /vm/{vm}/restore
-    API arguments: target_path={target_path}, backup_datestring={backup_datestring}, retain_snapshot={retain_snapshot}
+    API arguments: backup_path={backup_path}, backup_datestring={backup_datestring}, retain_snapshot={retain_snapshot}
     API schema: {"message":"{data}"}
     """
     params = {
-        "target_path": target_path,
+        "backup_path": backup_path,
         "backup_datestring": backup_datestring,
         "retain_snapshot": retain_snapshot,
     }
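
Together these client functions cover the workflow the CLI help text describes: a full backup with retained snapshots, an incremental backup against that parent, a restore, and eventual removal. A sketch of calling them directly, assuming config is the same connection configuration dict the CLI passes as CLI_CONFIG (every other value below is a placeholder):

import pvc.lib.vm as pvc_vm

config = ...  # assumed: the connection configuration dict the CLI passes as CLI_CONFIG

# Full backup, retaining RBD snapshots so an incremental can follow later
retcode, retmsg = pvc_vm.vm_backup(config, "web01", "/srv/vm-backups", retain_snapshot=True)
print(retmsg)

# Incremental backup against the earlier full backup's datestring
retcode, retmsg = pvc_vm.vm_backup(
    config, "web01", "/srv/vm-backups", incremental_parent="20231024012044"
)
print(retmsg)

# Restore a specific backup datestring from the same path
retcode, retmsg = pvc_vm.vm_restore(config, "web01", "/srv/vm-backups", "20231024012044")
print(retmsg)

# Remove an old backup (and its retained snapshots)
retcode, retmsg = pvc_vm.vm_remove_backup(config, "web01", "/srv/vm-backups", "20231024012044")
print(retmsg)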


@@ -1309,7 +1309,7 @@ def get_list(
 def backup_vm(
-    zkhandler, domain, target_path, incremental_parent=None, retain_snapshot=False
+    zkhandler, domain, backup_path, incremental_parent=None, retain_snapshot=False
 ):
     tstart = time.time()
@@ -1317,7 +1317,10 @@ def backup_vm(
     # 0. Validations
     # Disallow retaining snapshots with an incremental parent
     if incremental_parent is not None and retain_snapshot:
-        return False, 'ERROR: Retaining snapshots of incremental backups is not supported!'
+        return (
+            False,
+            "ERROR: Retaining snapshots of incremental backups is not supported!",
+        )

     # Validate that VM exists in cluster
     dom_uuid = getDomainUUID(zkhandler, domain)
@@ -1325,15 +1328,15 @@ def backup_vm(
         return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)

     # Validate that the target path is valid
-    if not re.match(r"^/", target_path):
+    if not re.match(r"^/", backup_path):
         return (
             False,
-            f"ERROR: Target path {target_path} is not a valid absolute path on the primary coordinator!",
+            f"ERROR: Target path {backup_path} is not a valid absolute path on the primary coordinator!",
         )

-    # Ensure that target_path (on this node) exists
-    if not os.path.isdir(target_path):
-        return False, f"ERROR: Target path {target_path} does not exist!"
+    # Ensure that backup_path (on this node) exists
+    if not os.path.isdir(backup_path):
+        return False, f"ERROR: Target path {backup_path} does not exist!"

     # 1. Get information about VM
     vm_detail = get_list(zkhandler, limit=dom_uuid, is_fuzzy=False)[1][0]
@@ -1345,7 +1348,7 @@ def backup_vm(
         if disk["type"] != "rbd":
             continue

-        pool, volume = disk["name"].split('/')
+        pool, volume = disk["name"].split("/")

         retcode, retdata = ceph.get_list_volume(zkhandler, pool, volume, is_fuzzy=False)
         if not retcode or len(retdata) != 1:
@@ -1353,7 +1356,10 @@ def backup_vm(
                 retdata = "No volumes returned."
             elif len(retdata) > 1:
                 retdata = "Multiple volumes returned."
-            return False, f"ERROR: Failed to get volume details for {pool}/{volume}: {retdata}"
+            return (
+                False,
+                f"ERROR: Failed to get volume details for {pool}/{volume}: {retdata}",
+            )

         try:
             size = retdata[0]["stats"]["size"]
@@ -1396,8 +1402,8 @@ def backup_vm(
     snapshot_name = f"backup_{datestring}"

     # 4. Create destination directory
-    vm_target_root = f"{target_path}/{domain}"
-    vm_target_backup = f"{target_path}/{domain}/{domain}.{datestring}.pvcdisks"
+    vm_target_root = f"{backup_path}/{domain}"
+    vm_target_backup = f"{backup_path}/{domain}/{domain}.{datestring}.pvcdisks"
     if not os.path.isdir(vm_target_backup):
         try:
             os.makedirs(vm_target_backup)
@@ -1463,7 +1469,10 @@ def backup_vm(
         "datestring": datestring,
         "incremental_parent": incremental_parent,
         "vm_detail": vm_detail,
-        "backup_files": [(f"{domain}.{datestring}.pvcdisks/{p}.{v}.{export_fileext}", s) for p, v, s in vm_volumes],
+        "backup_files": [
+            (f"{domain}.{datestring}.pvcdisks/{p}.{v}.{export_fileext}", s)
+            for p, v, s in vm_volumes
+        ],
     }

     with open(f"{vm_target_root}/{domain}.{datestring}.pvcbackup", "w") as fh:
         jdump(vm_backup, fh)
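
The hunk above also shows the metadata written next to each backup: a .pvcbackup file (JSON, assuming jdump/jload are the standard json dump/load helpers) recording the datestring, the incremental parent if any, the VM detail blob, and the relative paths and sizes of the exported volume images. A small sketch of inspecting one such file; the path, pool, and volume names are examples only:

import json

# Example path; matches the f"{vm_target_root}/{domain}.{datestring}.pvcbackup" layout above
with open("/srv/vm-backups/web01/web01.20231024012044.pvcbackup") as fh:
    backup = json.load(fh)

print(backup["datestring"])
print(backup["incremental_parent"])  # None for a full backup
for relative_file, size_bytes in backup["backup_files"]:
    # e.g. "web01.20231024012044.pvcdisks/vms.web01_disk0.rbdimg" (pool "vms", volume "web01_disk0")
    print(f"{relative_file}: {size_bytes} bytes")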
@@ -1488,18 +1497,24 @@ def backup_vm(
     retlines = list()

     if is_snapshot_remove_failed:
-        retlines.append(f"WARNING: Failed to remove snapshot(s) as requested for volume(s) {', '.join(which_snapshot_remove_failed)}: {', '.join(msg_snapshot_remove_failed)}")
+        retlines.append(
+            f"WARNING: Failed to remove snapshot(s) as requested for volume(s) {', '.join(which_snapshot_remove_failed)}: {', '.join(msg_snapshot_remove_failed)}"
+        )

     myhostname = gethostname().split(".")[0]

     if retain_snapshot:
-        retlines.append(f"Successfully backed up VM '{domain}' ({backup_type}@{datestring}, snapshots retained) to '{myhostname}:{target_path}' in {ttot}s.")
+        retlines.append(
+            f"Successfully backed up VM '{domain}' ({backup_type}@{datestring}, snapshots retained) to '{myhostname}:{backup_path}' in {ttot}s."
+        )
     else:
-        retlines.append(f"Successfully backed up VM '{domain}' ({backup_type}@{datestring}) to '{myhostname}:{target_path}' in {ttot}s.")
+        retlines.append(
+            f"Successfully backed up VM '{domain}' ({backup_type}@{datestring}) to '{myhostname}:{backup_path}' in {ttot}s."
+        )

-    return True, '\n'.join(retlines)
+    return True, "\n".join(retlines)


-def remove_backup(zkhandler, domain, source_path, datestring):
+def remove_backup(zkhandler, domain, backup_path, datestring):
     tstart = time.time()

     # 0. Validation
@@ -1509,27 +1524,29 @@ def remove_backup(zkhandler, domain, source_path, datestring):
         return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)

     # Validate that the source path is valid
-    if not re.match(r"^/", source_path):
+    if not re.match(r"^/", backup_path):
         return (
             False,
-            f"ERROR: Source path {source_path} is not a valid absolute path on the primary coordinator!",
+            f"ERROR: Source path {backup_path} is not a valid absolute path on the primary coordinator!",
         )

-    # Ensure that source_path (on this node) exists
-    if not os.path.isdir(source_path):
-        return False, f"ERROR: Source path {source_path} does not exist!"
+    # Ensure that backup_path (on this node) exists
+    if not os.path.isdir(backup_path):
+        return False, f"ERROR: Source path {backup_path} does not exist!"

     # Ensure that domain path (on this node) exists
-    backup_source_path = f"{source_path}/{domain}"
-    if not os.path.isdir(backup_source_path):
-        return False, f"ERROR: Source VM path {backup_source_path} does not exist!"
+    backup_backup_path = f"{backup_path}/{domain}"
+    if not os.path.isdir(backup_backup_path):
+        return False, f"ERROR: Source VM path {backup_backup_path} does not exist!"

     # Ensure that the archives are present
-    backup_source_pvcbackup_file = f"{backup_source_path}/{domain}.{datestring}.pvcbackup"
+    backup_source_pvcbackup_file = (
+        f"{backup_backup_path}/{domain}.{datestring}.pvcbackup"
+    )
     if not os.path.isfile(backup_source_pvcbackup_file):
         return False, "ERROR: The specified source backup files do not exist!"

-    backup_source_pvcdisks_path = f"{backup_source_path}/{domain}.{datestring}.pvcdisks"
+    backup_source_pvcdisks_path = f"{backup_backup_path}/{domain}.{datestring}.pvcdisks"
     if not os.path.isdir(backup_source_pvcdisks_path):
         return False, "ERROR: The specified source backup files do not exist!"
@@ -1544,8 +1561,8 @@ def remove_backup(zkhandler, domain, source_path, datestring):
     is_snapshot_remove_failed = False
     which_snapshot_remove_failed = list()
     msg_snapshot_remove_failed = list()
-    for volume_file, _ in backup_source_details.get('backup_files'):
-        pool, volume, _ = volume_file.split('/')[-1].split('.')
+    for volume_file, _ in backup_source_details.get("backup_files"):
+        pool, volume, _ = volume_file.split("/")[-1].split(".")
         snapshot = f"backup_{datestring}"
         retcode, retmsg = ceph.remove_snapshot(zkhandler, pool, volume, snapshot)
         if not retcode:
@@ -1568,18 +1585,24 @@ def remove_backup(zkhandler, domain, source_path, datestring):
     retlines = list()

     if is_snapshot_remove_failed:
-        retlines.append(f"WARNING: Failed to remove snapshot(s) as requested for volume(s) {', '.join(which_snapshot_remove_failed)}: {', '.join(msg_snapshot_remove_failed)}")
+        retlines.append(
+            f"WARNING: Failed to remove snapshot(s) as requested for volume(s) {', '.join(which_snapshot_remove_failed)}: {', '.join(msg_snapshot_remove_failed)}"
+        )

     if is_files_remove_failed:
-        retlines.append(f"WARNING: Failed to remove backup file(s) from {source_path}: {msg_files_remove_failed}")
+        retlines.append(
+            f"WARNING: Failed to remove backup file(s) from {backup_path}: {msg_files_remove_failed}"
+        )

     myhostname = gethostname().split(".")[0]
-    retlines.append(f"Removed VM backup {datestring} for '{domain}' from '{myhostname}:{source_path}' in {ttot}s.")
+    retlines.append(
+        f"Removed VM backup {datestring} for '{domain}' from '{myhostname}:{backup_path}' in {ttot}s."
+    )

-    return True, '\n'.join(retlines)
+    return True, "\n".join(retlines)


-def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
+def restore_vm(zkhandler, domain, backup_path, datestring, retain_snapshot=False):
     tstart = time.time()

     # 0. Validations
@@ -1592,23 +1615,25 @@ def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
         )

     # Validate that the source path is valid
-    if not re.match(r"^/", source_path):
+    if not re.match(r"^/", backup_path):
         return (
             False,
-            f"ERROR: Source path {source_path} is not a valid absolute path on the primary coordinator!",
+            f"ERROR: Source path {backup_path} is not a valid absolute path on the primary coordinator!",
         )

-    # Ensure that source_path (on this node) exists
-    if not os.path.isdir(source_path):
-        return False, f"ERROR: Source path {source_path} does not exist!"
+    # Ensure that backup_path (on this node) exists
+    if not os.path.isdir(backup_path):
+        return False, f"ERROR: Source path {backup_path} does not exist!"

     # Ensure that domain path (on this node) exists
-    backup_source_path = f"{source_path}/{domain}"
-    if not os.path.isdir(backup_source_path):
-        return False, f"ERROR: Source VM path {backup_source_path} does not exist!"
+    backup_backup_path = f"{backup_path}/{domain}"
+    if not os.path.isdir(backup_backup_path):
+        return False, f"ERROR: Source VM path {backup_backup_path} does not exist!"

     # Ensure that the archives are present
-    backup_source_pvcbackup_file = f"{backup_source_path}/{domain}.{datestring}.pvcbackup"
+    backup_source_pvcbackup_file = (
+        f"{backup_backup_path}/{domain}.{datestring}.pvcbackup"
+    )
     if not os.path.isfile(backup_source_pvcbackup_file):
         return False, "ERROR: The specified source backup files do not exist!"
@@ -1623,7 +1648,7 @@ def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
     incremental_parent = backup_source_details.get("incremental_parent", None)
     if incremental_parent is not None:
         backup_source_parent_pvcbackup_file = (
-            f"{backup_source_path}/{domain}.{incremental_parent}.pvcbackup"
+            f"{backup_backup_path}/{domain}.{incremental_parent}.pvcbackup"
         )
         if not os.path.isfile(backup_source_parent_pvcbackup_file):
             return (
@@ -1635,7 +1660,10 @@ def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
             with open(backup_source_parent_pvcbackup_file) as fh:
                 backup_source_parent_details = jload(fh)
         except Exception as e:
-            return False, f"ERROR: Failed to read source incremental parent backup details: {e}"
+            return (
+                False,
+                f"ERROR: Failed to read source incremental parent backup details: {e}",
+            )

     # 2. Import VM config and metadata in provision state
     try:
@@ -1661,12 +1689,20 @@ def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
     which_snapshot_remove_failed = list()
     msg_snapshot_remove_failed = list()
     if incremental_parent is not None:
-        for volume_file, volume_size in backup_source_details.get('backup_files'):
-            pool, volume, _ = volume_file.split('/')[-1].split('.')
+        for volume_file, volume_size in backup_source_details.get("backup_files"):
+            pool, volume, _ = volume_file.split("/")[-1].split(".")
             try:
-                parent_volume_file = [f[0] for f in backup_source_parent_details.get('backup_files') if f[0].split('/')[-1].replace('.rbdimg', '') == volume_file.split('/')[-1].replace('.rbddiff', '')][0]
+                parent_volume_file = [
+                    f[0]
+                    for f in backup_source_parent_details.get("backup_files")
+                    if f[0].split("/")[-1].replace(".rbdimg", "")
+                    == volume_file.split("/")[-1].replace(".rbddiff", "")
+                ][0]
             except Exception as e:
-                return False, f"ERROR: Failed to find parent volume for volume {pool}/{volume}; backup may be corrupt or invalid: {e}"
+                return (
+                    False,
+                    f"ERROR: Failed to find parent volume for volume {pool}/{volume}; backup may be corrupt or invalid: {e}",
+                )

             # First we create the expected volumes then clean them up
             #   This process is a bit of a hack because rbd import does not expect an existing volume,
@@ -1681,27 +1717,45 @@ def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
                 f"rbd remove {pool}/{volume}"
             )
             if retcode:
-                return False, f"ERROR: Failed to remove temporary RBD volume '{pool}/{volume}': {stderr}"
+                return (
+                    False,
+                    f"ERROR: Failed to remove temporary RBD volume '{pool}/{volume}': {stderr}",
+                )

             # Next we import the parent images
             retcode, stdout, stderr = common.run_os_command(
-                f"rbd import --export-format 2 --dest-pool {pool} {source_path}/{domain}/{parent_volume_file} {volume}"
+                f"rbd import --export-format 2 --dest-pool {pool} {backup_path}/{domain}/{parent_volume_file} {volume}"
             )
             if retcode:
-                return False, f"ERROR: Failed to import parent backup image {parent_volume_file}: {stderr}"
+                return (
+                    False,
+                    f"ERROR: Failed to import parent backup image {parent_volume_file}: {stderr}",
+                )

             # Then we import the incremental diffs
             retcode, stdout, stderr = common.run_os_command(
-                f"rbd import-diff {source_path}/{domain}/{volume_file} {pool}/{volume}"
+                f"rbd import-diff {backup_path}/{domain}/{volume_file} {pool}/{volume}"
             )
             if retcode:
-                return False, f"ERROR: Failed to import incremental backup image {volume_file}: {stderr}"
+                return (
+                    False,
+                    f"ERROR: Failed to import incremental backup image {volume_file}: {stderr}",
+                )

             # Finally we remove the parent and child snapshots (no longer required required)
             if retain_snapshot:
-                retcode, retmsg = ceph.add_snapshot(zkhandler, pool, volume, f"backup_{incremental_parent}", zk_only=True)
+                retcode, retmsg = ceph.add_snapshot(
+                    zkhandler,
+                    pool,
+                    volume,
+                    f"backup_{incremental_parent}",
+                    zk_only=True,
+                )
                 if not retcode:
-                    return False, f"ERROR: Failed to add imported image snapshot for {parent_volume_file}: {retmsg}"
+                    return (
+                        False,
+                        f"ERROR: Failed to add imported image snapshot for {parent_volume_file}: {retmsg}",
+                    )
             else:
                 retcode, stdout, stderr = common.run_os_command(
                     f"rbd snap rm {pool}/{volume}@backup_{incremental_parent}"
@@ -1719,8 +1773,8 @@ def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
                     msg_snapshot_remove_failed.append(retmsg)
     else:
-        for volume_file, volume_size in backup_source_details.get('backup_files'):
-            pool, volume, _ = volume_file.split('/')[-1].split('.')
+        for volume_file, volume_size in backup_source_details.get("backup_files"):
+            pool, volume, _ = volume_file.split("/")[-1].split(".")

             # First we create the expected volumes then clean them up
             #   This process is a bit of a hack because rbd import does not expect an existing volume,
@@ -1735,26 +1789,44 @@ def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
                 f"rbd remove {pool}/{volume}"
             )
             if retcode:
-                return False, f"ERROR: Failed to remove temporary RBD volume '{pool}/{volume}': {stderr}"
+                return (
+                    False,
+                    f"ERROR: Failed to remove temporary RBD volume '{pool}/{volume}': {stderr}",
+                )

             # Then we perform the actual import
             retcode, stdout, stderr = common.run_os_command(
-                f"rbd import --export-format 2 --dest-pool {pool} {source_path}/{domain}/{volume_file} {volume}"
+                f"rbd import --export-format 2 --dest-pool {pool} {backup_path}/{domain}/{volume_file} {volume}"
             )
             if retcode:
-                return False, f"ERROR: Failed to import backup image {volume_file}: {stderr}"
+                return (
+                    False,
+                    f"ERROR: Failed to import backup image {volume_file}: {stderr}",
+                )

             # Finally we remove the source snapshot (not required)
             if retain_snapshot:
-                retcode, retmsg = ceph.add_snapshot(zkhandler, pool, volume, f"backup_{incremental_parent}", zk_only=True)
+                retcode, retmsg = ceph.add_snapshot(
+                    zkhandler,
+                    pool,
+                    volume,
+                    f"backup_{incremental_parent}",
+                    zk_only=True,
+                )
                 if not retcode:
-                    return False, f"ERROR: Failed to add imported image snapshot for {volume_file}: {retmsg}"
+                    return (
+                        False,
+                        f"ERROR: Failed to add imported image snapshot for {volume_file}: {retmsg}",
+                    )
             else:
                 retcode, stdout, stderr = common.run_os_command(
                     f"rbd snap rm {pool}/{volume}@backup_{datestring}"
                 )
                 if retcode:
-                    return False, f"ERROR: Failed to remove imported image snapshot for {volume_file}: {stderr}"
+                    return (
+                        False,
+                        f"ERROR: Failed to remove imported image snapshot for {volume_file}: {stderr}",
+                    )

     # 5. Start VM
     retcode, retmsg = start_vm(zkhandler, domain)
@@ -1766,9 +1838,13 @@ def restore_vm(zkhandler, domain, source_path, datestring, retain_snapshot=False):
     retlines = list()

     if is_snapshot_remove_failed:
-        retlines.append(f"WARNING: Failed to remove hanging snapshot(s) as requested for volume(s) {', '.join(which_snapshot_remove_failed)}: {', '.join(msg_snapshot_remove_failed)}")
+        retlines.append(
+            f"WARNING: Failed to remove hanging snapshot(s) as requested for volume(s) {', '.join(which_snapshot_remove_failed)}: {', '.join(msg_snapshot_remove_failed)}"
+        )

     myhostname = gethostname().split(".")[0]

-    retlines.append(f"Successfully restored VM backup {datestring} for '{domain}' from '{myhostname}:{source_path}' in {ttot}s.")
+    retlines.append(
+        f"Successfully restored VM backup {datestring} for '{domain}' from '{myhostname}:{backup_path}' in {ttot}s."
+    )

-    return True, '\n'.join(retlines)
+    return True, "\n".join(retlines)
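
One detail made more readable by this commit's reformatting is the comprehension in restore_vm that pairs each incremental .rbddiff export with its parent's .rbdimg export by comparing basenames with the extensions stripped. The same rule as a standalone sketch, with hypothetical filenames in the same shape as the "backup_files" entries:

def find_parent_volume_file(volume_file, parent_backup_files):
    # Mirrors the list comprehension in restore_vm: match on the basename with
    # ".rbdimg"/".rbddiff" stripped, e.g. "vms.web01_disk0".
    return [
        f[0]
        for f in parent_backup_files
        if f[0].split("/")[-1].replace(".rbdimg", "")
        == volume_file.split("/")[-1].replace(".rbddiff", "")
    ][0]


# Hypothetical example data (pool "vms", volume "web01_disk0"):
parent_files = [("web01.20231023010000.pvcdisks/vms.web01_disk0.rbdimg", 21474836480)]
incremental = "web01.20231024012044.pvcdisks/vms.web01_disk0.rbddiff"
print(find_parent_volume_file(incremental, parent_files))
# -> web01.20231023010000.pvcdisks/vms.web01_disk0.rbdimg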