Finish implementing snapshot import

Joshua Boniface 2024-08-20 11:08:14 -04:00
parent 44232fe3c6
commit 7cc354466f
5 changed files with 220 additions and 27 deletions

@@ -3362,6 +3362,79 @@ class API_VM_Snapshot_Export(Resource):
api.add_resource(API_VM_Snapshot_Export, "/vm/<vm>/snapshot/export")
# /vm/<vm>/snapshot/import
class API_VM_Snapshot_Import(Resource):
@RequestParser(
[
{
"name": "snapshot_name",
"required": True,
"helptext": "A snapshot name must be specified",
},
{
"name": "import_path",
"required": True,
"helptext": "An absolute directory path on the PVC primary coordinator to import files from",
},
{
"name": "retain_snapshot",
"required": False,
"helptext": "Whether to retain the snapshot of the import or not (default: true)",
},
]
)
@Authenticator
def post(self, vm, reqargs):
"""
Import a snapshot of a VM's disks and configuration from files
---
tags:
- vm
parameters:
- in: query
name: snapshot_name
type: string
required: true
description: The name of the snapshot to import
- in: query
name: import_path
type: string (path)
required: true
description: The absolute file path to import the snapshot from on the active primary coordinator
- in: query
name: retain_snapshot
type: boolean
required: false
default: true
description: Whether or not to retain the (parent, if incremental) volume snapshot after import
responses:
200:
description: OK
schema:
type: object
id: Message
400:
description: Execution error
schema:
type: object
id: Message
404:
description: Not found
schema:
type: object
id: Message
"""
snapshot_name = reqargs.get("snapshot_name", None)
import_path = reqargs.get("import_path", None)
retain_snapshot = bool(strtobool(reqargs.get("retain_snapshot", "True")))
return api_helper.import_vm_snapshot(
vm, snapshot_name, import_path, retain_snapshot
)
api.add_resource(API_VM_Snapshot_Import, "/vm/<vm>/snapshot/import")
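For reference, a minimal sketch of calling this new endpoint directly over HTTP, assuming the usual /api/v1 prefix on port 7370 and token authentication via the X-Api-Key header; the cluster address, key, VM name, and import path below are illustrative only and not part of this commit:

# Hypothetical client-side call to the new import endpoint (address, port,
# auth header, VM name, and paths are assumptions for illustration).
import requests

resp = requests.post(
    "http://pvc.example.com:7370/api/v1/vm/testvm/snapshot/import",
    headers={"X-Api-Key": "MYSECRETTOKEN"},
    params={
        "snapshot_name": "snap20240820",
        "import_path": "/srv/vm-exports",
        "retain_snapshot": "true",
    },
)
print(resp.status_code, resp.json().get("message"))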
##########################################################
# Client API - Network
##########################################################

@@ -865,6 +865,34 @@ def export_vm_snapshot(
return output, retcode
@ZKConnection(config)
def import_vm_snapshot(
zkhandler,
domain,
snapshot_name,
export_path,
retain_snapshot=False,
):
"""
Import a snapshot of a VM from files.
"""
retflag, retdata = pvc_vm.import_vm_snapshot(
zkhandler,
domain,
snapshot_name,
export_path,
retain_snapshot,
)
if retflag:
retcode = 200
else:
retcode = 400
output = {"message": retdata.replace('"', "'")}
return output, retcode
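On success this helper returns HTTP 200 with a body of the form below; on failure it returns HTTP 400 with an "ERROR: ..." message. The values (VM name, snapshot, host, path, timing) are illustrative; the wording follows the success message built by the importer, which may also include additional per-volume lines:

{"message": "Successfully imported VM 'testvm' at snapshot 'snap20240820' from 'hv1:/srv/vm-exports' in 42.31s."}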
@ZKConnection(config)
def vm_attach_device(zkhandler, vm, device_spec_xml):
"""

@@ -1885,12 +1885,20 @@ def cli_vm_snapshot_rollback(domain, snapshot_name):
"--incremental",
"incremental_parent",
default=None,
help="Perform an incremental volume backup from this parent snapshot.",
help="Perform an incremental volume export from this parent snapshot.",
)
def cli_vm_snapshot_export(domain, snapshot_name, export_path, incremental_parent):
"""
Export the (existing) snapshot SNAPSHOT_NAME of virtual machine DOMAIN to the absolute path
EXPORT_PATH on the current PVC primary coordinator. DOMAIN may be a UUID or name.
Export the (existing) snapshot SNAPSHOT_NAME of virtual machine DOMAIN to the absolute path EXPORT_PATH on the current PVC primary coordinator.
DOMAIN may be a UUID or name.
EXPORT_PATH must be a valid absolute directory path on the cluster "primary" coordinator (see "pvc node list") allowing writes from the API daemon (normally running as "root"). The EXPORT_PATH should be a large storage volume, ideally a remotely mounted filesystem (e.g. NFS, SSHFS, etc.) or non-Ceph-backed disk. PVC does not handle this path; it is up to the administrator to configure and manage.
The export will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
Incremental exports are possible by specifying the "-i"/"--incremental" option along with a parent snapshot name. To correctly import an incremental export later, the parent export must also exist under EXPORT_PATH.
Full export volume images are sparse-allocated; however, it is recommended for safety to consider their maximum allocated size when allocating space for the EXPORT_PATH. Incremental volume images are generally small, but their size depends entirely on the rate of data change in each volume.
"""
_, primary_node = pvc.lib.cluster.get_primary_node(CLI_CONFIG)
@@ -1909,6 +1917,53 @@ def cli_vm_snapshot_export(domain, snapshot_name, export_path, incremental_paren
finish(retcode, retmsg)
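For example, a full export followed by an incremental export against that parent might look like the following (VM name, snapshot names, and path are illustrative):

pvc vm snapshot export testvm snap20240819 /srv/vm-exports
pvc vm snapshot export testvm snap20240820 /srv/vm-exports --incremental snap20240819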
###############################################################################
# > pvc vm snapshot import
###############################################################################
@click.command(name="import", short_help="Import a snapshot of a virtual machine.")
@connection_req
@click.argument("domain")
@click.argument("snapshot_name")
@click.argument("import_path")
@click.option(
"-r/-R",
"--retain-snapshot/--remove-snapshot",
"retain_snapshot",
is_flag=True,
default=True,
help="Retain or remove restored (parent, if incremental) snapshot in Ceph.",
)
def cli_vm_snapshot_import(domain, snapshot_name, import_path, retain_snapshot):
"""
Import the snapshot SNAPSHOT_NAME of virtual machine DOMAIN from the absolute path IMPORT_PATH on the current PVC primary coordinator.
DOMAIN may be a UUID or name.
IMPORT_PATH must be a valid absolute directory path on the cluster "primary" coordinator (see "pvc node list") allowing reads from the API daemon (normally running as "root"). The IMPORT_PATH should be a large storage volume, ideally a remotely mounted filesystem (e.g. NFS, SSHFS, etc.) or non-Ceph-backed disk. PVC does not handle this path; it is up to the administrator to configure and manage.
The import will include the VM configuration, metainfo, and the point-in-time snapshot of all attached RBD volumes. Incremental imports will be automatically handled.
A VM named DOMAIN or with the same UUID must not exist; if a VM with the same name or UUID already exists, it must be removed, or renamed and then undefined (to preserve volumes), before importing.
If the "-r"/"--retain-snapshot" option is specified (the default), for incremental imports, only the parent snapshot is kept; for full imports, the imported snapshot is kept. If the "-R"/"--remove-snapshot" option is specified, the imported snapshot is removed.
WARNING: The "-R"/"--remove-snapshot" option will invalidate any existing incremental snapshots based on the same incremental parent for the imported VM.
"""
echo(
CLI_CONFIG,
f"Importing snapshot '{snapshot_name}' of VM '{domain}'... ",
newline=False,
)
retcode, retmsg = pvc.lib.vm.vm_snapshot_import(
CLI_CONFIG, domain, snapshot_name, import_path, retain_snapshot
)
if retcode:
echo(CLI_CONFIG, "done.")
else:
echo(CLI_CONFIG, "failed.")
finish(retcode, retmsg)
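For example, importing an incremental export of a VM, first with the default retention behaviour and then removing the imported snapshot afterwards (names and path are illustrative):

pvc vm snapshot import testvm snap20240820 /srv/vm-exports
pvc vm snapshot import testvm snap20240820 /srv/vm-exports --remove-snapshot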
###############################################################################
# > pvc vm backup
###############################################################################
@@ -6450,6 +6505,7 @@ cli_vm_snapshot.add_command(cli_vm_snapshot_create)
cli_vm_snapshot.add_command(cli_vm_snapshot_remove)
cli_vm_snapshot.add_command(cli_vm_snapshot_rollback)
cli_vm_snapshot.add_command(cli_vm_snapshot_export)
cli_vm_snapshot.add_command(cli_vm_snapshot_import)
cli_vm.add_command(cli_vm_snapshot)
cli_vm_backup.add_command(cli_vm_backup_create)
cli_vm_backup.add_command(cli_vm_backup_restore)

@@ -583,6 +583,29 @@ def vm_export_snapshot(config, vm, snapshot_name, export_path, incremental_paren
return True, response.json().get("message", "")
def vm_import_snapshot(config, vm, snapshot_name, import_path, retain_snapshot=False):
"""
Import a snapshot of {vm} and its volumes from a local primary coordinator filesystem path
API endpoint: POST /vm/{vm}/snapshot/import
API arguments: snapshot_name={snapshot_name}, import_path={import_path}, retain_snapshot={retain_snapshot}
API schema: {"message":"{data}"}
"""
params = {
"snapshot_name": snapshot_name,
"import_path": import_path,
"retain_snapshot": retain_snapshot,
}
response = call_api(
config, "post", "/vm/{vm}/snapshot/import".format(vm=vm), params=params
)
if response.status_code != 200:
return False, response.json().get("message", "")
else:
return True, response.json().get("message", "")
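A minimal sketch of calling this client function directly, outside the CLI wrapper; "config" stands in for the same configuration mapping the CLI passes as CLI_CONFIG, and the VM name, snapshot name, and path are illustrative:

# Hypothetical direct use of the client library helper; it returns a
# (success_flag, message) tuple exactly as the CLI consumes it.
retflag, retmsg = vm_import_snapshot(
    config, "testvm", "snap20240820", "/srv/vm-exports", retain_snapshot=True
)
if retflag:
    print(f"OK: {retmsg}")
else:
    print(f"Failed: {retmsg}")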
def vm_vcpus_set(config, vm, vcpus, topology, restart):
"""
Set the vCPU count of the VM with topology
@@ -1729,6 +1752,7 @@ def format_info(config, domain_information, long_output):
"unmigrate": ansiprint.blue(),
"provision": ansiprint.blue(),
"restore": ansiprint.blue(),
"import": ansiprint.blue(),
}
ainformation.append(
"{}State:{} {}{}{}".format(

@@ -1512,7 +1512,7 @@ def export_vm_snapshot(
"result": result,
"result_message": result_message,
"runtime_secs": ttot,
"vm_configuration": vm_configuration,
"vm_detail": vm_detail,
"export_files": export_files,
"export_size_bytes": export_files_size,
}
@@ -1527,7 +1527,14 @@ def export_vm_snapshot(
write_export_json(result=False, result_message=f"ERROR: {error_message}")
return False, f"ERROR: {error_message}"
# Validate that the given snapshot exists (and incremental parent exists if applicable)
# 3. Get information about VM
vm_detail = get_list(zkhandler, limit=dom_uuid, is_fuzzy=False)[1][0]
if not isinstance(vm_detail, dict):
error_message = f"VM listing returned invalid data: {vm_detail}"
write_export_json(result=False, result_message=f"ERROR: {error_message}")
return False, f"ERROR: {error_message}"
# 4. Validate that the given snapshot exists (and incremental parent exists if applicable)
if not zkhandler.exists(
("domain.snapshots", dom_uuid, "domain_snapshot.name", snapshot_name)
):
@@ -1584,6 +1591,10 @@ def export_vm_snapshot(
]
)
# Override the current XML with the snapshot XML, but keep all other metainfo current
vm_detail["xml"] = snapshot_xml
# Get the list of volumes
snapshot_volumes = list()
for rbdsnap in snapshot_rbdsnaps.split(","):
pool, _volume = rbdsnap.split("/")
@@ -1594,6 +1605,7 @@ def export_vm_snapshot(
if ret:
snapshot_volumes += snapshots
# Set the export filetype
if incremental_parent is not None:
export_fileext = "rbddiff"
else:
@@ -1645,7 +1657,7 @@ def export_vm_snapshot(
write_export_json(
result=False,
result_message=f"ERROR: {error_message}",
vm_configuration=snapshot_xml,
vm_detail=vm_detail,
export_files=export_files,
export_files_size=export_files_size,
)
@@ -1676,7 +1688,7 @@ def import_vm_snapshot(
def import_vm_snapshot(
zkhandler, domain, snapshot_name, export_path, retain_snapshot=False
zkhandler, domain, snapshot_name, import_path, retain_snapshot=False
):
tstart = time.time()
myhostname = gethostname().split(".")[0]
@@ -1691,23 +1703,23 @@ def import_vm_snapshot(
)
# Validate that the source path is valid
if not re.match(r"^/", export_path):
if not re.match(r"^/", import_path):
return (
False,
f"ERROR: Source path {export_path} is not a valid absolute path on the primary coordinator!",
f"ERROR: Source path {import_path} is not a valid absolute path on the primary coordinator!",
)
# Ensure that export_path (on this node) exists
if not os.path.isdir(export_path):
return False, f"ERROR: Source path {export_path} does not exist!"
# Ensure that import_path (on this node) exists
if not os.path.isdir(import_path):
return False, f"ERROR: Source path {import_path} does not exist!"
# Ensure that domain path (on this node) exists
vm_export_path = f"{export_path}/{domain}"
if not os.path.isdir(vm_export_path):
return False, f"ERROR: Source VM path {vm_export_path} does not exist!"
vm_import_path = f"{import_path}/{domain}"
if not os.path.isdir(vm_import_path):
return False, f"ERROR: Source VM path {vm_import_path} does not exist!"
# Ensure that the archives are present
export_source_snapshot_file = f"{vm_export_path}/{snapshot_name}/snapshot.json"
export_source_snapshot_file = f"{vm_import_path}/{snapshot_name}/snapshot.json"
if not os.path.isfile(export_source_snapshot_file):
return False, "ERROR: The specified source export files do not exist!"
@@ -1722,12 +1734,12 @@ def import_vm_snapshot(
incremental_parent = export_source_details.get("incremental_parent", None)
if incremental_parent is not None:
export_source_parent_snapshot_file = (
f"{vm_export_path}/{incremental_parent}/snapshot.json"
f"{vm_import_path}/{incremental_parent}/snapshot.json"
)
if not os.path.isfile(export_source_parent_snapshot_file):
return (
False,
"ERROR: This export is incremental but the required incremental parent files do not exist at '{myhostname}:{vm_export_path}/{incremental_parent}'!",
"ERROR: This export is incremental but the required incremental parent files do not exist at '{myhostname}:{vm_import_path}/{incremental_parent}'!",
)
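# For reference, the on-disk layout this importer expects under import_path,
# reconstructed from the paths checked above and used below (the actual
# volume file names are read from each export's snapshot.json details):
#   <import_path>/<domain>/<snapshot_name>/snapshot.json
#   <import_path>/<domain>/<snapshot_name>/<volume files>
#   <import_path>/<domain>/<incremental_parent>/snapshot.json          (incremental only)
#   <import_path>/<domain>/<incremental_parent>/<parent volume files>  (incremental only)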
try:
@@ -1752,10 +1764,10 @@ def import_vm_snapshot(
export_source_details["vm_detail"]["migration_max_downtime"],
export_source_details["vm_detail"]["profile"],
export_source_details["vm_detail"]["tags"],
"restore",
"import",
)
if not retcode:
return False, f"ERROR: Failed to define restored VM: {retmsg}"
return False, f"ERROR: Failed to define imported VM: {retmsg}"
except Exception as e:
return False, f"ERROR: Failed to parse VM export details: {e}"
@@ -1785,7 +1797,7 @@ def import_vm_snapshot(
# manually remove the RBD volume (leaving the PVC metainfo)
retcode, retmsg = ceph.add_volume(zkhandler, pool, volume, volume_size)
if not retcode:
return False, f"ERROR: Failed to create restored volume: {retmsg}"
return False, f"ERROR: Failed to create imported volume: {retmsg}"
retcode, stdout, stderr = common.run_os_command(
f"rbd remove {pool}/{volume}"
@@ -1798,7 +1810,7 @@ def import_vm_snapshot(
# Next we import the parent images
retcode, stdout, stderr = common.run_os_command(
f"rbd import --export-format 2 --dest-pool {pool} {export_path}/{domain}/{incremental_parent}/{parent_volume_file} {volume}"
f"rbd import --export-format 2 --dest-pool {pool} {import_path}/{domain}/{incremental_parent}/{parent_volume_file} {volume}"
)
if retcode:
return (
@@ -1808,7 +1820,7 @@ def import_vm_snapshot(
# Then we import the incremental diffs
retcode, stdout, stderr = common.run_os_command(
f"rbd import-diff {export_path}/{domain}/{snapshot_name}/{volume_file} {pool}/{volume}"
f"rbd import-diff {import_path}/{domain}/{snapshot_name}/{volume_file} {pool}/{volume}"
)
if retcode:
return (
@@ -1855,7 +1867,7 @@ def import_vm_snapshot(
# manually remove the RBD volume (leaving the PVC metainfo)
retcode, retmsg = ceph.add_volume(zkhandler, pool, volume, volume_size)
if not retcode:
return False, f"ERROR: Failed to create restored volume: {retmsg}"
return False, f"ERROR: Failed to create imported volume: {retmsg}"
retcode, stdout, stderr = common.run_os_command(
f"rbd remove {pool}/{volume}"
@@ -1868,7 +1880,7 @@ def import_vm_snapshot(
# Then we perform the actual import
retcode, stdout, stderr = common.run_os_command(
f"rbd import --export-format 2 --dest-pool {pool} {export_path}/{domain}/{snapshot_name}/{volume_file} {volume}"
f"rbd import --export-format 2 --dest-pool {pool} {import_path}/{domain}/{snapshot_name}/{volume_file} {volume}"
)
if retcode:
return (
@@ -1903,7 +1915,7 @@ def import_vm_snapshot(
# 5. Start VM
retcode, retmsg = start_vm(zkhandler, domain)
if not retcode:
return False, f"ERROR: Failed to start restored VM {domain}: {retmsg}"
return False, f"ERROR: Failed to start imported VM {domain}: {retmsg}"
tend = time.time()
ttot = round(tend - tstart, 2)
@@ -1915,7 +1927,7 @@ def import_vm_snapshot(
)
retlines.append(
f"Successfully imported VM '{domain}' at snapshot '{snapshot_name}' from '{myhostname}:{export_path}' in {ttot}s."
f"Successfully imported VM '{domain}' at snapshot '{snapshot_name}' from '{myhostname}:{import_path}' in {ttot}s."
)
return True, "\n".join(retlines)