Add initial implementation of snapshot export

Joshua Boniface 2024-08-19 18:46:07 -04:00
parent 9a435fe2ae
commit d060787503
6 changed files with 395 additions and 0 deletions


@@ -3290,6 +3290,68 @@ class API_VM_Snapshot_Rollback(Resource):
api.add_resource(API_VM_Snapshot_Rollback, "/vm/<vm>/snapshot/rollback")
# /vm/<vm>/snapshot/export
class API_VM_Snapshot_Export(Resource):
@RequestParser(
[
{
"name": "snapshot_name",
"required": True,
"helptext": "A snapshot name must be specified",
},
{
"name": "export_path",
"required": True,
"helptext": "An absolute directory path on the PVC primary coordinator to export files to",
},
{
"name": "incremental_parent",
"required": False,
"helptext": "A snapshot name to generate an incremental diff from",
},
]
)
@Authenticator
def post(self, vm, reqargs):
"""
Export a snapshot of a VM's disks and configuration to files
---
tags:
- vm
parameters:
- in: query
name: snapshot_name
type: string
required: true
description: The name of the snapshot to export
- in: query
name: export_path
type: string
required: true
description: The absolute directory path on the PVC primary coordinator to export files to
- in: query
name: incremental_parent
type: string
required: false
description: A snapshot name to generate an incremental diff from
responses:
200:
description: OK
schema:
type: object
id: Message
400:
description: Execution error
schema:
type: object
id: Message
404:
description: Not found
schema:
type: object
id: Message
"""
snapshot_name = reqargs.get("snapshot_name", None)
export_path = reqargs.get("export_path", None)
incremental_parent = reqargs.get("incremental_parent", None)
return api_helper.export_vm_snapshot(
vm, snapshot_name, export_path, incremental_parent
)
api.add_resource(API_VM_Snapshot_Export, "/vm/<vm>/snapshot/export")
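For reference, a minimal client-side sketch (not part of this commit) of how the new endpoint can be called; the host, port, API prefix, and X-Api-Key header are assumptions about a typical PVC API deployment, while the query parameters mirror the RequestParser above.

import requests

# Hypothetical cluster address and API key; adjust for your deployment.
response = requests.post(
    "http://pvc.local:7370/api/v1/vm/testvm/snapshot/export",
    headers={"X-Api-Key": "MY_API_KEY"},
    params={
        "snapshot_name": "snap20240819",
        "export_path": "/srv/vm-exports",
        # Optional: generate an incremental diff against an earlier snapshot
        # "incremental_parent": "snap20240812",
    },
)
print(response.status_code, response.json().get("message"))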
##########################################################
# Client API - Network
##########################################################


@@ -837,6 +837,34 @@ def rollback_vm_snapshot(
return output, retcode
@ZKConnection(config)
def export_vm_snapshot(
zkhandler,
domain,
snapshot_name,
export_path,
incremental_parent=None,
):
"""
Export a snapshot of a VM to files.
"""
retflag, retdata = pvc_vm.export_vm_snapshot(
zkhandler,
domain,
snapshot_name,
export_path,
incremental_parent,
)
if retflag:
retcode = 200
else:
retcode = 400
output = {"message": retdata.replace('"', "'")}
return output, retcode
@ZKConnection(config)
def vm_attach_device(zkhandler, vm, device_spec_xml):
"""


@@ -1870,6 +1870,45 @@ def cli_vm_snapshot_rollback(domain, snapshot_name):
finish(retcode, retmsg)
###############################################################################
# > pvc vm snapshot export
###############################################################################
@click.command(
name="export", short_help="Export a snapshot of a virtual machine to files."
)
@connection_req
@click.argument("domain")
@click.argument("snapshot_name")
@click.argument("export_path")
@click.option(
"-i",
"--incremental",
"incremental_parent",
default=None,
help="Perform an incremental volume backup from this parent snapshot.",
)
def cli_vm_snapshot_export(domain, snapshot_name, export_path, incremental_parent):
"""
Export the (existing) snapshot SNAPSHOT_NAME of virtual machine DOMAIN to the absolute path
EXPORT_PATH on the current PVC primary coordinator. DOMAIN may be a UUID or name.
"""
retcode, primary_node = pvc.lib.cluster.get_primary_node(CLI_CONFIG)
echo(
CLI_CONFIG,
f'Exporting snapshot "{snapshot_name}" of VM "{domain}" to "{export_path}" on "{primary_node}"...',
newline=False,
)
retcode, retmsg = pvc.lib.vm.vm_export_snapshot(
CLI_CONFIG, domain, snapshot_name, export_path, incremental_parent
)
if retcode:
echo(CLI_CONFIG, "done.")
else:
echo(CLI_CONFIG, "failed.")
finish(retcode, retmsg)
###############################################################################
# > pvc vm backup
###############################################################################
@@ -6410,6 +6449,7 @@ cli_vm.add_command(cli_vm_flush_locks)
cli_vm_snapshot.add_command(cli_vm_snapshot_create)
cli_vm_snapshot.add_command(cli_vm_snapshot_remove)
cli_vm_snapshot.add_command(cli_vm_snapshot_rollback)
cli_vm_snapshot.add_command(cli_vm_snapshot_export)
cli_vm.add_command(cli_vm_snapshot)
cli_vm_backup.add_command(cli_vm_backup_create)
cli_vm_backup.add_command(cli_vm_backup_restore)


@@ -21,6 +21,8 @@
import json
from time import sleep
from pvc.lib.common import call_api
@@ -114,3 +116,22 @@ def get_info(config):
return True, response.json()
else:
return False, response.json().get("message", "")
def get_primary_node(config):
"""
Get the current primary node of the PVC cluster
API endpoint: GET /status/primary_node
API arguments:
API schema: {json_data_object}
"""
while True:
response = call_api(config, "get", "/status/primary_node")
resp_code = response.status_code
if resp_code == 200:
break
else:
sleep(1)
return True, response.json()["primary_node"]


@@ -557,6 +557,32 @@ def vm_rollback_snapshot(config, vm, snapshot_name):
return True, response.json().get("message", "")
def vm_export_snapshot(config, vm, snapshot_name, export_path, incremental_parent):
"""
Export an (existing) snapshot of a VM's disks and configuration to export_path, optionally
as an incremental diff against incremental_parent
API endpoint: POST /vm/{vm}/snapshot/export
API arguments: snapshot_name=snapshot_name, export_path=export_path, incremental_parent=incremental_parent
API schema: {"message":"{data}"}
"""
params = {
"snapshot_name": snapshot_name,
"export_path": export_path,
}
if incremental_parent is not None:
params["incremental_parent"] = incremental_parent
response = call_api(
config, "post", "/vm/{vm}/snapshot/export".format(vm=vm), params=params
)
if response.status_code != 200:
return False, response.json().get("message", "")
else:
return True, response.json().get("message", "")
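As a usage illustration (not part of this commit), the new library call can also be driven directly from a script, for example to export a full snapshot followed by a chain of incrementals; the config argument is assumed to be the same connection dict used with call_api elsewhere in pvc.lib.

from pvc.lib.vm import vm_export_snapshot

def export_snapshot_chain(config, vm, snapshot_names, export_path):
    # Export the first snapshot in full, then each subsequent one as an
    # incremental diff against the snapshot exported just before it.
    parent = None
    for snapshot_name in snapshot_names:
        retstatus, message = vm_export_snapshot(
            config, vm, snapshot_name, export_path, parent
        )
        if not retstatus:
            return False, message
        parent = snapshot_name
    return True, f"Exported {len(snapshot_names)} snapshot(s) of '{vm}' to '{export_path}'"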
def vm_vcpus_set(config, vm, vcpus, topology, restart):
"""
Set the vCPU count of the VM with topology


@@ -1459,6 +1459,224 @@ def rollback_vm_snapshot(zkhandler, domain, snapshot_name):
)
def export_vm_snapshot(
zkhandler, domain, snapshot_name, export_path, incremental_parent=None
):
# 0b. Validations part 1
# Validate that the target path is valid
if not re.match(r"^/", export_path):
return (
False,
f"ERROR: Target path {export_path} is not a valid absolute path on the primary coordinator!",
)
# Ensure that export_path (on this node) exists
if not os.path.isdir(export_path):
return (
False,
f"ERROR: Target path {export_path} does not exist!",
)
# 1a. Create destination directory
export_target_root = f"{export_path}/{domain}"
export_target_path = f"{export_path}/{domain}/{snapshot_name}/images"
if not os.path.isdir(export_target_path):
try:
os.makedirs(export_target_path)
except Exception as e:
return (
False,
f"ERROR: Failed to create target directory {export_target_path}: {e}",
)
tstart = time.time()
export_type = "incremental" if incremental_parent is not None else "full"
# 1b. Prepare export JSON writer (it will write on any result)
def write_export_json(
result=False,
result_message="",
vm_configuration=None,
export_files=None,
export_files_size=0,
ttot=None,
):
if ttot is None:
tend = time.time()
ttot = round(tend - tstart, 2)
export_details = {
"type": export_type,
"snapshot_name": snapshot_name,
"incremental_parent": incremental_parent,
"result": result,
"result_message": result_message,
"runtime_secs": ttot,
"vm_configuration": vm_configuration,
"export_files": export_files,
"export_size_bytes": export_files_size,
}
with open(f"{export_target_root}/{snapshot_name}/snapshot.json", "w") as fh:
jdump(export_details, fh)
# 2. Validations part 2
# Validate that VM exists in cluster
dom_uuid = getDomainUUID(zkhandler, domain)
if not dom_uuid:
error_message = f'Could not find VM "{domain}" in the cluster!'
write_export_json(result=False, result_message=f"ERROR: {error_message}")
return False, f"ERROR: {error_message}"
# Validate that the given snapshot exists
if not zkhandler.exists(
("domain.snapshots", dom_uuid, "domain_snapshot.name", snapshot_name)
):
error_message = f'Could not find snapshot "{snapshot_name}" of VM "{domain}"!'
write_export_json(result=False, result_message=f"ERROR: {error_message}")
return False, f"ERROR: {error_message}"
if incremental_parent is not None and not zkhandler.exists(
("domain.snapshots", dom_uuid, "domain_snapshot.name", incremental_parent)
):
error_message = f'Could not find snapshot "{incremental_parent}" of VM "{domain}"!'
write_export_json(result=False, result_message=f"ERROR: {error_message}")
return False, f"ERROR: {error_message}"
# Get details about VM snapshot
_, snapshot_timestamp, snapshot_xml, snapshot_rbdsnaps = zkhandler.read_many(
[
(
(
"domain.snapshots",
dom_uuid,
"domain_snapshot.name",
snapshot_name,
)
),
(
(
"domain.snapshots",
dom_uuid,
"domain_snapshot.timestamp",
snapshot_name,
)
),
(
(
"domain.snapshots",
dom_uuid,
"domain_snapshot.xml",
snapshot_name,
)
),
(
(
"domain.snapshots",
dom_uuid,
"domain_snapshot.rbd_snapshots",
snapshot_name,
)
),
]
)
snapshot_volumes = list()
for rbdsnap in snapshot_rbdsnaps.split(","):
pool, _volume = rbdsnap.split("/")
volume, name = _volume.split("@")
ret, snapshots = ceph.get_list_snapshot(
zkhandler, pool, volume, limit=name, is_fuzzy=False
)
if ret:
snapshot_volumes += snapshots
# 5. Set the export file extension: full exports are raw RBD images, incremental exports are RBD diffs
if incremental_parent is not None:
export_fileext = "rbddiff"
else:
export_fileext = "rbdimg"
# 6. Dump snapshot to folder with `rbd export` (full) or `rbd export-diff` (incremental)
is_snapshot_export_failed = False
which_snapshot_export_failed = list()
export_files = list()
for snapshot_volume in snapshot_volumes:
pool = snapshot_volume["pool"]
volume = snapshot_volume["volume"]
snapshot_name = snapshot_volume["snapshot"]
size = snapshot_volume["stats"]["size"]
if incremental_parent is not None:
retcode, stdout, stderr = common.run_os_command(
f"rbd export-diff --from-snap {incremental_parent} {pool}/{volume}@{snapshot_name} {export_target_path}/{pool}.{volume}.{export_fileext}"
)
if retcode:
is_snapshot_export_failed = True
which_snapshot_export_failed.append(f"{pool}/{volume}")
else:
export_files.append((f"images/{pool}.{volume}.{export_fileext}", size))
else:
retcode, stdout, stderr = common.run_os_command(
f"rbd export --export-format 2 {pool}/{volume}@{snapshot_name} {export_target_path}/{pool}.{volume}.{export_fileext}"
)
if retcode:
is_snapshot_export_failed = True
which_snapshot_export_failed.append(f"{pool}/{volume}")
else:
export_files.append((f"images/{pool}.{volume}.{export_fileext}", size))
def get_dir_size(path):
total = 0
with scandir(path) as it:
for entry in it:
if entry.is_file():
total += entry.stat().st_size
elif entry.is_dir():
total += get_dir_size(entry.path)
return total
export_files_size = get_dir_size(export_target_path)
if is_snapshot_export_failed:
error_message = f'Failed to export snapshot for volume(s) {", ".join(which_snapshot_export_failed)}'
write_export_json(
result=False,
result_message=f"ERROR: {error_message}",
vm_configuration=snapshot_xml,
export_files=export_files,
export_files_size=export_files_size,
)
return (
False,
f"ERROR: {error_message}",
)
tend = time.time()
ttot = round(tend - tstart, 2)
retlines = list()
myhostname = gethostname().split(".")[0]
result_message = f"Successfully exported VM '{domain}' snapshot '{snapshot_name}' ({export_type}) to '{myhostname}:{export_path}' in {ttot}s."
retlines.append(result_message)
write_export_json(
result=True,
result_message=result_message,
vm_configuration=snapshot_xml,
export_files=export_files,
export_files_size=export_files_size,
ttot=ttot,
)
return True, "\n".join(retlines)
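The exporter leaves {export_path}/{domain}/{snapshot_name}/snapshot.json alongside an images/ directory of .rbdimg or .rbddiff files. As a sketch only (this helper is hypothetical and not part of this commit), the manifest written by write_export_json() could be read back to verify an export:

import json
import os

def verify_export(export_path, domain, snapshot_name):
    # Read the snapshot.json manifest and confirm that every listed export
    # file exists on disk and is non-empty.
    snapshot_root = os.path.join(export_path, domain, snapshot_name)
    with open(os.path.join(snapshot_root, "snapshot.json"), "r") as fh:
        manifest = json.load(fh)
    if not manifest["result"]:
        return False, manifest["result_message"]
    for relpath, _size in manifest["export_files"]:
        filepath = os.path.join(snapshot_root, relpath)
        if not os.path.isfile(filepath) or os.path.getsize(filepath) == 0:
            return False, f"Missing or empty export file: {relpath}"
    return True, manifest["result_message"]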
#
# VM Backup Tasks
#