Compare commits

...

9 Commits

Author SHA1 Message Date
Joshua Boniface 4a0680b27f Fix issues with snapshot imports 2024-08-20 13:59:05 -04:00
Joshua Boniface 6597f7aef6 Fix bad function call 2024-08-20 12:58:17 -04:00
Joshua Boniface f42a1bad0e Allow passing zk_only into VM snapshot creation 2024-08-20 12:57:53 -04:00
Joshua Boniface 3fb52a13c2 Add missing VM states from snapshots 2024-08-20 11:53:57 -04:00
Joshua Boniface 8937ddf331 Simplify VM rename to preserve data
A rename is simply a change to two values, so instead of undefining and
re-defining the VM, just edit those two fields. This ensures things like
snapshots are preserved automatically.
2024-08-20 11:37:28 -04:00
Joshua Boniface 7cc354466f Finish implementing snapshot import 2024-08-20 11:25:09 -04:00
Joshua Boniface 44232fe3c6 Fix export swagger definition 2024-08-20 11:07:56 -04:00
Joshua Boniface 0a8bad3418 Add VM snapshot import 2024-08-20 10:53:56 -04:00
Joshua Boniface f10d32987b Fix up comments 2024-08-20 10:37:58 -04:00
7 changed files with 538 additions and 36 deletions

View File

@ -3314,7 +3314,7 @@ class API_VM_Snapshot_Export(Resource):
@Authenticator
def post(self, vm, reqargs):
"""
Roll back to a snapshot of a VM's disks and configuration
Export a snapshot of a VM's disks and configuration to files
---
tags:
- vm
@ -3323,7 +3323,17 @@ class API_VM_Snapshot_Export(Resource):
name: snapshot_name
type: string
required: true
description: The name of the snapshot to roll back to
description: The name of the snapshot to export (must exist)
- in: query
name: export_path
type: string (path)
required: true
description: The absolute file path to export the snapshot to on the active primary coordinator
- in: query
name: incremental_parent
type: boolean
required: false
description: A snapshot name to generate an incremental diff from
responses:
200:
description: OK
@ -3352,6 +3362,79 @@ class API_VM_Snapshot_Export(Resource):
api.add_resource(API_VM_Snapshot_Export, "/vm/<vm>/snapshot/export")
# /vm/<vm>/snapshot/import
class API_VM_Snapshot_Import(Resource):
    @RequestParser(
        [
            {
                "name": "snapshot_name",
                "required": True,
                "helptext": "A snapshot name must be specified",
            },
            {
                "name": "import_path",
                "required": True,
                "helptext": "An absolute directory path on the PVC primary coordinator to import files from",
            },
            {
                "name": "retain_snapshot",
                "required": False,
                "helptext": "Whether to retain the snapshot of the import or not (default: true)",
            },
        ]
    )
    @Authenticator
    def post(self, vm, reqargs):
        """
        Import a snapshot of a VM's disks and configuration from files
        ---
        tags:
          - vm
        parameters:
          - in: query
            name: snapshot_name
            type: string
            required: true
            description: The name of the snapshot to import (must exist in the import path)
          - in: query
            name: import_path
            type: string (path)
            required: true
            description: The absolute file path to import the snapshot from on the active primary coordinator
          - in: query
            name: retain_snapshot
            type: boolean
            required: false
            default: true
            description: Whether or not to retain the (parent, if incremental) volume snapshot after import
        responses:
          200:
            description: OK
            schema:
              type: object
              id: Message
          400:
            description: Execution error
            schema:
              type: object
              id: Message
          404:
            description: Not found
            schema:
              type: object
              id: Message
        """
        snapshot_name = reqargs.get("snapshot_name", None)
        import_path = reqargs.get("import_path", None)
        # Absent query parameter defaults to retaining the snapshot
        retain_snapshot = bool(strtobool(reqargs.get("retain_snapshot", "True")))
        return api_helper.import_vm_snapshot(
            vm, snapshot_name, import_path, retain_snapshot
        )


api.add_resource(API_VM_Snapshot_Import, "/vm/<vm>/snapshot/import")
##########################################################
# Client API - Network
##########################################################

View File

@ -865,6 +865,34 @@ def export_vm_snapshot(
return output, retcode
@ZKConnection(config)
def import_vm_snapshot(
    zkhandler,
    domain,
    snapshot_name,
    export_path,
    retain_snapshot=False,
):
    """
    Import a snapshot of a VM from files.

    Note: despite its name, export_path is the directory the snapshot
    export is read *from* (it is the path the export was written to).
    Returns a (body, status) tuple for the API layer: 200 on success,
    400 on failure, with the backend message (quotes normalized).
    """
    retflag, retdata = pvc_vm.import_vm_snapshot(
        zkhandler,
        domain,
        snapshot_name,
        export_path,
        retain_snapshot,
    )

    retcode = 200 if retflag else 400
    return {"message": retdata.replace('"', "'")}, retcode
@ZKConnection(config)
def vm_attach_device(zkhandler, vm, device_spec_xml):
"""

View File

@ -1885,12 +1885,20 @@ def cli_vm_snapshot_rollback(domain, snapshot_name):
"--incremental",
"incremental_parent",
default=None,
help="Perform an incremental volume backup from this parent snapshot.",
help="Perform an incremental volume export from this parent snapshot.",
)
def cli_vm_snapshot_export(domain, snapshot_name, export_path, incremental_parent):
"""
Export the (existing) snapshot SNAPSHOT_NAME of virtual machine DOMAIN to the absolute path
EXPORT_PATH on the current PVC primary coordinator. DOMAIN may be a UUID or name.
Export the (existing) snapshot SNAPSHOT_NAME of virtual machine DOMAIN to the absolute path EXPORT_PATH on the current PVC primary coordinator.
DOMAIN may be a UUID or name.
EXPORT_PATH must be a valid absolute directory path on the cluster "primary" coordinator (see "pvc node list") allowing writes from the API daemon (normally running as "root"). The EXPORT_PATH should be a large storage volume, ideally a remotely mounted filesystem (e.g. NFS, SSHFS, etc.) or non-Ceph-backed disk; PVC does not handle this path, that is up to the administrator to configure and manage.
The export will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
Incremental exports are possible by specifying the "-i"/"--incremental" option along with a parent snapshot name. To correctly import, that export must exist on EXPORT_PATH.
Full export volume images are sparse-allocated, however it is recommended for safety to consider their maximum allocated size when allocated space for the EXPORT_PATH. Incremental volume images are generally small but are dependent entirely on the rate of data change in each volume.
"""
_, primary_node = pvc.lib.cluster.get_primary_node(CLI_CONFIG)
@ -1909,6 +1917,53 @@ def cli_vm_snapshot_export(domain, snapshot_name, export_path, incremental_paren
finish(retcode, retmsg)
###############################################################################
# > pvc vm snapshot import
###############################################################################
@click.command(name="import", short_help="Import a snapshot of a virtual machine.")
@connection_req
@click.argument("domain")
@click.argument("snapshot_name")
@click.argument("import_path")
@click.option(
    "-r/-R",
    "--retain-snapshot/--remove-snapshot",
    "retain_snapshot",
    is_flag=True,
    default=True,
    help="Retain or remove restored (parent, if incremental) snapshot in Ceph.",
)
def cli_vm_snapshot_import(domain, snapshot_name, import_path, retain_snapshot):
    # NOTE: the docstring below is rendered verbatim by Click as the command's
    # help output, so it is user-facing text, not just developer documentation.
    """
    Import the snapshot SNAPSHOT_NAME of virtual machine DOMAIN from the absolute path IMPORT_PATH on the current PVC primary coordinator.

    DOMAIN may be a UUID or name.

    IMPORT_PATH must be a valid absolute directory path on the cluster "primary" coordinator (see "pvc node list") allowing reads from the API daemon (normally running as "root"). The IMPORT_PATH should be a large storage volume, ideally a remotely mounted filesystem (e.g. NFS, SSHFS, etc.) or non-Ceph-backed disk; PVC does not handle this path, that is up to the administrator to configure and manage.

    The import will include the VM configuration, metainfo, and the point-in-time snapshot of all attached RBD volumes. Incremental imports will be automatically handled.

    A VM named DOMAIN or with the same UUID must not exist; if a VM with the same name or UUID already exists, it must be removed, or renamed and then undefined (to preserve volumes), before importing.

    If the "-r"/"--retain-snapshot" option is specified (the default), for incremental imports, only the parent snapshot is kept; for full imports, the imported snapshot is kept. If the "-R"/"--remove-snapshot" option is specified, the imported snapshot is removed.

    WARNING: The "-R"/"--remove-snapshot" option will invalidate any existing incremental snapshots based on the same incremental parent for the imported VM.
    """
    # Print the progress prefix without a newline; "done."/"failed." is
    # appended on the same line once the (potentially long) API call returns.
    echo(
        CLI_CONFIG,
        f"Importing snapshot '{snapshot_name}' of VM '{domain}'... ",
        newline=False,
    )
    retcode, retmsg = pvc.lib.vm.vm_import_snapshot(
        CLI_CONFIG, domain, snapshot_name, import_path, retain_snapshot
    )
    if retcode:
        echo(CLI_CONFIG, "done.")
    else:
        echo(CLI_CONFIG, "failed.")

    # finish() prints the backend message and exits with the proper status
    finish(retcode, retmsg)
###############################################################################
# > pvc vm backup
###############################################################################
@ -6450,6 +6505,7 @@ cli_vm_snapshot.add_command(cli_vm_snapshot_create)
cli_vm_snapshot.add_command(cli_vm_snapshot_remove)
cli_vm_snapshot.add_command(cli_vm_snapshot_rollback)
cli_vm_snapshot.add_command(cli_vm_snapshot_export)
cli_vm_snapshot.add_command(cli_vm_snapshot_import)
cli_vm.add_command(cli_vm_snapshot)
cli_vm_backup.add_command(cli_vm_backup_create)
cli_vm_backup.add_command(cli_vm_backup_restore)

View File

@ -583,6 +583,29 @@ def vm_export_snapshot(config, vm, snapshot_name, export_path, incremental_paren
return True, response.json().get("message", "")
def vm_import_snapshot(config, vm, snapshot_name, import_path, retain_snapshot=False):
    """
    Import a snapshot of {vm} and its volumes from a local primary coordinator filesystem path

    API endpoint: POST /vm/{vm}/snapshot/import
    API arguments: snapshot_name={snapshot_name}, import_path={import_path}, retain_snapshot={retain_snapshot}
    API schema: {"message":"{data}"}
    """
    request_params = {
        "snapshot_name": snapshot_name,
        "import_path": import_path,
        "retain_snapshot": retain_snapshot,
    }
    response = call_api(
        config, "post", f"/vm/{vm}/snapshot/import", params=request_params
    )

    # Success is a 200; either way, relay the backend's message text
    succeeded = response.status_code == 200
    return succeeded, response.json().get("message", "")
def vm_vcpus_set(config, vm, vcpus, topology, restart):
"""
Set the vCPU count of the VM with topology
@ -1729,6 +1752,7 @@ def format_info(config, domain_information, long_output):
"unmigrate": ansiprint.blue(),
"provision": ansiprint.blue(),
"restore": ansiprint.blue(),
"import": ansiprint.blue(),
}
ainformation.append(
"{}State:{} {}{}{}".format(

View File

@ -1051,6 +1051,8 @@ def get_resource_metrics(zkhandler):
"restart": 6,
"stop": 7,
"fail": 8,
"import": 9,
"restore": 10,
}
state = vm["state"]
output_lines.append(

View File

@ -82,6 +82,8 @@ vm_state_combinations = [
"migrate",
"unmigrate",
"provision",
"import",
"restore",
]
ceph_osd_state_combinations = [
"up,in",

View File

@ -618,28 +618,14 @@ def rename_vm(zkhandler, domain, new_domain):
# Get VM information
_b, dom_info = get_info(zkhandler, dom_uuid)
# Undefine the old VM
undefine_vm(zkhandler, dom_uuid)
# Define the new VM
define_vm(
zkhandler,
vm_config_new,
dom_info["node"],
dom_info["node_limit"],
dom_info["node_selector"],
dom_info["node_autostart"],
migration_method=dom_info["migration_method"],
migration_max_downtime=dom_info["migration_max_downtime"],
profile=dom_info["profile"],
tags=dom_info["tags"],
initial_state="stop",
# Edit the VM data
zkhandler.write(
[
(("domain", dom_uuid), new_domain),
(("domain.xml", dom_uuid), vm_config_new),
]
)
# If the VM is migrated, store that
if dom_info["migrated"] != "no":
zkhandler.write([(("domain.last_node", dom_uuid), dom_info["last_node"])])
return True, 'Successfully renamed VM "{}" to "{}".'.format(domain, new_domain)
@ -1249,7 +1235,7 @@ def get_list(
#
# VM Snapshot Tasks
#
def create_vm_snapshot(zkhandler, domain, snapshot_name=None):
def create_vm_snapshot(zkhandler, domain, snapshot_name=None, zk_only=False):
# Validate that VM exists in cluster
dom_uuid = getDomainUUID(zkhandler, domain)
if not dom_uuid:
@ -1291,7 +1277,9 @@ def create_vm_snapshot(zkhandler, domain, snapshot_name=None):
# Iterate through and create a snapshot for each RBD volume
for rbd in rbd_list:
pool, volume = rbd.split("/")
ret, msg = ceph.add_snapshot(zkhandler, pool, volume, snapshot_name)
ret, msg = ceph.add_snapshot(
zkhandler, pool, volume, snapshot_name, zk_only=zk_only
)
if not ret:
cleanup_failure()
return False, msg
@ -1496,7 +1484,7 @@ def export_vm_snapshot(
def write_export_json(
result=False,
result_message="",
vm_configuration=None,
vm_detail=None,
export_files=None,
export_files_size=0,
ttot=None,
@ -1512,7 +1500,7 @@ def export_vm_snapshot(
"result": result,
"result_message": result_message,
"runtime_secs": ttot,
"vm_configuration": vm_configuration,
"vm_detail": vm_detail,
"export_files": export_files,
"export_size_bytes": export_files_size,
}
@ -1527,7 +1515,14 @@ def export_vm_snapshot(
write_export_json(result=False, result_message=f"ERROR: {error_message}")
return False, f"ERROR: {error_message}"
# Validate that the given snapshot exists
# 3. Get information about VM
vm_detail = get_list(zkhandler, limit=dom_uuid, is_fuzzy=False)[1][0]
if not isinstance(vm_detail, dict):
error_message = f"VM listing returned invalid data: {vm_detail}"
write_export_json(result=False, result_message=f"ERROR: {error_message}")
return False, f"ERROR: {error_message}"
# 4. Validate that the given snapshot exists (and incremental parent exists if applicable)
if not zkhandler.exists(
("domain.snapshots", dom_uuid, "domain_snapshot.name", snapshot_name)
):
@ -1584,6 +1579,10 @@ def export_vm_snapshot(
]
)
# Override the current XML with the snapshot XML; but all other metainfo is current
vm_detail["xml"] = snapshot_xml
# Get the list of volumes
snapshot_volumes = list()
for rbdsnap in snapshot_rbdsnaps.split(","):
pool, _volume = rbdsnap.split("/")
@ -1594,8 +1593,7 @@ def export_vm_snapshot(
if ret:
snapshot_volumes += snapshots
# 4b. Validate that, if an incremental_parent is given, it is valid
# The incremental parent is just a datestring
# Set the export filetype
if incremental_parent is not None:
export_fileext = "rbddiff"
else:
@ -1647,9 +1645,6 @@ def export_vm_snapshot(
write_export_json(
result=False,
result_message=f"ERROR: {error_message}",
vm_configuration=snapshot_xml,
export_files=export_files,
export_files_size=export_files_size,
)
return (
False,
@ -1668,7 +1663,7 @@ def export_vm_snapshot(
write_export_json(
result=True,
result_message=result_message,
vm_configuration=snapshot_xml,
vm_detail=vm_detail,
export_files=export_files,
export_files_size=export_files_size,
ttot=ttot,
@ -1677,6 +1672,318 @@ def export_vm_snapshot(
return True, "\n".join(retlines)
def import_vm_snapshot(
    zkhandler, domain, snapshot_name, import_path, retain_snapshot=False
):
    """
    Import a snapshot of a VM, including all attached RBD volumes, from files
    under import_path on the local (active primary coordinator) filesystem.

    The VM "domain" (name or UUID) must not already exist in the cluster. If
    the export is incremental, the incremental parent export must also be
    present under import_path. When retain_snapshot is True, the imported
    snapshot (and, for incrementals, the parent snapshot) is recreated in PVC
    Zookeeper state after the RBD import.

    Returns a (success, message) tuple.
    """
    tstart = time.time()
    myhostname = gethostname().split(".")[0]

    # 0. Validations
    # Validate that VM does not exist in cluster
    dom_uuid = getDomainUUID(zkhandler, domain)
    if dom_uuid:
        return (
            False,
            f'ERROR: VM "{domain}" already exists in the cluster! Remove or rename it before importing a snapshot.',
        )

    # Validate that the source path is valid
    if not re.match(r"^/", import_path):
        return (
            False,
            f"ERROR: Source path {import_path} is not a valid absolute path on the primary coordinator!",
        )

    # Ensure that import_path (on this node) exists
    if not os.path.isdir(import_path):
        return False, f"ERROR: Source path {import_path} does not exist!"

    # Ensure that domain path (on this node) exists
    vm_import_path = f"{import_path}/{domain}"
    if not os.path.isdir(vm_import_path):
        return False, f"ERROR: Source VM path {vm_import_path} does not exist!"

    # Ensure that the archives are present
    export_source_snapshot_file = f"{vm_import_path}/{snapshot_name}/snapshot.json"
    if not os.path.isfile(export_source_snapshot_file):
        return False, "ERROR: The specified source export files do not exist!"

    # 1. Read the export file and get VM details
    try:
        with open(export_source_snapshot_file) as fh:
            export_source_details = jload(fh)
    except Exception as e:
        return False, f"ERROR: Failed to read source export details: {e}"

    # 2. Handle incrementals: an incremental export also needs its parent
    # export present and readable under the same import path
    incremental_parent = export_source_details.get("incremental_parent", None)
    if incremental_parent is not None:
        export_source_parent_snapshot_file = (
            f"{vm_import_path}/{incremental_parent}/snapshot.json"
        )
        if not os.path.isfile(export_source_parent_snapshot_file):
            # BUGFIX: this message was missing its f-string prefix, so the
            # "{myhostname}"/"{vm_import_path}"/"{incremental_parent}"
            # placeholders were previously emitted literally to the user
            return (
                False,
                f"ERROR: This export is incremental but the required incremental parent files do not exist at '{myhostname}:{vm_import_path}/{incremental_parent}'!",
            )

        try:
            with open(export_source_parent_snapshot_file) as fh:
                export_source_parent_details = jload(fh)
        except Exception as e:
            return (
                False,
                f"ERROR: Failed to read source incremental parent export details: {e}",
            )

    # 3. Import volumes and define the VM
    # RBD-level snapshot removals are best-effort in the incremental path;
    # failures are collected here and reported as a trailing warning
    is_snapshot_remove_failed = False
    which_snapshot_remove_failed = list()
    if incremental_parent is not None:
        # Incremental import: restore each parent (full) image first
        for volume_file, volume_size in export_source_details.get("export_files"):
            volume_size = f"{volume_size}B"
            pool, volume, _ = volume_file.split("/")[-1].split(".")

            # Locate the parent full image corresponding to this diff file
            try:
                parent_volume_file = [
                    f[0]
                    for f in export_source_parent_details.get("export_files")
                    if f[0].split("/")[-1].replace(".rbdimg", "")
                    == volume_file.split("/")[-1].replace(".rbddiff", "")
                ][0]
            except Exception as e:
                return (
                    False,
                    f"ERROR: Failed to find parent volume for volume {pool}/{volume}; export may be corrupt or invalid: {e}",
                )

            # First we create the expected volumes then clean them up
            # This process is a bit of a hack because rbd import does not expect an existing volume,
            # but we need the information in PVC.
            # Thus create the RBD volume using ceph.add_volume based on the export size, and then
            # manually remove the RBD volume (leaving the PVC metainfo)
            retcode, retmsg = ceph.add_volume(zkhandler, pool, volume, volume_size)
            if not retcode:
                return False, f"ERROR: Failed to create imported volume: {retmsg}"

            retcode, stdout, stderr = common.run_os_command(
                f"rbd remove {pool}/{volume}"
            )
            if retcode:
                return (
                    False,
                    f"ERROR: Failed to remove temporary RBD volume '{pool}/{volume}': {stderr}",
                )

            # Next we import the parent image
            retcode, stdout, stderr = common.run_os_command(
                f"rbd import --export-format 2 --dest-pool {pool} {import_path}/{domain}/{incremental_parent}/{parent_volume_file} {volume}"
            )
            if retcode:
                return (
                    False,
                    f"ERROR: Failed to import parent export image {parent_volume_file}: {stderr}",
                )

        # Import VM config and metadata in import state, from the *parent* (source) details
        try:
            retcode, retmsg = define_vm(
                zkhandler,
                export_source_parent_details["vm_detail"]["xml"],
                export_source_parent_details["vm_detail"]["node"],
                export_source_parent_details["vm_detail"]["node_limit"],
                export_source_parent_details["vm_detail"]["node_selector"],
                export_source_parent_details["vm_detail"]["node_autostart"],
                export_source_parent_details["vm_detail"]["migration_method"],
                export_source_parent_details["vm_detail"]["migration_max_downtime"],
                export_source_parent_details["vm_detail"]["profile"],
                export_source_parent_details["vm_detail"]["tags"],
                "import",
            )
            if not retcode:
                return False, f"ERROR: Failed to define imported VM: {retmsg}"
        except Exception as e:
            return False, f"ERROR: Failed to parse VM export details: {e}"

        # Handle the VM snapshots
        if retain_snapshot:
            # Create the parent snapshot (Zookeeper state only; the RBD
            # snapshots already exist from the parent image import)
            retcode, retmsg = create_vm_snapshot(
                zkhandler, domain, snapshot_name=incremental_parent, zk_only=True
            )
            if not retcode:
                return (
                    False,
                    f"ERROR: Failed to create imported snapshot for {incremental_parent} (parent): {retmsg}",
                )

        # Apply the incremental diffs on top of the parent images
        for volume_file, volume_size in export_source_details.get("export_files"):
            volume_size = f"{volume_size}B"
            pool, volume, _ = volume_file.split("/")[-1].split(".")

            # Then we import the incremental diffs
            retcode, stdout, stderr = common.run_os_command(
                f"rbd import-diff {import_path}/{domain}/{snapshot_name}/{volume_file} {pool}/{volume}"
            )
            if retcode:
                return (
                    False,
                    f"ERROR: Failed to import incremental export image {volume_file}: {stderr}",
                )

            if not retain_snapshot:
                # Best-effort cleanup of both RBD-level snapshots; record
                # failures rather than aborting a mostly-complete import
                retcode, stdout, stderr = common.run_os_command(
                    f"rbd snap rm {pool}/{volume}@{incremental_parent}"
                )
                if retcode:
                    is_snapshot_remove_failed = True
                    which_snapshot_remove_failed.append(f"{pool}/{volume}")

                retcode, stdout, stderr = common.run_os_command(
                    f"rbd snap rm {pool}/{volume}@{snapshot_name}"
                )
                if retcode:
                    is_snapshot_remove_failed = True
                    which_snapshot_remove_failed.append(f"{pool}/{volume}")

        # Now update VM config and metadata, from the *current* details
        try:
            retcode, retmsg = modify_vm(
                zkhandler,
                domain,
                False,
                export_source_details["vm_detail"]["xml"],
            )
            if not retcode:
                return False, f"ERROR: Failed to modify imported VM: {retmsg}"

            retcode, retmsg = move_vm(
                zkhandler,
                domain,
                export_source_details["vm_detail"]["node"],
            )
            if not retcode:
                # We don't actually care if this fails, because it just means the vm was never moved
                pass

            retcode, retmsg = modify_vm_metadata(
                zkhandler,
                domain,
                export_source_details["vm_detail"]["node_limit"],
                export_source_details["vm_detail"]["node_selector"],
                export_source_details["vm_detail"]["node_autostart"],
                export_source_details["vm_detail"]["profile"],
                export_source_details["vm_detail"]["migration_method"],
                export_source_details["vm_detail"]["migration_max_downtime"],
            )
            if not retcode:
                return False, f"ERROR: Failed to modify imported VM: {retmsg}"
        except Exception as e:
            return False, f"ERROR: Failed to parse VM export details: {e}"

        if retain_snapshot:
            # Create the child snapshot (Zookeeper state only)
            retcode, retmsg = create_vm_snapshot(
                zkhandler, domain, snapshot_name=snapshot_name, zk_only=True
            )
            if not retcode:
                return (
                    False,
                    f"ERROR: Failed to create imported snapshot for {snapshot_name}: {retmsg}",
                )
    else:
        # Full (non-incremental) import: restore each image directly
        for volume_file, volume_size in export_source_details.get("export_files"):
            volume_size = f"{volume_size}B"
            pool, volume, _ = volume_file.split("/")[-1].split(".")

            # First we create the expected volumes then clean them up
            # This process is a bit of a hack because rbd import does not expect an existing volume,
            # but we need the information in PVC.
            # Thus create the RBD volume using ceph.add_volume based on the export size, and then
            # manually remove the RBD volume (leaving the PVC metainfo)
            retcode, retmsg = ceph.add_volume(zkhandler, pool, volume, volume_size)
            if not retcode:
                return False, f"ERROR: Failed to create imported volume: {retmsg}"

            retcode, stdout, stderr = common.run_os_command(
                f"rbd remove {pool}/{volume}"
            )
            if retcode:
                return (
                    False,
                    f"ERROR: Failed to remove temporary RBD volume '{pool}/{volume}': {stderr}",
                )

            # Then we perform the actual import
            retcode, stdout, stderr = common.run_os_command(
                f"rbd import --export-format 2 --dest-pool {pool} {import_path}/{domain}/{snapshot_name}/{volume_file} {volume}"
            )
            if retcode:
                return (
                    False,
                    f"ERROR: Failed to import export image {volume_file}: {stderr}",
                )

            if not retain_snapshot:
                retcode, stdout, stderr = common.run_os_command(
                    f"rbd snap rm {pool}/{volume}@{snapshot_name}"
                )
                if retcode:
                    return (
                        False,
                        f"ERROR: Failed to remove imported image snapshot for {volume_file}: {stderr}",
                    )

        # Import VM config and metadata in import state
        try:
            retcode, retmsg = define_vm(
                zkhandler,
                export_source_details["vm_detail"]["xml"],
                export_source_details["vm_detail"]["node"],
                export_source_details["vm_detail"]["node_limit"],
                export_source_details["vm_detail"]["node_selector"],
                export_source_details["vm_detail"]["node_autostart"],
                export_source_details["vm_detail"]["migration_method"],
                export_source_details["vm_detail"]["migration_max_downtime"],
                export_source_details["vm_detail"]["profile"],
                export_source_details["vm_detail"]["tags"],
                "import",
            )
            if not retcode:
                return False, f"ERROR: Failed to define imported VM: {retmsg}"
        except Exception as e:
            return False, f"ERROR: Failed to parse VM export details: {e}"

        # Finally we handle the VM snapshot (Zookeeper state only)
        if retain_snapshot:
            retcode, retmsg = create_vm_snapshot(
                zkhandler, domain, snapshot_name=snapshot_name, zk_only=True
            )
            if not retcode:
                return (
                    False,
                    f"ERROR: Failed to create imported snapshot for {snapshot_name}: {retmsg}",
                )

    # 4. Start VM
    retcode, retmsg = start_vm(zkhandler, domain)
    if not retcode:
        return False, f"ERROR: Failed to start imported VM {domain}: {retmsg}"

    tend = time.time()
    ttot = round(tend - tstart, 2)

    retlines = list()
    if is_snapshot_remove_failed:
        retlines.append(
            f"WARNING: Failed to remove hanging snapshot(s) as requested for volume(s) {', '.join(which_snapshot_remove_failed)}"
        )
    retlines.append(
        f"Successfully imported VM '{domain}' at snapshot '{snapshot_name}' from '{myhostname}:{import_path}' in {ttot}s."
    )

    return True, "\n".join(retlines)
#
# VM Backup Tasks
#