Implement friendlier VM mirror commands

Adds two helper commands, grouped under "vm mirror", which automate
sending and promoting VM snapshots between clusters.

"vm mirror create" replicates the functionality of "snapshot create" and
"snapshot send", performing both in a single task using an autogenerated,
dated snapshot name, for automatic cross-cluster replication.

"vm mirror promote" replicates the functionality of "vm shutdown",
"snapshot create", "snapshot send", "vm start" (remote), and, optionally,
"vm remove", performing an entire cross-cluster VM move in a single task,
with or without retaining the copy on the local cluster (if retained, the
local copy becomes a snapshot mirror of the remote, flipping their
statuses).
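
At a glance, the intended workflow with the two commands (the VM name
"web01" and connection name "cluster-b" below are hypothetical):

    import subprocess

    # Keep an up-to-date mirror of the VM on the remote cluster; run this
    # periodically (e.g. from cron). Repeat runs become incremental once a
    # shared snapshot exists on both clusters.
    subprocess.run(["pvc", "vm", "mirror", "create", "web01", "cluster-b"], check=True)

    # When cutting over, promote the mirror: shut down locally, send a final
    # snapshot, and start the VM on the remote cluster. Adding "--remove"
    # makes this a full move instead of leaving a reverse mirror behind.
    subprocess.run(["pvc", "vm", "mirror", "promote", "web01", "cluster-b"], check=True)
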
Joshua Boniface 2024-10-08 22:53:12 -04:00
parent df4d437d31
commit 3ea7421f09
5 changed files with 1836 additions and 15 deletions


@@ -4015,6 +4015,229 @@ class API_VM_Snapshot_Receive_Config(Resource):
api.add_resource(API_VM_Snapshot_Receive_Config, "/vm/<vm>/snapshot/receive/config")
# /vm/<vm>/mirror/create
class API_VM_Mirror_Create(Resource):
@RequestParser(
[
{
"name": "destination_api_uri",
"required": True,
"helptext": "A destination API URI must be specified",
},
{
"name": "destination_api_key",
"required": True,
"helptext": "A destination API key must be specified",
},
{
"name": "destination_api_verify_ssl",
"required": False,
},
{
"name": "destination_storage_pool",
"required": False,
},
]
)
@Authenticator
def post(self, vm, reqargs):
"""
Create (or update) a snapshot mirror of a VM to a remote cluster
This method handles both the creation of a new VM snapshot and the sending of that snapshot to a remote cluster, creating or updating a VM mirror. It will also automatically handle full vs. incremental block sends where possible, based on the available snapshots on both sides.
---
tags:
- vm
parameters:
- in: query
name: destination_api_uri
type: string
required: true
description: The base API URI of the destination PVC cluster (with prefix if applicable)
- in: query
name: destination_api_key
type: string
required: true
description: The API authentication key of the destination PVC cluster
- in: query
name: destination_api_verify_ssl
type: boolean
required: false
default: true
description: Whether or not to validate SSL certificates for an SSL-enabled destination API
- in: query
name: destination_storage_pool
type: string
required: false
default: source storage pool name
description: The remote cluster storage pool to create RBD volumes in, if different from the source storage pool
responses:
202:
description: Accepted
schema:
type: object
description: The Celery job information of the task
id: CeleryTask
400:
description: Execution error
schema:
type: object
id: Message
404:
description: Not found
schema:
type: object
id: Message
"""
destination_api_uri = reqargs.get("destination_api_uri", None)
destination_api_key = reqargs.get("destination_api_key", None)
destination_api_verify_ssl = bool(
strtobool(reqargs.get("destination_api_verify_ssl", "true"))
)
destination_storage_pool = reqargs.get("destination_storage_pool", None)
task = run_celery_task(
"vm.create_mirror",
domain=vm,
destination_api_uri=destination_api_uri,
destination_api_key=destination_api_key,
destination_api_verify_ssl=destination_api_verify_ssl,
destination_storage_pool=destination_storage_pool,
run_on="primary",
)
return (
{
"task_id": task.id,
"task_name": "vm.send_snapshot",
"run_on": f"{get_primary_node()} (primary)",
},
202,
{"Location": Api.url_for(api, API_Tasks_Element, task_id=task.id)},
)
api.add_resource(API_VM_Mirror_Create, "/vm/<vm>/mirror/create")
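
For reference, a minimal client sketch for this endpoint (not part of the
diff): the base URI, the API key, the "X-Api-Key" header name, and the
task-status field names are assumptions here; only the endpoint path, the
query parameters, and the 202/Location behaviour come from the code above.

    import time
    from urllib.parse import urljoin

    import requests

    base = "https://cluster-a.example.com:7370/api/v1"  # hypothetical source cluster API
    headers = {"X-Api-Key": "local-api-key"}            # assumed auth header name

    # Create (or update) the mirror of VM "web01" on the destination cluster
    resp = requests.post(
        f"{base}/vm/web01/mirror/create",
        headers=headers,
        params={
            "destination_api_uri": "https://cluster-b.example.com:7370/api/v1",
            "destination_api_key": "remote-api-key",
            "destination_api_verify_ssl": "true",
            # "destination_storage_pool": "vms",  # optional; defaults to the source pool
        },
    )
    resp.raise_for_status()  # expect 202 Accepted with the Celery task info in the body

    # The 202 response carries a Location header pointing at the task status endpoint
    task_url = urljoin(base, resp.headers["Location"])
    while True:
        status = requests.get(task_url, headers=headers).json()
        if status.get("state") in ("SUCCESS", "FAILURE"):  # field name and values assumed
            break
        time.sleep(5)
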
# /vm/<vm>/mirror/promote
class API_VM_Mirror_Promote(Resource):
@RequestParser(
[
{
"name": "destination_api_uri",
"required": True,
"helptext": "A destination API URI must be specified",
},
{
"name": "destination_api_key",
"required": True,
"helptext": "A destination API key must be specified",
},
{
"name": "destination_api_verify_ssl",
"required": False,
},
{
"name": "destination_storage_pool",
"required": False,
},
{
"name": "remove_on_source",
"required": False,
},
]
)
@Authenticator
def post(self, vm, reqargs):
"""
Promote a snapshot mirror of a VM on a remote cluster, flipping the "mirror" state, and optionally removing the source VM on this cluster.
This method handles shutting down the VM on this cluster, creating a new VM snapshot, sending that snapshot to a remote cluster, and then starting the VM on the remote cluster. It will also automatically handle full vs. incremental block sends where possible, based on the available snapshots on both sides.
NOTE: This method may be used alone to perform a one-shot cross-cluster move; creating the mirror first is not required, though doing so will improve performance by allowing an incremental block send.
---
tags:
- vm
parameters:
- in: query
name: destination_api_uri
type: string
required: true
description: The base API URI of the destination PVC cluster (with prefix if applicable)
- in: query
name: destination_api_key
type: string
required: true
description: The API authentication key of the destination PVC cluster
- in: query
name: destination_api_verify_ssl
type: boolean
required: false
default: true
description: Whether or not to validate SSL certificates for an SSL-enabled destination API
- in: query
name: destination_storage_pool
type: string
required: false
default: source storage pool name
description: The remote cluster storage pool to create RBD volumes in, if different from the source storage pool
- in: query
name: remove_on_source
type: boolean
required: false
default: false
description: Remove the VM on the source cluster once promoted (performs a full move between clusters)
responses:
202:
description: Accepted
schema:
type: object
description: The Celery job information of the task
id: CeleryTask
400:
description: Execution error
schema:
type: object
id: Message
404:
description: Not found
schema:
type: object
id: Message
"""
destination_api_uri = reqargs.get("destination_api_uri", None)
destination_api_key = reqargs.get("destination_api_key", None)
destination_api_verify_ssl = bool(
strtobool(reqargs.get("destination_api_verify_ssl", "true"))
)
destination_storage_pool = reqargs.get("destination_storage_pool", None)
remove_on_source = bool(strtobool(reqargs.get("remove_on_source", "false")))
task = run_celery_task(
"vm.promote_mirror",
domain=vm,
destination_api_uri=destination_api_uri,
destination_api_key=destination_api_key,
destination_api_verify_ssl=destination_api_verify_ssl,
destination_storage_pool=destination_storage_pool,
remove_on_source=remove_on_source,
run_on="primary",
)
return (
{
"task_id": task.id,
"task_name": "vm.send_snapshot",
"run_on": f"{get_primary_node()} (primary)",
},
202,
{"Location": Api.url_for(api, API_Tasks_Element, task_id=task.id)},
)
api.add_resource(API_VM_Mirror_Promote, "/vm/<vm>/mirror/promote")
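
The promote endpoint follows the same calling pattern; a short sketch with
placeholder hosts and keys (the auth header name is again an assumption).
Setting remove_on_source turns the promotion into a full one-shot move.

    import requests

    resp = requests.post(
        "https://cluster-a.example.com:7370/api/v1/vm/web01/mirror/promote",
        headers={"X-Api-Key": "local-api-key"},  # assumed auth header name
        params={
            "destination_api_uri": "https://cluster-b.example.com:7370/api/v1",
            "destination_api_key": "remote-api-key",
            "remove_on_source": "true",  # omit to keep the local copy as a reverse mirror
        },
    )
    resp.raise_for_status()  # 202 Accepted; poll the Location header as shown earlier
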
# /vm/autobackup
class API_VM_Autobackup_Root(Resource):
@RequestParser(


@@ -2078,7 +2078,7 @@ def cli_vm_snapshot_send(
Incremental sends are possible by specifying the "-i"/"--incremental-parent" option along with a parent snapshot name. To correctly receive, that parent snapshot must exist on DESTINATION. Subsequent sends after the first do not have to be incremental, but an incremental send is likely to perform better than a full send if the VM experiences few writes.
WARNING: Once sent, the VM will be in the state "mirror" on the remote cluster. If it is subsequently started, for instance for disaster recovery, a new snapshot must be taken on the remote side and sent back or data will be inconsistent between the instances. Only VMs in the "mirror" state can accept new sends.
WARNING: Once sent, the VM will be in the state "mirror" on the destination cluster. If it is subsequently started, for instance for disaster recovery, a new snapshot must be taken on the destination cluster and sent back or data will be inconsistent between the instances. Only VMs in the "mirror" state can accept new sends.
WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
"""
@@ -2114,6 +2114,199 @@ def cli_vm_snapshot_send(
finish(retcode, retmsg)
###############################################################################
# > pvc vm mirror
###############################################################################
@click.group(
name="mirror",
short_help="Manage snapshot mirrors for PVC VMs.",
context_settings=CONTEXT_SETTINGS,
)
def cli_vm_mirror():
"""
Manage snapshot mirrors of VMs in a PVC cluster.
"""
pass
###############################################################################
# > pvc vm mirror create
###############################################################################
@click.command(
name="create",
short_help="Create a snapshot mirror of a virtual machine to another PVC cluster.",
)
@connection_req
@click.argument("domain")
@click.argument("destination")
@click.option(
"-k",
"--destination-api-key",
"destination_api_key",
default=None,
help="The API key of the destination cluster when specifying an API URI.",
)
@click.option(
"-p",
"--destination-pool",
"destination_storage_pool",
default=None,
help="The target storage pool on the destination cluster, if it differs from the source pool.",
)
@click.option(
"--wait/--no-wait",
"wait_flag",
is_flag=True,
default=True,
show_default=True,
help="Wait or don't wait for task to complete, showing progress if waiting",
)
def cli_vm_mirror_create(
domain,
destination,
destination_api_key,
destination_storage_pool,
wait_flag,
):
"""
For the virtual machine DOMAIN: create a new snapshot (dated), and send that snapshot to the remote PVC cluster DESTINATION; this creates a cross-cluster snapshot mirror of the VM.
DOMAIN may be a UUID or name. DESTINATION may be either a configured PVC connection name in this CLI instance (i.e. a valid argument to "--connection"), or a full API URI, including the scheme, port and API prefix; if using the latter, an API key can be specified with the "-k"/"--destination-api-key" option.
The send will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
This command may be used repeatedly to send new updates for a remote VM mirror. If a valid shared snapshot is found on the destination cluster, block device transfers will be incremental based on that snapshot.
By default, the storage pool of the sending cluster will also be used on the destination cluster. If a pool of that name does not exist there, specify one with the "-p"/"--destination-pool" option.
WARNING: Once sent, the VM will be in the state "mirror" on the destination cluster. If it is subsequently started, for instance for disaster recovery, a new snapshot must be taken on the destination cluster and sent back or data will be inconsistent between the instances. Only VMs in the "mirror" state can accept new sends. Consider using "mirror promote" instead of any manual promotion attempts.
WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
"""
connections_config = get_store(CLI_CONFIG["store_path"])
if destination in connections_config.keys():
destination_cluster_config = connections_config[destination]
destination_api_uri = "{}://{}:{}{}".format(
destination_cluster_config["scheme"],
destination_cluster_config["host"],
destination_cluster_config["port"],
CLI_CONFIG["api_prefix"],
)
destination_api_key = destination_cluster_config["api_key"]
else:
destination_api_uri = destination
# destination_api_key is used as given via "-k"/"--destination-api-key"
retcode, retmsg = pvc.lib.vm.vm_create_mirror(
CLI_CONFIG,
domain,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=CLI_CONFIG.get("verify_ssl"),
destination_storage_pool=destination_storage_pool,
wait_flag=wait_flag,
)
if retcode and wait_flag:
retmsg = wait_for_celery_task(CLI_CONFIG, retmsg)
finish(retcode, retmsg)
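
A usage sketch of the alternative DESTINATION form described above (the
host, key, and pool names are hypothetical): DESTINATION may be a full API
URI instead of a connection name, in which case the API key is passed with
"-k" and a differing target pool with "-p".

    import subprocess

    subprocess.run(
        [
            "pvc", "vm", "mirror", "create", "web01",
            "https://cluster-b.example.com:7370/api/v1",
            "--destination-api-key", "remote-api-key",
            "--destination-pool", "vms",
        ],
        check=True,
    )
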
###############################################################################
# > pvc vm mirror promote
###############################################################################
@click.command(
name="promote",
short_help="Shut down, create a snapshot mirror, and promote a virtual machine to another PVC cluster.",
)
@connection_req
@click.argument("domain")
@click.argument("destination")
@click.option(
"-k",
"--destination-api-key",
"destination_api_key",
default=None,
help="The API key of the destination cluster when specifying an API URI.",
)
@click.option(
"-p",
"--destination-pool",
"destination_storage_pool",
default=None,
help="The target storage pool on the destination cluster, if it differs from the source pool.",
)
@click.option(
"--remove/--no-remove",
"remove_flag",
is_flag=True,
default=False,
show_default=True,
help="Remove or don't remove the local VM after promoting (if set, performs a cross-cluster move).",
)
@click.option(
"--wait/--no-wait",
"wait_flag",
is_flag=True,
default=True,
show_default=True,
help="Wait or don't wait for task to complete, showing progress if waiting",
)
def cli_vm_mirror_promote(
domain,
destination,
destination_api_key,
destination_storage_pool,
remove_flag,
wait_flag,
):
"""
For the virtual machine DOMAIN: shut down the VM on this cluster, create a new snapshot (dated), send that snapshot to the remote PVC cluster DESTINATION, start the VM on DESTINATION, and optionally remove the VM from this cluster; this performs a cross-cluster move of the VM, with or without retaining the source as a snapshot mirror.
DOMAIN may be a UUID or name. DESTINATION may be either a configured PVC connection name in this CLI instance (i.e. a valid argument to "--connection"), or a full API URI, including the scheme, port and API prefix; if using the latter, an API key can be specified with the "-k"/"--destination-api-key" option.
The send will include the VM configuration, metainfo, and a point-in-time snapshot of all attached RBD volumes.
If a valid shared snapshot is found on the destination cluster, block device transfers will be incremental based on that snapshot.
By default, the storage pool of the sending cluster will also be used on the destination cluster. If a pool of that name does not exist there, specify one with the "-p"/"--destination-pool" option.
WARNING: Once promoted, if the "--remove" flag is not set, the VM will be in the state "mirror" on this cluster. This effectively flips which cluster is the "primary" for this VM, and subsequent mirror management commands must be run against the destination cluster instead of this cluster. If the "--remove" flag is set, the VM will be removed from this cluster entirely once successfully started on the destination cluster.
WARNING: This functionality has no automatic backout on the remote side. While a properly configured cluster should not fail any step in the process, a situation like an intermittent network connection might cause a failure which would have to be manually corrected on that side, usually by removing the mirrored VM and retrying, or rolling back to a previous snapshot and retrying. Future versions may enhance automatic recovery, but for now this would be up to the administrator.
"""
connections_config = get_store(CLI_CONFIG["store_path"])
if destination in connections_config.keys():
destination_cluster_config = connections_config[destination]
destination_api_uri = "{}://{}:{}{}".format(
destination_cluster_config["scheme"],
destination_cluster_config["host"],
destination_cluster_config["port"],
CLI_CONFIG["api_prefix"],
)
destination_api_key = destination_cluster_config["api_key"]
else:
destination_api_uri = destination
# destination_api_key is used as given via "-k"/"--destination-api-key"
retcode, retmsg = pvc.lib.vm.vm_promote_mirror(
CLI_CONFIG,
domain,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=CLI_CONFIG.get("verify_ssl"),
destination_storage_pool=destination_storage_pool,
remove_on_source=remove_flag,
wait_flag=wait_flag,
)
if retcode and wait_flag:
retmsg = wait_for_celery_task(CLI_CONFIG, retmsg)
finish(retcode, retmsg)
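
A sketch of the full-move form described above (hypothetical names): with
"--remove", the local VM is deleted once it has started successfully on the
destination, rather than being retained as a reverse mirror.

    import subprocess

    subprocess.run(
        ["pvc", "vm", "mirror", "promote", "web01", "cluster-b", "--remove"],
        check=True,
    )
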
###############################################################################
# > pvc vm backup
###############################################################################
@@ -6686,6 +6879,9 @@ cli_vm_snapshot.add_command(cli_vm_snapshot_export)
cli_vm_snapshot.add_command(cli_vm_snapshot_import)
cli_vm_snapshot.add_command(cli_vm_snapshot_send)
cli_vm.add_command(cli_vm_snapshot)
cli_vm_mirror.add_command(cli_vm_mirror_create)
cli_vm_mirror.add_command(cli_vm_mirror_promote)
cli_vm.add_command(cli_vm_mirror)
cli_vm_backup.add_command(cli_vm_backup_create)
cli_vm_backup.add_command(cli_vm_backup_restore)
cli_vm_backup.add_command(cli_vm_backup_remove)


@@ -611,7 +611,7 @@ def vm_send_snapshot(
incremental with incremental_parent
API endpoint: POST /vm/{vm}/snapshot/send
API arguments: snapshot_name=snapshot_name, destination_api_uri=destination_api_uri, destination_api_key=destination_api_key, incremental_parent=incremental_parent
API arguments: snapshot_name=snapshot_name, destination_api_uri=destination_api_uri, destination_api_key=destination_api_key, destination_api_verify_ssl=destination_api_verify_ssl, incremental_parent=incremental_parent, destination_storage_pool=destination_storage_pool
API schema: {"message":"{data}"}
"""
params = {
@@ -632,6 +632,70 @@ def vm_send_snapshot(
return get_wait_retdata(response, wait_flag)
def vm_create_mirror(
config,
vm,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=True,
destination_storage_pool=None,
wait_flag=True,
):
"""
Create a new snapshot and send the snapshot to a destination PVC cluster, with automatic incremental handling
API endpoint: POST /vm/{vm}/mirror/create
API arguments: destination_api_uri=destination_api_uri, destination_api_key=destination_api_key, destination_api_verify_ssl=destination_api_verify_ssl, destination_storage_pool=destination_storage_pool
API schema: {"message":"{data}"}
"""
params = {
"destination_api_uri": destination_api_uri,
"destination_api_key": destination_api_key,
"destination_api_verify_ssl": destination_api_verify_ssl,
}
if destination_storage_pool is not None:
params["destination_storage_pool"] = destination_storage_pool
response = call_api(
config, "post", "/vm/{vm}/mirror/create".format(vm=vm), params=params
)
return get_wait_retdata(response, wait_flag)
def vm_promote_mirror(
config,
vm,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=True,
destination_storage_pool=None,
remove_on_source=False,
wait_flag=True,
):
"""
Shut down a VM, create a new snapshot, send the snapshot to a destination PVC cluster, start the VM on the remote cluster, and optionally remove the local VM, with automatic incremental handling
API endpoint: POST /vm/{vm}/mirror/promote
API arguments: destination_api_uri=destination_api_uri, destination_api_key=destination_api_key, destination_api_verify_ssl=destination_api_verify_ssl, destination_storage_pool=destination_storage_pool, remove_on_source=remove_on_source
API schema: {"message":"{data}"}
"""
params = {
"destination_api_uri": destination_api_uri,
"destination_api_key": destination_api_key,
"destination_api_verify_ssl": destination_api_verify_ssl,
"remove_on_source": remove_on_source,
}
if destination_storage_pool is not None:
params["destination_storage_pool"] = destination_storage_pool
response = call_api(
config, "post", "/vm/{vm}/mirror/promote".format(vm=vm), params=params
)
return get_wait_retdata(response, wait_flag)
def vm_autobackup(config, email_recipients=None, force_full_flag=False, wait_flag=True):
"""
Perform a cluster VM autobackup

File diff suppressed because it is too large.


@@ -34,6 +34,8 @@ from daemon_lib.vm import (
vm_worker_export_snapshot,
vm_worker_import_snapshot,
vm_worker_send_snapshot,
vm_worker_create_mirror,
vm_worker_promote_mirror,
)
from daemon_lib.ceph import (
osd_worker_add_osd,
@@ -276,6 +278,90 @@ def vm_send_snapshot(
)
@celery.task(name="vm.create_mirror", bind=True, routing_key="run_on")
def vm_create_mirror(
self,
domain=None,
destination_api_uri="",
destination_api_key="",
destination_api_verify_ssl=True,
destination_storage_pool=None,
run_on="primary",
):
@ZKConnection(config)
def run_vm_create_mirror(
zkhandler,
self,
domain,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=True,
destination_storage_pool=None,
):
return vm_worker_create_mirror(
zkhandler,
self,
domain,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=destination_api_verify_ssl,
destination_storage_pool=destination_storage_pool,
)
return run_vm_create_mirror(
self,
domain,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=destination_api_verify_ssl,
destination_storage_pool=destination_storage_pool,
)
@celery.task(name="vm.promote_mirror", bind=True, routing_key="run_on")
def vm_promote_mirror(
self,
domain=None,
destination_api_uri="",
destination_api_key="",
destination_api_verify_ssl=True,
destination_storage_pool=None,
remove_on_source=False,
run_on="primary",
):
@ZKConnection(config)
def run_vm_promote_mirror(
zkhandler,
self,
domain,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=True,
destination_storage_pool=None,
remove_on_source=False,
):
return vm_worker_promote_mirror(
zkhandler,
self,
domain,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=destination_api_verify_ssl,
destination_storage_pool=destination_storage_pool,
remove_on_source=remove_on_source,
)
return run_vm_promote_mirror(
self,
domain,
destination_api_uri,
destination_api_key,
destination_api_verify_ssl=destination_api_verify_ssl,
destination_storage_pool=destination_storage_pool,
remove_on_source=remove_on_source,
)
@celery.task(name="osd.add", bind=True, routing_key="run_on")
def osd_add(
self,