Support removing VMs (and disks) from PVC clients
Adds full support for removing a VM entirely, including its RBD disks, via the PVC client(s). This avoids needing an undefine followed by manual removal of the disks.
This commit is contained in:
parent
3e591bd09e
commit
d28b1ba133
|
@ -388,13 +388,13 @@ def vm_modify(domain, config, editor, restart):
|
|||
###############################################################################
|
||||
# pvc vm undefine
|
||||
###############################################################################
|
||||
@click.command(name='undefine', short_help='Undefine and stop a virtual machine.')
|
||||
@click.command(name='undefine', short_help='Undefine a virtual machine.')
|
||||
@click.argument(
|
||||
'domain'
|
||||
)
|
||||
def vm_undefine(domain):
|
||||
"""
|
||||
Stop virtual machine DOMAIN and remove it from the cluster database. DOMAIN may be a UUID or name.
|
||||
Stop virtual machine DOMAIN and remove it from the cluster database, preserving disks. DOMAIN may be a UUID or name.
|
||||
"""
|
||||
|
||||
# Ensure at least one search method is set
|
||||
|
@ -407,6 +407,28 @@ def vm_undefine(domain):
|
|||
retcode, retmsg = pvc_vm.undefine_vm(zk_conn, domain, is_cli=True)
|
||||
cleanup(retcode, retmsg, zk_conn)
|
||||
|
||||
###############################################################################
|
||||
# pvc vm remove
|
||||
###############################################################################
|
||||
@click.command(name='remove', short_help='Remove a virtual machine.')
@click.argument(
    'domain'
)
def vm_remove(domain):
    """
    Stop virtual machine DOMAIN and remove it, along with all disks, from the cluster. DOMAIN may be a UUID or name.
    """

    # Ensure at least one search method is set
    # PEP 8: compare against the None singleton with `is`, not `==`
    if domain is None:
        click.echo("ERROR: You must specify either a name or UUID value.")
        exit(1)

    # Open a Zookeeper connection and delegate the actual removal (stop,
    # undefine, and RBD disk deletion) to the client library
    zk_conn = pvc_common.startZKConnection(zk_host)
    retcode, retmsg = pvc_vm.remove_vm(zk_conn, domain, is_cli=True)
    cleanup(retcode, retmsg, zk_conn)
|
||||
|
||||
###############################################################################
|
||||
# pvc vm dump
|
||||
###############################################################################
|
||||
|
@ -1609,6 +1631,7 @@ cli_vm.add_command(vm_add)
|
|||
cli_vm.add_command(vm_define)
|
||||
cli_vm.add_command(vm_modify)
|
||||
cli_vm.add_command(vm_undefine)
|
||||
cli_vm.add_command(vm_remove)
|
||||
cli_vm.add_command(vm_dump)
|
||||
cli_vm.add_command(vm_start)
|
||||
cli_vm.add_command(vm_restart)
|
||||
|
|
|
@ -39,6 +39,8 @@ import client_lib.ansiprint as ansiprint
|
|||
import client_lib.zkhandler as zkhandler
|
||||
import client_lib.common as common
|
||||
|
||||
import client_lib.ceph as ceph
|
||||
|
||||
#
|
||||
# XML information parsing functions
|
||||
#
|
||||
|
@ -94,7 +96,6 @@ def getInformationFromXML(zk_conn, uuid):
|
|||
return domain_information
|
||||
|
||||
|
||||
|
||||
#
|
||||
# Cluster search functions
|
||||
#
|
||||
|
@ -136,7 +137,7 @@ def searchClusterByName(zk_conn, name):
|
|||
return uuid
|
||||
|
||||
def getDomainUUID(zk_conn, domain):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
if common.validateUUID(domain):
|
||||
dom_name = searchClusterByUUID(zk_conn, domain)
|
||||
dom_uuid = searchClusterByName(zk_conn, dom_name)
|
||||
|
@ -147,7 +148,7 @@ def getDomainUUID(zk_conn, domain):
|
|||
return dom_uuid
|
||||
|
||||
def getDomainName(zk_conn, domain):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
if common.validateUUID(domain):
|
||||
dom_name = searchClusterByUUID(zk_conn, domain)
|
||||
dom_uuid = searchClusterByName(zk_conn, dom_name)
|
||||
|
@ -157,6 +158,14 @@ def getDomainName(zk_conn, domain):
|
|||
|
||||
return dom_name
|
||||
|
||||
def getDomainDisks(zk_conn, dom_uuid):
    """
    Return the list of disk names attached to the domain identified by
    dom_uuid, as parsed from its XML information in Zookeeper.

    Each entry of domain_information['disks'] is a mapping carrying at
    least a 'name' key (e.g. "pool/volume" for RBD-backed disks).
    """
    domain_information = getInformationFromXML(zk_conn, dom_uuid)
    # Comprehension replaces the manual append loop (same result, idiomatic)
    return [disk['name'] for disk in domain_information['disks']]
|
||||
|
||||
#
|
||||
# Direct functions
|
||||
#
|
||||
|
@ -214,19 +223,24 @@ def dump_vm(zk_conn, domain):
|
|||
|
||||
return True, vm_xml
|
||||
|
||||
def purge_vm(zk_conn, domain, is_cli=False):
|
||||
"""
|
||||
Helper function for both undefine and remove VM to perform the shutdown, termination,
|
||||
and configuration deletion.
|
||||
"""
|
||||
|
||||
def undefine_vm(zk_conn, domain, is_cli=False):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
common.stopZKConnection(zk_conn)
|
||||
return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
|
||||
|
||||
# Shut down the VM
|
||||
try:
|
||||
current_vm_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
|
||||
current_vm_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(domain))
|
||||
if current_vm_state != 'stop':
|
||||
if is_cli:
|
||||
click.echo('Forcibly stopping VM "{}".'.format(dom_uuid))
|
||||
click.echo('Forcibly stopping VM "{}".'.format(domain))
|
||||
# Set the domain into stop mode
|
||||
zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'stop'})
|
||||
|
||||
|
@ -234,30 +248,65 @@ def undefine_vm(zk_conn, domain, is_cli=False):
|
|||
if is_cli:
|
||||
click.echo('Waiting for cluster to update.')
|
||||
time.sleep(2)
|
||||
except:
|
||||
pass
|
||||
|
||||
# Gracefully terminate the class instances
|
||||
try:
|
||||
if is_cli:
|
||||
click.echo('Deleting VM "{}" from nodes.'.format(dom_uuid))
|
||||
click.echo('Deleting VM "{}" from nodes.'.format(domain))
|
||||
zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'delete'})
|
||||
time.sleep(2)
|
||||
except:
|
||||
pass
|
||||
|
||||
# Delete the configurations
|
||||
try:
|
||||
if is_cli:
|
||||
click.echo('Undefining VM "{}".'.format(dom_uuid))
|
||||
click.echo('Undefining VM "{}".'.format(domain))
|
||||
zkhandler.deletekey(zk_conn, '/domains/{}'.format(dom_uuid))
|
||||
except:
|
||||
pass
|
||||
|
||||
return True, 'Removed VM "{}" from the cluster.'.format(dom_uuid)
|
||||
return True, 'Undefined VM "{}" from the cluster.'.format(domain)
|
||||
|
||||
def remove_vm(zk_conn, domain, is_cli=False):
    """
    Stop virtual machine `domain` (a name or UUID), delete its definition
    from the cluster, and remove all of its disks via the Ceph client
    library.

    Returns a (success, message) tuple; `is_cli` enables progress output
    via click.echo. NOTE(review): unlike undefine_vm, the Zookeeper
    operations here are not wrapped in try/except — a ZK failure will
    propagate to the caller.
    """
    # Validate that VM exists in cluster
    dom_uuid = getDomainUUID(zk_conn, domain)
    if not dom_uuid:
        common.stopZKConnection(zk_conn)
        return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)

    # Capture the disk list BEFORE deleting the domain definition,
    # since it is derived from the domain's XML in Zookeeper
    disk_list = getDomainDisks(zk_conn, dom_uuid)

    # Shut down the VM if it is not already stopped
    current_vm_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
    if current_vm_state != 'stop':
        if is_cli:
            click.echo('Forcibly stopping VM "{}".'.format(domain))
        # Set the domain into stop mode
        zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'stop'})

        # Wait to allow the state change to flow to all nodes
        if is_cli:
            click.echo('Waiting for cluster to update.')
        time.sleep(2)

    # Gracefully terminate the class instances on the nodes
    if is_cli:
        click.echo('Deleting VM "{}" from nodes.'.format(domain))
    zkhandler.writedata(zk_conn, {'/domains/{}/state'.format(dom_uuid): 'delete'})
    time.sleep(2)

    # Delete the domain configuration from Zookeeper
    if is_cli:
        click.echo('Undefining VM "{}".'.format(domain))
    zkhandler.deletekey(zk_conn, '/domains/{}'.format(dom_uuid))

    # Remove the disks captured earlier
    for disk in disk_list:
        # Disk names are of the form vmpool/vmname_volume
        disk_pool, disk_name = disk.split('/')
        retcode, message = ceph.remove_volume(zk_conn, disk_pool, disk_name)
        # Surface any per-volume message (success or failure) to the CLI user
        if is_cli and message:
            click.echo('{}'.format(message))

    return True, 'Removed VM "{}" and disks from the cluster.'.format(domain)
|
||||
|
||||
def start_vm(zk_conn, domain):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
common.stopZKConnection(zk_conn)
|
||||
|
@ -269,7 +318,7 @@ def start_vm(zk_conn, domain):
|
|||
return True, 'Starting VM "{}".'.format(dom_uuid)
|
||||
|
||||
def restart_vm(zk_conn, domain):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
common.stopZKConnection(zk_conn)
|
||||
|
@ -287,7 +336,7 @@ def restart_vm(zk_conn, domain):
|
|||
return True, 'Restarting VM "{}".'.format(dom_uuid)
|
||||
|
||||
def shutdown_vm(zk_conn, domain):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
|
||||
|
@ -304,7 +353,7 @@ def shutdown_vm(zk_conn, domain):
|
|||
return True, 'Shutting down VM "{}".'.format(dom_uuid)
|
||||
|
||||
def stop_vm(zk_conn, domain):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
common.stopZKConnection(zk_conn)
|
||||
|
@ -319,7 +368,7 @@ def stop_vm(zk_conn, domain):
|
|||
return True, 'Forcibly stopping VM "{}".'.format(dom_uuid)
|
||||
|
||||
def move_vm(zk_conn, domain, target_node, selector):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
common.stopZKConnection(zk_conn)
|
||||
|
@ -356,7 +405,7 @@ def move_vm(zk_conn, domain, target_node, selector):
|
|||
return True, 'Permanently migrating VM "{}" to node "{}".'.format(dom_uuid, target_node)
|
||||
|
||||
def migrate_vm(zk_conn, domain, target_node, selector, force_migrate, is_cli=False):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
common.stopZKConnection(zk_conn)
|
||||
|
@ -404,7 +453,7 @@ def migrate_vm(zk_conn, domain, target_node, selector, force_migrate, is_cli=Fal
|
|||
return True, 'Migrating VM "{}" to node "{}".'.format(dom_uuid, target_node)
|
||||
|
||||
def unmigrate_vm(zk_conn, domain):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
common.stopZKConnection(zk_conn)
|
||||
|
@ -433,7 +482,7 @@ def unmigrate_vm(zk_conn, domain):
|
|||
return True, 'Unmigrating VM "{}" back to node "{}".'.format(dom_uuid, target_node)
|
||||
|
||||
def get_console_log(zk_conn, domain, lines=1000):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
|
||||
|
@ -455,7 +504,7 @@ def get_console_log(zk_conn, domain, lines=1000):
|
|||
return True, ''
|
||||
|
||||
def follow_console_log(zk_conn, domain, lines=10):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
|
||||
|
@ -498,7 +547,7 @@ def follow_console_log(zk_conn, domain, lines=10):
|
|||
return True, ''
|
||||
|
||||
def get_info(zk_conn, domain):
|
||||
# Validate and obtain alternate passed value
|
||||
# Validate that VM exists in cluster
|
||||
dom_uuid = getDomainUUID(zk_conn, domain)
|
||||
if not dom_uuid:
|
||||
common.stopZKConnection(zk_conn)
|
||||
|
|
Loading…
Reference in New Issue