Add VM device hot attach/detach support
Adds a new API endpoint to support hot attach/detach of devices, and the corresponding client-side logic to use this endpoint when doing VM network/storage add/remove actions. The live attach is now the default behaviour for these types of additions and removals, and can be disabled if needed. Closes #141
This commit is contained in:
parent
46f1d761f6
commit
e962743e51
|
@ -2003,6 +2003,72 @@ class API_VM_Rename(Resource):
|
|||
api.add_resource(API_VM_Rename, '/vm/<vm>/rename')
|
||||
|
||||
|
||||
# /vm/<vm>/device
class API_VM_Device(Resource):
    """
    Hot attach/detach of arbitrary Libvirt device XML to/from a running VM.
    """
    @RequestParser([
        {'name': 'xml', 'required': True, 'helptext': "A Libvirt XML device document must be specified"},
    ])
    @Authenticator
    def post(self, vm, reqargs):
        """
        Hot-attach device XML to {vm}
        ---
        tags:
          - vm
        parameters:
          - in: query
            name: xml
            type: string
            required: true
            description: The raw Libvirt XML definition of the device to attach
        responses:
          200:
            description: OK
            schema:
              type: object
              id: Message
          400:
            description: Bad request
            schema:
              type: object
              id: Message
        """
        return api_helper.vm_attach_device(vm, reqargs.get('xml', None))

    @RequestParser([
        {'name': 'xml', 'required': True, 'helptext': "A Libvirt XML device document must be specified"},
    ])
    @Authenticator
    def delete(self, vm, reqargs):
        """
        Hot-detach device XML from {vm}
        ---
        tags:
          - vm
        parameters:
          - in: query
            name: xml
            type: string
            required: true
            description: The raw Libvirt XML definition of the device to detach
        responses:
          200:
            description: OK
            schema:
              type: object
              id: Message
          400:
            description: Bad request
            schema:
              type: object
              id: Message
        """
        return api_helper.vm_detach_device(vm, reqargs.get('xml', None))


api.add_resource(API_VM_Device, '/vm/<vm>/device')
|
||||
|
||||
|
||||
##########################################################
|
||||
# Client API - Network
|
||||
##########################################################
|
||||
|
|
|
@ -491,6 +491,58 @@ def vm_define(zkhandler, xml, node, limit, selector, autostart, migration_method
|
|||
return output, retcode
|
||||
|
||||
|
||||
@ZKConnection(config)
def vm_attach_device(zkhandler, vm, device_spec_xml):
    """
    Hot-attach a device (via XML spec) to a VM.
    """
    # Reject the request up front if the XML does not even parse
    try:
        etree.fromstring(device_spec_xml)
    except Exception as e:
        return {'message': 'XML is malformed or incorrect: {}'.format(e)}, 400

    success, detail = pvc_vm.attach_vm_device(zkhandler, vm, device_spec_xml)

    if success:
        # Normalize double quotes so the message is JSON-safe
        return {'message': detail.replace('"', "'")}, 200

    return {'message': 'WARNING: Failed to perform hot attach; device will be added on next VM start/restart.'}, 400
|
||||
|
||||
|
||||
@ZKConnection(config)
def vm_detach_device(zkhandler, vm, device_spec_xml):
    """
    Hot-detach a device (via XML spec) from a VM.
    """
    # Reject the request up front if the XML does not even parse
    try:
        etree.fromstring(device_spec_xml)
    except Exception as e:
        return {'message': 'XML is malformed or incorrect: {}'.format(e)}, 400

    success, detail = pvc_vm.detach_vm_device(zkhandler, vm, device_spec_xml)

    if success:
        # Normalize double quotes so the message is JSON-safe
        return {'message': detail.replace('"', "'")}, 200

    return {'message': 'WARNING: Failed to perform hot detach; device will be removed on next VM start/restart.'}, 400
|
||||
|
||||
|
||||
@pvc_common.Profiler(config)
|
||||
@ZKConnection(config)
|
||||
def get_vm_meta(zkhandler, vm):
|
||||
|
|
|
@ -134,6 +134,48 @@ def vm_modify(config, vm, xml, restart):
|
|||
return retstatus, response.json().get('message', '')
|
||||
|
||||
|
||||
def vm_device_attach(config, vm, xml):
    """
    Attach a device to a VM

    API endpoint: POST /vm/{vm}/device
    API arguments: xml={xml}
    API schema: {"message":"{data}"}
    """
    payload = {
        'xml': xml
    }
    response = call_api(config, 'post', '/vm/{vm}/device'.format(vm=vm), data=payload)

    # Success is simply an HTTP 200 from the API
    return response.status_code == 200, response.json().get('message', '')
|
||||
|
||||
|
||||
def vm_device_detach(config, vm, xml):
    """
    Detach a device from a VM

    API endpoint: DELETE /vm/{vm}/device
    API arguments: xml={xml}
    API schema: {"message":"{data}"}
    """
    payload = {
        'xml': xml
    }
    response = call_api(config, 'delete', '/vm/{vm}/device'.format(vm=vm), data=payload)

    # Success is simply an HTTP 200 from the API
    return response.status_code == 200, response.json().get('message', '')
|
||||
|
||||
|
||||
def vm_rename(config, vm, new_name):
|
||||
"""
|
||||
Rename VM to new name
|
||||
|
@ -618,13 +660,15 @@ def format_vm_memory(config, name, memory):
|
|||
return '\n'.join(output_list)
|
||||
|
||||
|
||||
def vm_networks_add(config, vm, network, macaddr, model, sriov, sriov_mode, restart):
|
||||
def vm_networks_add(config, vm, network, macaddr, model, sriov, sriov_mode, live, restart):
|
||||
"""
|
||||
Add a new network to the VM
|
||||
|
||||
Calls vm_info to get the VM XML.
|
||||
|
||||
Calls vm_modify to set the VM XML.
|
||||
|
||||
Calls vm_device_attach if live to hot-attach the device.
|
||||
"""
|
||||
from lxml.objectify import fromstring
|
||||
from lxml.etree import tostring
|
||||
|
@ -747,16 +791,36 @@ def vm_networks_add(config, vm, network, macaddr, model, sriov, sriov_mode, rest
|
|||
except Exception:
|
||||
return False, 'ERROR: Failed to dump XML data.'
|
||||
|
||||
return vm_modify(config, vm, new_xml, restart)
|
||||
modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart)
|
||||
|
||||
if not modify_retcode:
|
||||
return modify_retcode, modify_retmsg
|
||||
|
||||
if live:
|
||||
attach_retcode, attach_retmsg = vm_device_attach(config, vm, device_string)
|
||||
|
||||
if not attach_retcode:
|
||||
retcode = attach_retcode
|
||||
retmsg = attach_retmsg
|
||||
else:
|
||||
retcode = attach_retcode
|
||||
retmsg = "Network '{}' successfully added to VM config and hot attached to running VM.".format(network)
|
||||
else:
|
||||
retcode = modify_retcode
|
||||
retmsg = modify_retmsg
|
||||
|
||||
return retcode, retmsg
|
||||
|
||||
|
||||
def vm_networks_remove(config, vm, network, sriov, restart):
|
||||
def vm_networks_remove(config, vm, network, sriov, live, restart):
|
||||
"""
|
||||
Remove a network to the VM
|
||||
|
||||
Calls vm_info to get the VM XML.
|
||||
|
||||
Calls vm_modify to set the VM XML.
|
||||
|
||||
Calls vm_device_detach to hot-remove the device.
|
||||
"""
|
||||
from lxml.objectify import fromstring
|
||||
from lxml.etree import tostring
|
||||
|
@ -775,6 +839,7 @@ def vm_networks_remove(config, vm, network, sriov, restart):
|
|||
return False, 'ERROR: Failed to parse XML data.'
|
||||
|
||||
changed = False
|
||||
device_string = None
|
||||
for interface in parsed_xml.devices.find('interface'):
|
||||
if sriov:
|
||||
if interface.attrib.get('type') == 'hostdev':
|
||||
|
@ -792,16 +857,37 @@ def vm_networks_remove(config, vm, network, sriov, restart):
|
|||
if network == if_vni:
|
||||
interface.getparent().remove(interface)
|
||||
changed = True
|
||||
if changed:
|
||||
device_string = tostring(interface)
|
||||
|
||||
if changed:
|
||||
try:
|
||||
new_xml = tostring(parsed_xml, pretty_print=True)
|
||||
except Exception:
|
||||
return False, 'ERROR: Failed to dump XML data.'
|
||||
|
||||
return vm_modify(config, vm, new_xml, restart)
|
||||
else:
|
||||
return False, 'ERROR: Network "{}" does not exist on VM.'.format(network)
|
||||
|
||||
modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart)
|
||||
|
||||
if not modify_retcode:
|
||||
return modify_retcode, modify_retmsg
|
||||
|
||||
if live and device_string:
|
||||
detach_retcode, detach_retmsg = vm_device_detach(config, vm, device_string)
|
||||
|
||||
if not detach_retcode:
|
||||
retcode = detach_retcode
|
||||
retmsg = detach_retmsg
|
||||
else:
|
||||
retcode = detach_retcode
|
||||
retmsg = "Network '{}' successfully removed from VM config and hot detached from running VM.".format(network)
|
||||
else:
|
||||
retcode = modify_retcode
|
||||
retmsg = modify_retmsg
|
||||
|
||||
return retcode, retmsg
|
||||
|
||||
|
||||
def vm_networks_get(config, vm):
|
||||
"""
|
||||
|
@ -913,7 +999,7 @@ def format_vm_networks(config, name, networks):
|
|||
return '\n'.join(output_list)
|
||||
|
||||
|
||||
def vm_volumes_add(config, vm, volume, disk_id, bus, disk_type, restart):
|
||||
def vm_volumes_add(config, vm, volume, disk_id, bus, disk_type, live, restart):
|
||||
"""
|
||||
Add a new volume to the VM
|
||||
|
||||
|
@ -1001,6 +1087,7 @@ def vm_volumes_add(config, vm, volume, disk_id, bus, disk_type, restart):
|
|||
new_disk_details.source.set('name', volume)
|
||||
elif disk_type == 'file':
|
||||
new_disk_details.source.set('file', volume)
|
||||
device_xml = new_disk_details
|
||||
|
||||
all_disks = parsed_xml.devices.find('disk')
|
||||
if all_disks is None:
|
||||
|
@ -1008,18 +1095,42 @@ def vm_volumes_add(config, vm, volume, disk_id, bus, disk_type, restart):
|
|||
for disk in all_disks:
|
||||
last_disk = disk
|
||||
|
||||
if last_disk is None:
|
||||
parsed_xml.devices.find('emulator').addprevious(new_disk_details)
|
||||
# Add the disk at the end of the list (or, right above emulator)
|
||||
if len(all_disks) > 0:
|
||||
for idx, disk in enumerate(parsed_xml.devices.find('disk')):
|
||||
if idx == len(all_disks) - 1:
|
||||
disk.addnext(device_xml)
|
||||
else:
|
||||
parsed_xml.devices.find('emulator').addprevious(device_xml)
|
||||
|
||||
try:
|
||||
new_xml = tostring(parsed_xml, pretty_print=True)
|
||||
except Exception:
|
||||
return False, 'ERROR: Failed to dump XML data.'
|
||||
|
||||
return vm_modify(config, vm, new_xml, restart)
|
||||
modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart)
|
||||
|
||||
if not modify_retcode:
|
||||
return modify_retcode, modify_retmsg
|
||||
|
||||
if live:
|
||||
device_string = tostring(device_xml)
|
||||
attach_retcode, attach_retmsg = vm_device_attach(config, vm, device_string)
|
||||
|
||||
if not attach_retcode:
|
||||
retcode = attach_retcode
|
||||
retmsg = attach_retmsg
|
||||
else:
|
||||
retcode = attach_retcode
|
||||
retmsg = "Volume '{}/{}' successfully added to VM config and hot attached to running VM.".format(vpool, vname)
|
||||
else:
|
||||
retcode = modify_retcode
|
||||
retmsg = modify_retmsg
|
||||
|
||||
return retcode, retmsg
|
||||
|
||||
|
||||
def vm_volumes_remove(config, vm, volume, restart):
|
||||
def vm_volumes_remove(config, vm, volume, live, restart):
|
||||
"""
|
||||
Remove a volume to the VM
|
||||
|
||||
|
@ -1043,19 +1154,44 @@ def vm_volumes_remove(config, vm, volume, restart):
|
|||
except Exception:
|
||||
return False, 'ERROR: Failed to parse XML data.'
|
||||
|
||||
changed = False
|
||||
device_string = None
|
||||
for disk in parsed_xml.devices.find('disk'):
|
||||
disk_name = disk.source.attrib.get('name')
|
||||
if not disk_name:
|
||||
disk_name = disk.source.attrib.get('file')
|
||||
if volume == disk_name:
|
||||
device_string = tostring(disk)
|
||||
disk.getparent().remove(disk)
|
||||
changed = True
|
||||
|
||||
try:
|
||||
new_xml = tostring(parsed_xml, pretty_print=True)
|
||||
except Exception:
|
||||
return False, 'ERROR: Failed to dump XML data.'
|
||||
if changed:
|
||||
try:
|
||||
new_xml = tostring(parsed_xml, pretty_print=True)
|
||||
except Exception:
|
||||
return False, 'ERROR: Failed to dump XML data.'
|
||||
else:
|
||||
return False, 'ERROR: Volume "{}" does not exist on VM.'.format(volume)
|
||||
|
||||
return vm_modify(config, vm, new_xml, restart)
|
||||
modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart)
|
||||
|
||||
if not modify_retcode:
|
||||
return modify_retcode, modify_retmsg
|
||||
|
||||
if live and device_string:
|
||||
detach_retcode, detach_retmsg = vm_device_detach(config, vm, device_string)
|
||||
|
||||
if not detach_retcode:
|
||||
retcode = detach_retcode
|
||||
retmsg = detach_retmsg
|
||||
else:
|
||||
retcode = detach_retcode
|
||||
retmsg = "Volume '{}' successfully removed from VM config and hot detached from running VM.".format(volume)
|
||||
else:
|
||||
retcode = modify_retcode
|
||||
retmsg = modify_retmsg
|
||||
|
||||
return retcode, retmsg
|
||||
|
||||
|
||||
def vm_volumes_get(config, vm):
|
||||
|
|
|
@ -1472,7 +1472,7 @@ def vm_network_get(domain, raw):
|
|||
help='The model for the interface; must be a valid libvirt model. Not used for "netdev" SR-IOV NETs.'
|
||||
)
|
||||
@click.option(
|
||||
'-s', '--sriov', 'sriov', is_flag=True, default=False,
|
||||
'-s', '--sriov', 'sriov_flag', is_flag=True, default=False,
|
||||
help='Identify that NET is an SR-IOV device name and not a VNI. Required for adding SR-IOV NETs.'
|
||||
)
|
||||
@click.option(
|
||||
|
@ -1481,16 +1481,20 @@ def vm_network_get(domain, raw):
|
|||
help='For SR-IOV NETs, the SR-IOV network device mode.'
|
||||
)
|
||||
@click.option(
|
||||
'-r', '--restart', 'restart', is_flag=True, default=False,
|
||||
help='Immediately restart VM to apply new config.'
|
||||
'-l/-L', '--live/--no-live', 'live_flag', is_flag=True, default=True,
|
||||
help='Immediately live-attach device to VM [default] or disable this behaviour.'
|
||||
)
|
||||
@click.option(
|
||||
'-r', '--restart', 'restart_flag', is_flag=True, default=False,
|
||||
help='Immediately restart VM to apply new config; requires "--no-live".'
|
||||
)
|
||||
@click.option(
|
||||
'-y', '--yes', 'confirm_flag',
|
||||
is_flag=True, default=False,
|
||||
help='Confirm the restart'
|
||||
help='Confirm the VM restart.'
|
||||
)
|
||||
@cluster_req
|
||||
def vm_network_add(domain, net, macaddr, model, sriov, sriov_mode, restart, confirm_flag):
|
||||
def vm_network_add(domain, net, macaddr, model, sriov_flag, sriov_mode, live_flag, restart_flag, confirm_flag):
|
||||
"""
|
||||
Add the network NET to the virtual machine DOMAIN. Networks are always addded to the end of the current list of networks in the virtual machine.
|
||||
|
||||
|
@ -1503,15 +1507,17 @@ def vm_network_add(domain, net, macaddr, model, sriov, sriov_mode, restart, conf
|
|||
2. If an identical SR-IOV VF device is not present on the target node, post-migration startup will fail. It may be prudent to use a node limit here.
|
||||
|
||||
"""
|
||||
if restart and not confirm_flag and not config['unsafe']:
|
||||
if restart_flag and live_flag:
|
||||
click.echo('WARNING: Live flag and restart flag both specified; this can cause unintended behaviour. To disable live changes, use "--no-live".')
|
||||
exit(1)
|
||||
|
||||
if restart_flag and not confirm_flag and not config['unsafe']:
|
||||
try:
|
||||
click.confirm('Restart VM {}'.format(domain), prompt_suffix='? ', abort=True)
|
||||
except Exception:
|
||||
restart = False
|
||||
restart_flag = False
|
||||
|
||||
retcode, retmsg = pvc_vm.vm_networks_add(config, domain, net, macaddr, model, sriov, sriov_mode, restart)
|
||||
if retcode and not restart:
|
||||
retmsg = retmsg + " Changes will be applied on next VM start/restart."
|
||||
retcode, retmsg = pvc_vm.vm_networks_add(config, domain, net, macaddr, model, sriov_flag, sriov_mode, live_flag, restart_flag)
|
||||
cleanup(retcode, retmsg)
|
||||
|
||||
|
||||
|
@ -1526,34 +1532,40 @@ def vm_network_add(domain, net, macaddr, model, sriov, sriov_mode, restart, conf
|
|||
'net'
|
||||
)
|
||||
@click.option(
|
||||
'-s', '--sriov', 'sriov', is_flag=True, default=False,
|
||||
'-s', '--sriov', 'sriov_flag', is_flag=True, default=False,
|
||||
help='Identify that NET is an SR-IOV device name and not a VNI. Required for removing SR-IOV NETs.'
|
||||
)
|
||||
@click.option(
|
||||
'-r', '--restart', 'restart', is_flag=True, default=False,
|
||||
help='Immediately restart VM to apply new config.'
|
||||
'-l/-L', '--live/--no-live', 'live_flag', is_flag=True, default=True,
|
||||
help='Immediately live-attach device to VM [default] or disable this behaviour.'
|
||||
)
|
||||
@click.option(
|
||||
'-r', '--restart', 'restart_flag', is_flag=True, default=False,
|
||||
help='Immediately restart VM to apply new config; requires "--no-live".'
|
||||
)
|
||||
@click.option(
|
||||
'-y', '--yes', 'confirm_flag',
|
||||
is_flag=True, default=False,
|
||||
help='Confirm the restart'
|
||||
help='Confirm the restart.'
|
||||
)
|
||||
@cluster_req
|
||||
def vm_network_remove(domain, net, sriov, restart, confirm_flag):
|
||||
def vm_network_remove(domain, net, sriov_flag, live_flag, restart_flag, confirm_flag):
|
||||
"""
|
||||
Remove the network NET from the virtual machine DOMAIN.
|
||||
|
||||
NET may be a PVC network VNI, which is added as a bridged device, or a SR-IOV VF device connected in the given mode.
|
||||
"""
|
||||
if restart and not confirm_flag and not config['unsafe']:
|
||||
if restart_flag and live_flag:
|
||||
click.echo('WARNING: Live flag and restart flag both specified; this can cause unintended behaviour. To disable live changes, use "--no-live".')
|
||||
exit(1)
|
||||
|
||||
if restart_flag and not confirm_flag and not config['unsafe']:
|
||||
try:
|
||||
click.confirm('Restart VM {}'.format(domain), prompt_suffix='? ', abort=True)
|
||||
except Exception:
|
||||
restart = False
|
||||
restart_flag = False
|
||||
|
||||
retcode, retmsg = pvc_vm.vm_networks_remove(config, domain, net, sriov, restart)
|
||||
if retcode and not restart:
|
||||
retmsg = retmsg + " Changes will be applied on next VM start/restart."
|
||||
retcode, retmsg = pvc_vm.vm_networks_remove(config, domain, net, sriov_flag, live_flag, restart_flag)
|
||||
cleanup(retcode, retmsg)
|
||||
|
||||
|
||||
|
@ -1623,8 +1635,12 @@ def vm_volume_get(domain, raw):
|
|||
help='The type of volume to add.'
|
||||
)
|
||||
@click.option(
|
||||
'-r', '--restart', 'restart', is_flag=True, default=False,
|
||||
help='Immediately restart VM to apply new config.'
|
||||
'-l/-L', '--live/--no-live', 'live_flag', is_flag=True, default=True,
|
||||
help='Immediately live-attach device to VM [default] or disable this behaviour.'
|
||||
)
|
||||
@click.option(
|
||||
'-r', '--restart', 'restart_flag', is_flag=True, default=False,
|
||||
help='Immediately restart VM to apply new config; requires "--no-live".'
|
||||
)
|
||||
@click.option(
|
||||
'-y', '--yes', 'confirm_flag',
|
||||
|
@ -1632,21 +1648,23 @@ def vm_volume_get(domain, raw):
|
|||
help='Confirm the restart'
|
||||
)
|
||||
@cluster_req
|
||||
def vm_volume_add(domain, volume, disk_id, bus, disk_type, restart, confirm_flag):
|
||||
def vm_volume_add(domain, volume, disk_id, bus, disk_type, live_flag, restart_flag, confirm_flag):
|
||||
"""
|
||||
Add the volume VOLUME to the virtual machine DOMAIN.
|
||||
|
||||
VOLUME may be either an absolute file path (for type 'file') or an RBD volume in the form "pool/volume" (for type 'rbd'). RBD volumes are verified against the cluster before adding and must exist.
|
||||
"""
|
||||
if restart and not confirm_flag and not config['unsafe']:
|
||||
if restart_flag and live_flag:
|
||||
click.echo('WARNING: Live flag and restart flag both specified; this can cause unintended behaviour. To disable live changes, use "--no-live".')
|
||||
exit(1)
|
||||
|
||||
if restart_flag and not confirm_flag and not config['unsafe']:
|
||||
try:
|
||||
click.confirm('Restart VM {}'.format(domain), prompt_suffix='? ', abort=True)
|
||||
except Exception:
|
||||
restart = False
|
||||
restart_flag = False
|
||||
|
||||
retcode, retmsg = pvc_vm.vm_volumes_add(config, domain, volume, disk_id, bus, disk_type, restart)
|
||||
if retcode and not restart:
|
||||
retmsg = retmsg + " Changes will be applied on next VM start/restart."
|
||||
retcode, retmsg = pvc_vm.vm_volumes_add(config, domain, volume, disk_id, bus, disk_type, live_flag, restart_flag)
|
||||
cleanup(retcode, retmsg)
|
||||
|
||||
|
||||
|
@ -1661,8 +1679,12 @@ def vm_volume_add(domain, volume, disk_id, bus, disk_type, restart, confirm_flag
|
|||
'volume'
|
||||
)
|
||||
@click.option(
|
||||
'-r', '--restart', 'restart', is_flag=True, default=False,
|
||||
help='Immediately restart VM to apply new config.'
|
||||
'-l/-L', '--live/--no-live', 'live_flag', is_flag=True, default=True,
|
||||
help='Immediately live-attach device to VM [default] or disable this behaviour.'
|
||||
)
|
||||
@click.option(
|
||||
'-r', '--restart', 'restart_flag', is_flag=True, default=False,
|
||||
help='Immediately restart VM to apply new config; requires "--no-live".'
|
||||
)
|
||||
@click.option(
|
||||
'-y', '--yes', 'confirm_flag',
|
||||
|
@ -1670,19 +1692,21 @@ def vm_volume_add(domain, volume, disk_id, bus, disk_type, restart, confirm_flag
|
|||
help='Confirm the restart'
|
||||
)
|
||||
@cluster_req
|
||||
def vm_volume_remove(domain, volume, restart, confirm_flag):
|
||||
def vm_volume_remove(domain, volume, live_flag, restart_flag, confirm_flag):
|
||||
"""
|
||||
Remove VOLUME from the virtual machine DOMAIN; VOLUME must be a file path or RBD path in 'pool/volume' format.
|
||||
"""
|
||||
if restart and not confirm_flag and not config['unsafe']:
|
||||
if restart_flag and live_flag:
|
||||
click.echo('WARNING: Live flag and restart flag both specified; this can cause unintended behaviour. To disable live changes, use "--no-live".')
|
||||
exit(1)
|
||||
|
||||
if restart_flag and not confirm_flag and not config['unsafe']:
|
||||
try:
|
||||
click.confirm('Restart VM {}'.format(domain), prompt_suffix='? ', abort=True)
|
||||
except Exception:
|
||||
restart = False
|
||||
restart_flag = False
|
||||
|
||||
retcode, retmsg = pvc_vm.vm_volumes_remove(config, domain, volume, restart)
|
||||
if retcode and not restart:
|
||||
retmsg = retmsg + " Changes will be applied on next VM start/restart."
|
||||
retcode, retmsg = pvc_vm.vm_volumes_remove(config, domain, volume, live_flag, restart_flag)
|
||||
cleanup(retcode, retmsg)
|
||||
|
||||
|
||||
|
|
|
@ -105,14 +105,14 @@ def getDomainName(zkhandler, domain):
|
|||
# Helper functions
|
||||
#
|
||||
def change_state(zkhandler, dom_uuid, new_state):
|
||||
lock = zkhandler.exclusivelock(('domain.state', dom_uuid))
|
||||
with lock:
|
||||
zkhandler.write([
|
||||
(('domain.state', dom_uuid), new_state)
|
||||
])
|
||||
lock = zkhandler.exclusivelock(('domain.state', dom_uuid))
|
||||
with lock:
|
||||
zkhandler.write([
|
||||
(('domain.state', dom_uuid), new_state)
|
||||
])
|
||||
|
||||
# Wait for 1/2 second to allow state to flow to all nodes
|
||||
time.sleep(0.5)
|
||||
# Wait for 1/2 second to allow state to flow to all nodes
|
||||
time.sleep(0.5)
|
||||
|
||||
|
||||
#
|
||||
|
@ -262,6 +262,94 @@ def define_vm(zkhandler, config_data, target_node, node_limit, node_selector, no
|
|||
return True, 'Added new VM with Name "{}" and UUID "{}" to database.'.format(dom_name, dom_uuid)
|
||||
|
||||
|
||||
def attach_vm_device(zkhandler, domain, device_spec_xml):
    """
    Hot-attach a device (raw Libvirt XML spec) to a running VM.

    Pushes an 'attach_device' command onto the cluster command queue
    (base.cmd.domain) and waits for the owning node to report back a
    success-/failure- result, then clears the queue.

    Returns a (success, message) tuple.
    """
    # Validate that VM exists in cluster
    dom_uuid = getDomainUUID(zkhandler, domain)
    if not dom_uuid:
        return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)

    # Verify that the VM is running; hot attach only makes sense on a live domain
    state = zkhandler.read(('domain.state', dom_uuid))
    if state != 'start':
        return False, 'ERROR: VM "{}" is not in started state; live-add unnecessary.'.format(domain)

    # Tell the cluster to attach the device
    attach_device_string = 'attach_device {} {}'.format(dom_uuid, device_spec_xml)
    zkhandler.write([
        ('base.cmd.domain', attach_device_string)
    ])
    # Wait 1/2 second for the cluster to get the message and start working
    time.sleep(0.5)
    # Acquire a read lock, so we get the return exclusively
    lock = zkhandler.readlock('base.cmd.domain')
    with lock:
        try:
            result = zkhandler.read('base.cmd.domain').split()[0]
            if result == 'success-attach_device':
                message = 'Attached device on VM "{}"'.format(domain)
                success = True
            else:
                message = 'ERROR: Failed to attach device on VM "{}"; check node logs for details.'.format(domain)
                success = False
        except Exception:
            # Node never picked the command up (queue empty/unparsable)
            message = 'ERROR: Command ignored by node.'
            success = False

    # Acquire a write lock to ensure things go smoothly
    lock = zkhandler.writelock('base.cmd.domain')
    with lock:
        time.sleep(0.5)
        # Clear the command queue for the next user
        zkhandler.write([
            ('base.cmd.domain', '')
        ])

    return success, message
|
||||
|
||||
|
||||
def detach_vm_device(zkhandler, domain, device_spec_xml):
    """
    Hot-detach a device (raw Libvirt XML spec) from a running VM.

    Pushes a 'detach_device' command onto the cluster command queue
    (base.cmd.domain) and waits for the owning node to report back a
    success-/failure- result, then clears the queue.

    Returns a (success, message) tuple.
    """
    # Validate that VM exists in cluster
    dom_uuid = getDomainUUID(zkhandler, domain)
    if not dom_uuid:
        return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)

    # Verify that the VM is running; hot detach only makes sense on a live domain
    state = zkhandler.read(('domain.state', dom_uuid))
    if state != 'start':
        return False, 'ERROR: VM "{}" is not in started state; live-remove unnecessary.'.format(domain)

    # Tell the cluster to detach the device
    detach_device_string = 'detach_device {} {}'.format(dom_uuid, device_spec_xml)
    zkhandler.write([
        ('base.cmd.domain', detach_device_string)
    ])
    # Wait 1/2 second for the cluster to get the message and start working
    time.sleep(0.5)
    # Acquire a read lock, so we get the return exclusively
    lock = zkhandler.readlock('base.cmd.domain')
    with lock:
        try:
            result = zkhandler.read('base.cmd.domain').split()[0]
            if result == 'success-detach_device':
                message = 'Detached device on VM "{}"'.format(domain)
                success = True
            else:
                message = 'ERROR: Failed to detach device on VM "{}"; check node logs for details.'.format(domain)
                success = False
        except Exception:
            # Node never picked the command up (queue empty/unparsable)
            message = 'ERROR: Command ignored by node.'
            success = False

    # Acquire a write lock to ensure things go smoothly
    lock = zkhandler.writelock('base.cmd.domain')
    with lock:
        time.sleep(0.5)
        # Clear the command queue for the next user
        zkhandler.write([
            ('base.cmd.domain', '')
        ])

    return success, message
|
||||
|
||||
|
||||
def modify_vm_metadata(zkhandler, domain, node_limit, node_selector, node_autostart, provisioner_profile, migration_method):
|
||||
dom_uuid = getDomainUUID(zkhandler, domain)
|
||||
if not dom_uuid:
|
||||
|
|
|
@ -28,6 +28,8 @@ from threading import Thread
|
|||
|
||||
from xml.etree import ElementTree
|
||||
|
||||
from re import match
|
||||
|
||||
import daemon_lib.common as common
|
||||
|
||||
import pvcnoded.objects.VMConsoleWatcherInstance as VMConsoleWatcherInstance
|
||||
|
@ -163,6 +165,34 @@ class VMInstance(object):
|
|||
(('domain.console.vnc', self.domuuid), '')
|
||||
])
|
||||
|
||||
# Attach a device to the running domain
def attach_device(self, xml_spec):
    """
    Hot-attach the given Libvirt device XML to this (running) domain.

    Returns True on success, False on failure or if the domain is not running.
    """
    log_prefix = 'Domain {}'.format(self.domuuid)

    if not self.dom:
        self.logger.out('Cannot attach device to non-running domain', state='w', prefix=log_prefix)
        return False

    try:
        self.logger.out('Attaching new device to VM', state='i', prefix=log_prefix)
        self.dom.attachDevice(xml_spec)
    except Exception as e:
        self.logger.out('Failed to attach device: {}'.format(e), state='e', prefix=log_prefix)
        return False

    return True
||||
|
||||
# Detach a device from the running domain
def detach_device(self, xml_spec):
    """
    Hot-detach the given Libvirt device XML from this (running) domain.

    Returns True on success, False on failure or if the domain is not running.
    """
    log_prefix = 'Domain {}'.format(self.domuuid)

    if not self.dom:
        self.logger.out('Cannot detach device from non-running domain', state='w', prefix=log_prefix)
        return False

    try:
        self.logger.out('Detaching device from VM', state='i', prefix=log_prefix)
        self.dom.detachDevice(xml_spec)
    except Exception as e:
        self.logger.out('Failed to detach device: {}'.format(e), state='e', prefix=log_prefix)
        return False

    return True
||||
|
||||
# Start up the VM
|
||||
def start_vm(self):
|
||||
# Start the log watcher
|
||||
|
@ -851,30 +881,51 @@ class VMInstance(object):
|
|||
# Primary command function
def vm_command(zkhandler, logger, this_node, data):
    """
    Parse and execute a cluster domain command from the base.cmd.domain queue.

    Commands have the form "<command> <dom_uuid> [args...]"; the result is
    written back to the queue as "success-<data>" or "failure-<data>".
    Echoed result writes (success-/failure-) and the empty clear-write are
    ignored so the watcher does not loop on its own output.
    """
    # Ignore the empty write used to clear the queue; without this guard the
    # starred unpack below raises ValueError on data == ''
    if not data:
        return

    # Get the command and args
    command, dom_uuid, *args = data.split()

    # Ignore our own result writes echoed back to the watcher
    if match('success-.*', command) or match('failure-.*', command):
        return

    logger.out('Getting command "{}" for domain "{}"'.format(command, dom_uuid), state='i')

    # Verify that the VM is set to run on this node
    domain = this_node.d_domain.get(dom_uuid, None)
    if domain is None:
        return False

    if domain.getnode() != this_node.name:
        # Another node owns this VM; it will handle the command
        return

    # Lock the command queue
    zk_lock = zkhandler.writelock('base.cmd.domain')
    with zk_lock:
        # Flushing VM RBD locks
        if command == 'flush_locks':
            result = VMInstance.flush_locks(zkhandler, logger, dom_uuid, this_node)
        # Attaching a device
        elif command == 'attach_device':
            xml_spec = ' '.join(args)
            result = domain.attach_device(xml_spec)
        # Detaching a device
        elif command == 'detach_device':
            xml_spec = ' '.join(args)
            result = domain.detach_device(xml_spec)
        # Command not defined
        else:
            result = False

        # Command succeeded
        if result:
            # Update the command queue
            zkhandler.write([
                ('base.cmd.domain', 'success-{}'.format(data))
            ])
        # Command failed
        else:
            # Update the command queue
            zkhandler.write([
                ('base.cmd.domain', 'failure-{}'.format(data))
            ])

        # Wait 1 seconds before we free the lock, to ensure the client hits the lock
        time.sleep(1)
|
||||
|
|
Loading…
Reference in New Issue