Use built-in selector and respect limits in client

Use the new built-in selector option, and respect node limits, when
performing migrate or move actions on a VM via the clients.
Joshua Boniface 2019-10-12 01:45:44 -04:00
parent 8dc0c8f0ac
commit e5393082b6
5 changed files with 83 additions and 74 deletions
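In short, the selection policy moves out of the client calls and into per-VM Zookeeper keys. A minimal sketch of the new flow, assuming the key paths, zkhandler calls, and config.migration_target_selector fallback shown in the diff below (pick_target itself is a hypothetical wrapper written for illustration; the real entry point is common.findTargetHypervisor):

def pick_target(zk_conn, config, dom_uuid):
    # Per-VM policy is read from Zookeeper instead of a client-supplied argument
    selector = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(dom_uuid))
    node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',')
    # Fall back to the daemon-wide default if the VM does not set a selector
    if not selector or selector == 'None':
        selector = config.migration_target_selector
    # Dispatch to the matching search, constrained to the nodes in node_limit
    searches = {
        'mem': findTargetHypervisorMem,
        'load': findTargetHypervisorLoad,
        'vcpus': findTargetHypervisorVCPUs,
        'vms': findTargetHypervisorVMs,
    }
    search = searches.get(selector)
    return search(zk_conn, node_limit, dom_uuid) if search else None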

View File

@@ -425,12 +425,12 @@ def vm_stop(name):
}
return flask.jsonify(output), retcode
def vm_move(name, node, selector):
def vm_move(name, node):
"""
Move a VM to another node.
"""
zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_vm.move_vm(zk_conn, name, node, selector)
retflag, retdata = pvc_vm.move_vm(zk_conn, name, node)
if retflag:
retcode = 200
else:
@@ -442,12 +442,12 @@ def vm_move(name, node, selector):
}
return flask.jsonify(output), retcode
def vm_migrate(name, node, selector, flag_force):
def vm_migrate(name, node, flag_force):
"""
Temporarily migrate a VM to another node.
"""
zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_vm.migrate_vm(zk_conn, name, node, selector, flag_force)
retflag, retdata = pvc_vm.migrate_vm(zk_conn, name, node, flag_force)
if retflag:
retcode = 200
else:

View File

@@ -338,11 +338,6 @@ def api_vm_node(vm):
node = flask.request.values['node']
else:
node = None
# Get target selector
if 'selector' in flask.request.values:
selector = flask.request.values['selector']
else:
selector = None
# Get permanent flag
if 'permanent' in flask.request.values and flask.request.values['permanent']:
flag_permanent = True
@@ -358,9 +353,9 @@ def api_vm_node(vm):
is_migrated = pvcapi.vm_is_migrated(vm)
if action == 'migrate' and not flag_permanent:
return pvcapi.vm_migrate(vm, node, selector, flag_force)
return pvcapi.vm_migrate(vm, node, flag_force)
if action == 'migrate' and flag_permanent:
return pvcapi.vm_move(vm, node, selector)
return pvcapi.vm_move(vm, node)
if action == 'unmigrate' and is_migrated:
return pvcapi.vm_unmigrate(vm)
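With the selector handling gone, the handler above consumes only node, permanent, and (in elided lines) force from the request; a selector value sent by an older client is simply ignored. A hedged example call with python-requests (the route path, host, port, and the 'action' parameter are assumptions for illustration; the other parameter names come from the handler):

import requests

# Hypothetical endpoint; the real URL prefix is not shown in this hunk.
resp = requests.post(
    'http://pvc-api.local:7370/api/v1/vm/testvm/node',
    data={
        'action': 'migrate',  # temporary move; add 'permanent' for a permanent one
        'node': 'node2',      # optional: autoselect when omitted
        'force': 'true',      # optional: re-migrate an already-migrated VM
    },
)
print(resp.status_code, resp.json())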

View File

@@ -520,19 +520,14 @@ def vm_stop(domain):
'-t', '--target', 'target_node', default=None,
help='Target node to migrate to; autodetect if unspecified.'
)
@click.option(
'-s', '--selector', 'selector', default='mem', show_default=True,
type=click.Choice(['mem','load','vcpus','vms']),
help='Method to determine optimal target node during autodetect.'
)
def vm_move(domain, target_node, selector):
def vm_move(domain, target_node):
"""
Permanently move virtual machine DOMAIN, via live migration if running and possible, to another node. DOMAIN may be a UUID or name.
"""
# Open a Zookeeper connection
zk_conn = pvc_common.startZKConnection(zk_host)
retcode, retmsg = pvc_vm.move_vm(zk_conn, domain, target_node, selector)
retcode, retmsg = pvc_vm.move_vm(zk_conn, domain, target_node)
cleanup(retcode, retmsg, zk_conn)
###############################################################################
@@ -546,23 +541,18 @@ def vm_move(domain, target_node, selector):
'-t', '--target', 'target_node', default=None,
help='Target node to migrate to; autodetect if unspecified.'
)
@click.option(
'-s', '--selector', 'selector', default='mem', show_default=True,
type=click.Choice(['mem','load','vcpus','vms']),
help='Method to determine optimal target node during autodetect.'
)
@click.option(
'-f', '--force', 'force_migrate', is_flag=True, default=False,
help='Force migrate an already migrated VM; does not replace an existing previous node value.'
)
def vm_migrate(domain, target_node, selector, force_migrate):
def vm_migrate(domain, target_node, force_migrate):
"""
Temporarily migrate running virtual machine DOMAIN, via live migration if possible, to another node. DOMAIN may be a UUID or name. If DOMAIN is not running, it will be started on the target node.
"""
# Open a Zookeeper connection
zk_conn = pvc_common.startZKConnection(zk_host)
retcode, retmsg = pvc_vm.migrate_vm(zk_conn, domain, target_node, selector, force_migrate, is_cli=True)
retcode, retmsg = pvc_vm.migrate_vm(zk_conn, domain, target_node, force_migrate, is_cli=True)
cleanup(retcode, retmsg, zk_conn)
###############################################################################
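A hedged way to exercise the updated command signature is click's test runner; this sketch assumes it runs in the module defining vm_migrate and that a Zookeeper cluster is reachable at the configured zk_host:

from click.testing import CliRunner

runner = CliRunner()
# Only the target and force options remain; the old '-s'/'--selector' flag is gone.
result = runner.invoke(vm_migrate, ['testvm', '--target', 'node2', '--force'])
print(result.output)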

View File

@@ -288,18 +288,46 @@ def getPrimaryNode(zk_conn):
return primary_node
#
# Get the list of valid target nodes
# Find a migration target
#
def getNodes(zk_conn, dom_uuid):
def findTargetHypervisor(zk_conn, config, dom_uuid):
# Determine the VM's node limit; default to no limit and write the default back if the read fails
try:
node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',')
except:
node_limit = None
zkhandler.writedata(zk_conn, { '/domains/{}/node_limit'.format(dom_uuid): 'None' })
# Determine the VM's search field; default to the configured selector and write the default back if the read fails
try:
search_field = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(dom_uuid))
except:
search_field = config.migration_target_selector
zkhandler.writedata(zk_conn, { '/domains/{}/node_selector'.format(dom_uuid): config.migration_target_selector })
# Execute the search
if search_field == 'mem':
return findTargetHypervisorMem(zk_conn, node_limit, dom_uuid)
if search_field == 'load':
return findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid)
if search_field == 'vcpus':
return findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid)
if search_field == 'vms':
return findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid)
# Nothing was found
return None
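For reference, a sketch of the per-VM keys this function consults; the key paths and the shape of the writedata call come from the code above, while the UUID and values are invented for illustration:

dom_uuid = '00000000-0000-0000-0000-000000000000'  # example only
zkhandler.writedata(zk_conn, {
    '/domains/{}/node_limit'.format(dom_uuid): 'node1,node2',  # CSV list of allowed nodes, or 'None'
    '/domains/{}/node_selector'.format(dom_uuid): 'mem',       # one of mem, load, vcpus, vms
})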
# Get the list of valid target nodes
def getHypervisors(zk_conn, node_limit, dom_uuid):
valid_node_list = []
full_node_list = zkhandler.listchildren(zk_conn, '/nodes')
try:
current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))
except:
current_node = None
current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))
for node in full_node_list:
if node_limit and node not in node_limit:
continue
daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node))
domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node))
@@ -311,64 +339,50 @@ def getNodes(zk_conn, dom_uuid):
valid_node_list.append(node)
if not valid_node_list:
# We found no valid nodes; possibly they're all flushed or all down. Return the entire list instead.
valid_node_list = full_node_list
return valid_node_list
#
# Find a migration target
#
def findTargetNode(zk_conn, search_field, dom_uuid):
if search_field == 'mem':
return findTargetNodeMem(zk_conn, dom_uuid)
if search_field == 'load':
return findTargetNodeLoad(zk_conn, dom_uuid)
if search_field == 'vcpus':
return findTargetNodeVCPUs(zk_conn, dom_uuid)
if search_field == 'vms':
return findTargetNodeVMs(zk_conn, dom_uuid)
return None
# via allocated memory
def findTargetNodeMem(zk_conn, dom_uuid):
least_alloc = math.inf
# via free memory (relative to allocated memory)
def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
most_allocfree = 0
target_node = None
node_list = getNodes(zk_conn, dom_uuid)
node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
for node in node_list:
alloc = float(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node)))
memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node)))
memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node)))
memfree = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(node)))
memtotal = memused + memfree
allocfree = memtotal - memalloc
if alloc < least_alloc:
least_alloc = alloc
if allocfree > most_allocfree:
most_allocfree = allocfree
target_node = node
return target_node
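The mem selector now maximizes unallocated memory (total minus allocated) instead of minimizing the absolute allocation, which behaves sensibly across differently sized nodes. A worked example with invented figures:

# node1: memused=48000, memfree=16000 -> memtotal=64000; memalloc=40000 -> allocfree=24000
# node2: memused=20000, memfree=12000 -> memtotal=32000; memalloc=24000 -> allocfree=8000
# node1 wins: it has more memory allocated in absolute terms, but far more
# unallocated room for the incoming VM (the old least-allocated rule would
# have picked node2).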
# via load average
def findTargetNodeLoad(zk_conn, dom_uuid):
least_load = math.inf
def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
least_load = 9999
target_node = None
node_list = getNodes(zk_conn, dom_uuid)
node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
for node in node_list:
load = float(zkhandler.readdata(zk_conn, '/nodes/{}/cpuload'.format(node)))
load = float(zkhandler.readdata(zk_conn, '/nodes/{}/load'.format(node)))
if load < least_load:
least_load = load
target_node = node
return target_node
# via total vCPUs
def findTargetNodeVCPUs(zk_conn, dom_uuid):
least_vcpus = math.inf
def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
least_vcpus = 9999
target_node = None
node_list = getNodes(zk_conn, dom_uuid)
node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
for node in node_list:
vcpus = float(zkhandler.readdata(zk_conn, '/nodes/{}/vcpualloc'.format(node)))
vcpus = int(zkhandler.readdata(zk_conn, '/nodes/{}/vcpualloc'.format(node)))
if vcpus < least_vcpus:
least_vcpus = vcpus
@@ -377,13 +391,13 @@ def findTargetNodeVCPUs(zk_conn, dom_uuid):
return target_node
# via total VMs
def findTargetNodeVMs(zk_conn, dom_uuid):
least_vms = math.inf
def findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid):
least_vms = 9999
target_node = None
node_list = getNodes(zk_conn, dom_uuid)
node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
for node in node_list:
vms = float(zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node)))
vms = int(zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node)))
if vms < least_vms:
least_vms = vms

View File

@@ -398,7 +398,7 @@ def stop_vm(zk_conn, domain):
return True, 'Forcibly stopping VM "{}".'.format(domain)
def move_vm(zk_conn, domain, target_node, selector):
def move_vm(zk_conn, domain, target_node):
# Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain)
if not dom_uuid:
@@ -408,13 +408,18 @@ def move_vm(zk_conn, domain, target_node, selector):
current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))
if not target_node:
target_node = common.findTargetNode(zk_conn, selector, dom_uuid)
target_node = common.findTargetNode(zk_conn, dom_uuid)
else:
# Verify node is valid
valid_node = common.verifyNode(zk_conn, target_node)
if not valid_node:
return False, 'Specified node "{}" is invalid.'.format(target_node)
# Check that the node is within the VM's node limit, if one is set
limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',')
if limit != ['None'] and target_node not in limit:
return False, 'Specified node "{}" is not in the allowed list of nodes for VM "{}".'.format(target_node, domain)
# Verify if node is current node
if target_node == current_node:
common.stopZKConnection(zk_conn)
@@ -435,7 +440,7 @@ def move_vm(zk_conn, domain, target_node, selector):
return True, 'Permanently migrating VM "{}" to node "{}".'.format(domain, target_node)
def migrate_vm(zk_conn, domain, target_node, selector, force_migrate, is_cli=False):
def migrate_vm(zk_conn, domain, target_node, force_migrate, is_cli=False):
# Validate that VM exists in cluster
dom_uuid = getDomainUUID(zk_conn, domain)
if not dom_uuid:
@@ -463,13 +468,18 @@ def migrate_vm(zk_conn, domain, target_node, selector, force_migrate, is_cli=False):
return False, 'ERROR: VM "{}" has been previously migrated.'.format(domain)
if not target_node:
target_node = common.findTargetNode(zk_conn, selector, dom_uuid)
target_node = common.findTargetNode(zk_conn, dom_uuid)
else:
# Verify node is valid
valid_node = common.verifyNode(zk_conn, target_node)
if not valid_node:
return False, 'Specified node "{}" is invalid.'.format(target_node)
# Check that the node is within the VM's node limit, if one is set
limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',')
if limit != ['None'] and target_node not in limit:
return False, 'Specified node "{}" is not in the allowed list of nodes for VM "{}".'.format(target_node, domain)
# Verify if node is current node
if target_node == current_node:
common.stopZKConnection(zk_conn)
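With the limit check in place, an explicit target outside the VM's node_limit is now rejected before any state is changed. A hedged sketch of the resulting behavior (the (flag, message) return convention comes from the functions above; the VM and node names are invented):

# Assuming 'testvm' carries node_limit 'node1,node2' in Zookeeper:
ok, msg = migrate_vm(zk_conn, 'testvm', 'node3', force_migrate=False)
print(ok)   # False
print(msg)  # Specified node "node3" is not in the allowed list of nodes for VM "testvm".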