Use node instead of hypervisor consistently

commit 18fc49fc6c (parent 0763bd2d51)
Author: Joshua Boniface
Date:   2019-10-12 01:59:08 -04:00

4 changed files with 30 additions and 30 deletions

@@ -290,7 +290,7 @@ def getPrimaryNode(zk_conn):
 #
 # Find a migration target
 #
-def findTargetHypervisor(zk_conn, config, dom_uuid):
+def findTargetNode(zk_conn, config, dom_uuid):
     # Determine VM node limits; set config value if read fails
     try:
         node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(node)).split(',')
@@ -307,19 +307,19 @@ def findTargetHypervisor(zk_conn, config, dom_uuid):

     # Execute the search
     if search_field == 'mem':
-        return findTargetHypervisorMem(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeMem(zk_conn, node_limit, dom_uuid)
     if search_field == 'load':
-        return findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeLoad(zk_conn, node_limit, dom_uuid)
     if search_field == 'vcpus':
-        return findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid)
     if search_field == 'vms':
-        return findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeVMs(zk_conn, node_limit, dom_uuid)

     # Nothing was found
     return None

 # Get the list of valid target nodes
-def getHypervisors(zk_conn, node_limit, dom_uuid):
+def getNodes(zk_conn, node_limit, dom_uuid):
     valid_node_list = []
     full_node_list = zkhandler.listchildren(zk_conn, '/nodes')
     current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))
@@ -342,11 +342,11 @@ def getHypervisors(zk_conn, node_limit, dom_uuid):
     return valid_node_list

 # via free memory (relative to allocated memory)
-def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
+def findTargetNodeMem(zk_conn, node_limit, dom_uuid):
     most_allocfree = 0
     target_node = None

-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node)))
         memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node)))
@@ -361,11 +361,11 @@ def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
     return target_node

 # via load average
-def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
+def findTargetNodeLoad(zk_conn, node_limit, dom_uuid):
     least_load = 9999
     target_node = None

-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         load = int(zkhandler.readdata(zk_conn, '/nodes/{}/load'.format(node)))
@@ -376,11 +376,11 @@ def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
     return target_node

 # via total vCPUs
-def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
+def findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid):
     least_vcpus = 9999
     target_node = None

-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         vcpus = int(zkhandler.readdata(zk_conn, '/nodes/{}/vcpualloc'.format(node)))
@@ -391,11 +391,11 @@ def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
     return target_node

 # via total VMs
-def findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid):
+def findTargetNodeVMs(zk_conn, node_limit, dom_uuid):
    least_vms = 9999
     target_node = None

-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         vms = int(zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node)))
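
Taken together, the four findTargetNode* helpers are one pattern: filter candidate nodes, then optimize a single per-node metric. A minimal, self-contained sketch of that pattern follows (illustrative only: the metrics dict stands in for the per-node values PVC reads from Zookeeper, and find_target_node is a hypothetical condensed name, not part of this codebase):

    # Hypothetical condensed form of the dispatch in findTargetNode() above.
    # 'metrics' replaces the zkhandler reads; keys mirror the Zookeeper paths.
    def find_target_node(metrics, selector, node_limit, current_node):
        # Mirror getNodes(): drop the current node, honor an optional limit list.
        candidates = [n for n in metrics
                      if n != current_node and (not node_limit or n in node_limit)]
        if not candidates:
            return None
        if selector == 'mem':     # most free-vs-allocated memory wins
            return max(candidates, key=lambda n: metrics[n]['allocfree'])
        if selector == 'load':    # lowest load average wins
            return min(candidates, key=lambda n: metrics[n]['load'])
        if selector == 'vcpus':   # fewest allocated vCPUs wins
            return min(candidates, key=lambda n: metrics[n]['vcpualloc'])
        if selector == 'vms':     # fewest running VMs wins
            return min(candidates, key=lambda n: metrics[n]['domainscount'])
        return None               # nothing was found

    metrics = {
        'node1': {'allocfree': 2048, 'load': 4, 'vcpualloc': 12, 'domainscount': 6},
        'node2': {'allocfree': 8192, 'load': 1, 'vcpualloc': 4,  'domainscount': 2},
    }
    print(find_target_node(metrics, 'mem', [], current_node='node1'))  # -> node2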


@@ -382,7 +382,7 @@ class NodeInstance(object):
             self.logger.out('Selecting target to migrate VM "{}"'.format(dom_uuid), state='i')
-            target_node = common.findTargetHypervisor(self.zk_conn, self.config, dom_uuid)
+            target_node = common.findTargetNode(self.zk_conn, self.config, dom_uuid)

             # Don't replace the previous node if the VM is already migrated
             if zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(dom_uuid)):
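
The guard in the hunk above keeps a VM's original home: lastnode is only written when it is empty, so repeated flush migrations still remember where the VM came from. A tiny sketch of that idea (hypothetical dict-backed state, not PVC's zkhandler API):

    # Record the original node only on the first migration away from it.
    def record_migration(state, vm, target_node):
        if not state['lastnode'].get(vm):      # already migrated: keep original home
            state['lastnode'][vm] = state['node'][vm]
        state['node'][vm] = target_node

    state = {'node': {'vm-a': 'node1'}, 'lastnode': {}}
    record_migration(state, 'vm-a', 'node2')   # lastnode -> node1
    record_migration(state, 'vm-a', 'node3')   # lastnode stays node1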


@@ -139,7 +139,7 @@ def removeIPAddress(ipaddr, cidrnetmask, dev):
 #
 # Find a migration target
 #
-def findTargetHypervisor(zk_conn, config, dom_uuid):
+def findTargetNode(zk_conn, config, dom_uuid):
     # Determine VM node limits; set config value if read fails
     try:
         node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(node)).split(',')
@@ -156,19 +156,19 @@ def findTargetHypervisor(zk_conn, config, dom_uuid):

     # Execute the search
     if search_field == 'mem':
-        return findTargetHypervisorMem(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeMem(zk_conn, node_limit, dom_uuid)
     if search_field == 'load':
-        return findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeLoad(zk_conn, node_limit, dom_uuid)
     if search_field == 'vcpus':
-        return findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid)
     if search_field == 'vms':
-        return findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeVMs(zk_conn, node_limit, dom_uuid)

     # Nothing was found
     return None

 # Get the list of valid target nodes
-def getHypervisors(zk_conn, node_limit, dom_uuid):
+def getNodes(zk_conn, node_limit, dom_uuid):
     valid_node_list = []
     full_node_list = zkhandler.listchildren(zk_conn, '/nodes')
     current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))
@@ -191,11 +191,11 @@ def getHypervisors(zk_conn, node_limit, dom_uuid):
     return valid_node_list

 # via free memory (relative to allocated memory)
-def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
+def findTargetNodeMem(zk_conn, node_limit, dom_uuid):
     most_allocfree = 0
     target_node = None

-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node)))
         memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node)))
@@ -210,11 +210,11 @@ def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
     return target_node

 # via load average
-def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
+def findTargetNodeLoad(zk_conn, node_limit, dom_uuid):
     least_load = 9999
     target_node = None

-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         load = int(zkhandler.readdata(zk_conn, '/nodes/{}/load'.format(node)))
@@ -225,11 +225,11 @@ def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
     return target_node

 # via total vCPUs
-def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
+def findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid):
     least_vcpus = 9999
     target_node = None

-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         vcpus = int(zkhandler.readdata(zk_conn, '/nodes/{}/vcpualloc'.format(node)))
@@ -240,11 +240,11 @@ def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
     return target_node

 # via total VMs
-def findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid):
+def findTargetNodeVMs(zk_conn, node_limit, dom_uuid):
     least_vms = 9999
     target_node = None

-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         vms = int(zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node)))


@@ -80,7 +80,7 @@ def migrateFromFencedNode(zk_conn, node_name, config, logger):
     for dom_uuid in dead_node_running_domains:
         VMInstance.flush_locks(zk_conn, logger, dom_uuid)

-        target_node = common.findTargetHypervisor(zk_conn, config, dom_uuid)
+        target_node = common.findTargetNode(zk_conn, config, dom_uuid)
         if target_node is not None:
             logger.out('Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node), state='i')
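
This last file is the fence-recovery path: for each VM that was running on the fenced node, flush its stale locks, pick a fresh target with the renamed findTargetNode, and migrate it if a target exists. The shape of that loop, as a runnable stand-alone sketch (the function names and dict-backed cluster state are illustrative assumptions, not PVC's actual API):

    # Illustrative shape of migrateFromFencedNode(); pick_target stands in
    # for the metric-based selector search shown in the first file above.
    def migrate_from_fenced_node(cluster, dead_node, log):
        victims = [vm for vm, node in cluster['vm_node'].items() if node == dead_node]
        for vm in victims:
            cluster['locks'].pop(vm, None)        # flush stale locks first
            target = pick_target(cluster, dead_node)
            if target is not None:
                log('Migrating VM "{}" to node "{}"'.format(vm, target))
                cluster['vm_node'][vm] = target

    def pick_target(cluster, dead_node):
        # Trivial stand-in: any surviving node; PVC runs the metric search here.
        return next((n for n in cluster['nodes'] if n != dead_node), None)

    cluster = {'nodes': ['node1', 'node2'],
               'vm_node': {'vm-a': 'node1'},
               'locks': {'vm-a': 'stale'}}
    migrate_from_fenced_node(cluster, 'node1', print)   # vm-a -> node2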