Use node instead of hypervisor consistently
Commit 18fc49fc6c (parent 0763bd2d51)
@@ -290,7 +290,7 @@ def getPrimaryNode(zk_conn):
 #
 # Find a migration target
 #
-def findTargetHypervisor(zk_conn, config, dom_uuid):
+def findTargetNode(zk_conn, config, dom_uuid):
     # Determine VM node limits; set config value if read fails
     try:
         node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(node)).split(',')
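The hunk truncates inside the limit-read guard. One oddity it leaves in place: the context line formats the Zookeeper path with node even though dom_uuid is the parameter in scope, which looks like a pre-existing bug this rename does not touch. Below is a standalone sketch of the read-with-fallback pattern the try: opens; read_limit stands in for zkhandler.readdata, and the fallback branch is an assumption, not the commit's code.

# Standalone sketch (assumption, not the commit's code) of the node-limit
# read-with-fallback; read_limit stands in for zkhandler.readdata.
def parse_node_limit(read_limit, dom_uuid):
    try:
        node_limit = read_limit('/domains/{}/node_limit'.format(dom_uuid)).split(',')
    except Exception:
        node_limit = None  # assumed fallback: no limit, every node is eligible
    return node_limit

def read_ok(path):
    return 'hv1,hv2'

def read_fail(path):
    raise KeyError(path)

print(parse_node_limit(read_ok, 'abc-123'))    # ['hv1', 'hv2']
print(parse_node_limit(read_fail, 'abc-123'))  # None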
@@ -307,19 +307,19 @@ def findTargetHypervisor(zk_conn, config, dom_uuid):
 
     # Execute the search
     if search_field == 'mem':
-        return findTargetHypervisorMem(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeMem(zk_conn, node_limit, dom_uuid)
     if search_field == 'load':
-        return findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeLoad(zk_conn, node_limit, dom_uuid)
     if search_field == 'vcpus':
-        return findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid)
     if search_field == 'vms':
-        return findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeVMs(zk_conn, node_limit, dom_uuid)
 
     # Nothing was found
     return None
 
 # Get the list of valid target nodes
-def getHypervisors(zk_conn, node_limit, dom_uuid):
+def getNodes(zk_conn, node_limit, dom_uuid):
     valid_node_list = []
     full_node_list = zkhandler.listchildren(zk_conn, '/nodes')
     current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))
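getNodes() is cut off right after its setup lines. Here is a standalone sketch of the candidate filtering those three variables imply; both exclusion rules are assumptions, since the diff truncates before the loop.

# Standalone sketch (assumption, not the commit's code) of the filtering
# getNodes() appears to perform on full_node_list.
def filter_nodes(full_node_list, node_limit, current_node):
    valid_node_list = []
    for node in full_node_list:
        if node_limit and node not in node_limit:
            continue  # assumed: honor the per-VM node limit when one is set
        if node == current_node:
            continue  # assumed: never select the node the VM is already on
        valid_node_list.append(node)
    return valid_node_list

print(filter_nodes(['hv1', 'hv2', 'hv3'], ['hv1', 'hv3'], 'hv1'))  # ['hv3']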
@@ -342,11 +342,11 @@ def getHypervisors(zk_conn, node_limit, dom_uuid):
     return valid_node_list
 
 # via free memory (relative to allocated memory)
-def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
+def findTargetNodeMem(zk_conn, node_limit, dom_uuid):
     most_allocfree = 0
     target_node = None
 
-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node)))
         memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node)))
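The selection loop is truncated after the two reads. A standalone sketch of the "most allocatable memory free" comparison that most_allocfree and the function comment suggest; the memfree key and the formula are assumptions.

# Standalone sketch (not the commit's code) of the comparison the truncated
# loop implies: pick the node with the most memory left after allocations.
def pick_node_by_mem(node_stats):
    most_allocfree = 0
    target_node = None
    for node, stats in node_stats.items():
        # assumed formula: (used + free) is total memory; subtract allocations
        allocfree = (stats['memused'] + stats['memfree']) - stats['memalloc']
        if allocfree > most_allocfree:
            most_allocfree = allocfree
            target_node = node
    return target_node

stats = {'hv1': {'memused': 8192, 'memfree': 24576, 'memalloc': 16384},
         'hv2': {'memused': 4096, 'memfree': 28672, 'memalloc': 8192}}
print(pick_node_by_mem(stats))  # 'hv2'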
@@ -361,11 +361,11 @@ def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
     return target_node
 
 # via load average
-def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
+def findTargetNodeLoad(zk_conn, node_limit, dom_uuid):
     least_load = 9999
     target_node = None
 
-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         load = int(zkhandler.readdata(zk_conn, '/nodes/{}/load'.format(node)))
 
@@ -376,11 +376,11 @@ def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
     return target_node
 
 # via total vCPUs
-def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
+def findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid):
     least_vcpus = 9999
     target_node = None
 
-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         vcpus = int(zkhandler.readdata(zk_conn, '/nodes/{}/vcpualloc'.format(node)))
 
@@ -391,11 +391,11 @@ def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
     return target_node
 
 # via total VMs
-def findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid):
+def findTargetNodeVMs(zk_conn, node_limit, dom_uuid):
     least_vms = 9999
     target_node = None
 
-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         vms = int(zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node)))
 
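The load, vCPU, and VM-count selectors all truncate at the same point, and all share one argmin shape: start from a 9999 sentinel, keep the node with the smallest metric. A standalone sketch of that shared pattern, with the comparison filled in as an assumption:

# Standalone sketch of the argmin pattern shared by the three 'least'
# selectors; 9999 is kept as the sentinel the originals use for 'infinity'.
def pick_least(node_metrics, sentinel=9999):
    least = sentinel
    target_node = None
    for node, value in node_metrics.items():
        if value < least:
            least = value
            target_node = node
    return target_node

print(pick_least({'hv1': 12, 'hv2': 4, 'hv3': 9}))  # 'hv2'

The 9999 sentinel only works while no node reports a metric that large; float('inf') would be the safer idiom, but the sketch keeps the originals' convention.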
@@ -382,7 +382,7 @@ class NodeInstance(object):
 
             self.logger.out('Selecting target to migrate VM "{}"'.format(dom_uuid), state='i')
 
-            target_node = common.findTargetHypervisor(self.zk_conn, self.config, dom_uuid)
+            target_node = common.findTargetNode(self.zk_conn, self.config, dom_uuid)
 
             # Don't replace the previous node if the VM is already migrated
             if zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(dom_uuid)):
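The lastnode guard is truncated before its body. A standalone sketch of what the comment suggests it does: keep an already-recorded previous node rather than overwriting it, so a VM that has been migrated once can still be returned to its original home. This is entirely an assumption; the guard body is not part of this diff.

# Standalone sketch (assumption) of the lastnode-preservation rule the
# truncated guard implies.
def record_last_node(lastnode, this_node):
    if lastnode:
        return lastnode  # assumed: already migrated, preserve original home
    return this_node

print(record_last_node('', 'hv2'))     # 'hv2' (first migration records us)
print(record_last_node('hv1', 'hv2'))  # 'hv1' (previous home preserved)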
@@ -139,7 +139,7 @@ def removeIPAddress(ipaddr, cidrnetmask, dev):
 #
 # Find a migration target
 #
-def findTargetHypervisor(zk_conn, config, dom_uuid):
+def findTargetNode(zk_conn, config, dom_uuid):
     # Determine VM node limits; set config value if read fails
     try:
         node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(node)).split(',')
@@ -156,19 +156,19 @@ def findTargetHypervisor(zk_conn, config, dom_uuid):
 
     # Execute the search
    if search_field == 'mem':
-        return findTargetHypervisorMem(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeMem(zk_conn, node_limit, dom_uuid)
     if search_field == 'load':
-        return findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeLoad(zk_conn, node_limit, dom_uuid)
     if search_field == 'vcpus':
-        return findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid)
     if search_field == 'vms':
-        return findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid)
+        return findTargetNodeVMs(zk_conn, node_limit, dom_uuid)
 
     # Nothing was found
     return None
 
 # Get the list of valid target nodes
-def getHypervisors(zk_conn, node_limit, dom_uuid):
+def getNodes(zk_conn, node_limit, dom_uuid):
     valid_node_list = []
     full_node_list = zkhandler.listchildren(zk_conn, '/nodes')
     current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))
@@ -191,11 +191,11 @@ def getHypervisors(zk_conn, node_limit, dom_uuid):
     return valid_node_list
 
 # via free memory (relative to allocated memory)
-def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
+def findTargetNodeMem(zk_conn, node_limit, dom_uuid):
     most_allocfree = 0
     target_node = None
 
-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node)))
         memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node)))
@@ -210,11 +210,11 @@ def findTargetHypervisorMem(zk_conn, node_limit, dom_uuid):
     return target_node
 
 # via load average
-def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
+def findTargetNodeLoad(zk_conn, node_limit, dom_uuid):
     least_load = 9999
     target_node = None
 
-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         load = int(zkhandler.readdata(zk_conn, '/nodes/{}/load'.format(node)))
 
@@ -225,11 +225,11 @@ def findTargetHypervisorLoad(zk_conn, node_limit, dom_uuid):
     return target_node
 
 # via total vCPUs
-def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
+def findTargetNodeVCPUs(zk_conn, node_limit, dom_uuid):
     least_vcpus = 9999
     target_node = None
 
-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         vcpus = int(zkhandler.readdata(zk_conn, '/nodes/{}/vcpualloc'.format(node)))
 
@@ -240,11 +240,11 @@ def findTargetHypervisorVCPUs(zk_conn, node_limit, dom_uuid):
     return target_node
 
 # via total VMs
-def findTargetHypervisorVMs(zk_conn, node_limit, dom_uuid):
+def findTargetNodeVMs(zk_conn, node_limit, dom_uuid):
     least_vms = 9999
     target_node = None
 
-    node_list = getHypervisors(zk_conn, node_limit, dom_uuid)
+    node_list = getNodes(zk_conn, node_limit, dom_uuid)
     for node in node_list:
         vms = int(zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node)))
 
@@ -80,7 +80,7 @@ def migrateFromFencedNode(zk_conn, node_name, config, logger):
     for dom_uuid in dead_node_running_domains:
         VMInstance.flush_locks(zk_conn, logger, dom_uuid)
 
-        target_node = common.findTargetHypervisor(zk_conn, config, dom_uuid)
+        target_node = common.findTargetNode(zk_conn, config, dom_uuid)
 
         if target_node is not None:
             logger.out('Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node), state='i')
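For context, a standalone sketch of the recovery loop this hunk sits in: every VM that was running on the fenced node gets its locks flushed, then is handed to a target if one qualifies. The injected callables and the no-target branch are assumptions, not the commit's code.

# Standalone sketch (assumption) of the fencing recovery flow around
# findTargetNode(); dependencies are passed in as plain callables.
def recover_fenced_vms(dead_node_running_domains, flush_locks, find_target,
                       migrate_vm, log):
    for dom_uuid in dead_node_running_domains:
        flush_locks(dom_uuid)
        target_node = find_target(dom_uuid)
        if target_node is not None:
            log('Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node))
            migrate_vm(dom_uuid, target_node)
        else:
            # assumed: with no valid target, leave the VM for manual recovery
            log('No valid target node found for VM "{}"'.format(dom_uuid))

recover_fenced_vms(['abc-123'], lambda d: None, lambda d: 'hv2',
                   lambda d, n: None, print)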