Use memalloc for target selection
Uses the new memalloc ZK data to calculate the freest target hypervisor during migrations. Also factors the selection logic out into dedicated functions to avoid code duplication and to facilitate adding alternate search strategies in the future. Addresses #9
This commit is contained in:
parent
d3cb8d4a13
commit
d8962bf998
52
pvc.py
52
pvc.py
|
@ -354,6 +354,44 @@ def verifyNode(zk_conn, node):
|
||||||
click.echo('ERROR: No node named "{}" is present in the cluster.'.format(node))
|
click.echo('ERROR: No node named "{}" is present in the cluster.'.format(node))
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
|
def findTargetHypervisor(zk_conn, search_field, dom_uuid, this_node):
    """
    Select a target hypervisor for migrating a VM, dispatching on search_field.

    zk_conn      - active ZooKeeper connection handle
    search_field - selection strategy; currently only 'mem' is supported
    dom_uuid     - UUID of the domain (VM) to be migrated
    this_node    - name of the node the VM is expected to be running on

    Returns the chosen hypervisor name, or None when the search_field is
    unrecognized (or the strategy finds no suitable target).
    """
    if search_field == 'mem':
        return findTargetHypervisorMem(zk_conn, dom_uuid, this_node)
    # Unknown strategy: no target can be selected
    return None
def findTargetHypervisorMem(zk_conn, dom_uuid, this_node):
    """
    Find the target hypervisor with the most free allocatable memory.

    zk_conn   - active ZooKeeper connection handle
    dom_uuid  - UUID of the domain (VM) to be migrated
    this_node - name of the node the VM is expected to be running on

    Returns the name of the best candidate hypervisor, or None when the VM
    is not running on this_node or no suitable candidate exists.
    """
    # Find a target node
    most_allocfree = 0
    target_hypervisor = None

    hypervisor_list = zkhandler.listchildren(zk_conn, '/nodes')
    current_hypervisor = zkhandler.readdata(zk_conn, '/domains/{}/hypervisor'.format(dom_uuid))

    # Only select a target for VMs currently hosted on this node
    # (was a bare `continue` outside any loop, which is a SyntaxError)
    if current_hypervisor != this_node:
        return None

    for hypervisor in hypervisor_list:
        daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(hypervisor))
        domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(hypervisor))

        # Never migrate a VM onto the node it is already on
        if hypervisor == current_hypervisor:
            continue

        # Skip nodes that are not up and ready to accept domains
        if daemon_state != 'run' or domain_state != 'ready':
            continue

        memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(hypervisor)))
        memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(hypervisor)))
        memfree = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(hypervisor)))
        memtotal = memused + memfree
        # Memory still available for allocation: total physical minus allocated
        allocfree = memtotal - memalloc

        if allocfree > most_allocfree:
            most_allocfree = allocfree
            target_hypervisor = hypervisor

    return target_hypervisor
||||||
########################
|
########################
|
||||||
########################
|
########################
|
||||||
|
@ -945,19 +983,7 @@ def move_vm(domain, target_hypervisor):
|
||||||
current_hypervisor = zk_conn.get('/domains/{}/hypervisor'.format(dom_uuid))[0].decode('ascii')
|
current_hypervisor = zk_conn.get('/domains/{}/hypervisor'.format(dom_uuid))[0].decode('ascii')
|
||||||
|
|
||||||
if target_hypervisor == None:
|
if target_hypervisor == None:
|
||||||
# Determine the best hypervisor to migrate the VM to based on active memory usage
|
target_hypervisor = findTargetHypervisor(zk_conn, 'mem', dom_uuid, current_hypervisor)
|
||||||
hypervisor_list = zk_conn.get_children('/nodes')
|
|
||||||
most_memfree = 0
|
|
||||||
for hypervisor in hypervisor_list:
|
|
||||||
daemon_state = zk_conn.get('/nodes/{}/daemonstate'.format(hypervisor))[0].decode('ascii')
|
|
||||||
domain_state = zk_conn.get('/nodes/{}/domainstate'.format(hypervisor))[0].decode('ascii')
|
|
||||||
if daemon_state != 'run' or domain_state != 'ready' or hypervisor == current_hypervisor:
|
|
||||||
continue
|
|
||||||
|
|
||||||
memfree = int(zk_conn.get('/nodes/{}/memfree'.format(hypervisor))[0].decode('ascii'))
|
|
||||||
if memfree > most_memfree:
|
|
||||||
most_memfree = memfree
|
|
||||||
target_hypervisor = hypervisor
|
|
||||||
else:
|
else:
|
||||||
if target_hypervisor == current_hypervisor:
|
if target_hypervisor == current_hypervisor:
|
||||||
click.echo('ERROR: The VM "{}" is already running on hypervisor "{}".'.format(dom_uuid, current_hypervisor))
|
click.echo('ERROR: The VM "{}" is already running on hypervisor "{}".'.format(dom_uuid, current_hypervisor))
|
||||||
|
|
|
@ -144,27 +144,7 @@ class NodeInstance():
|
||||||
for dom_uuid in fixed_domain_list:
|
for dom_uuid in fixed_domain_list:
|
||||||
ansiiprint.echo('Selecting target to migrate VM "{}"'.format(dom_uuid), '', 'i')
|
ansiiprint.echo('Selecting target to migrate VM "{}"'.format(dom_uuid), '', 'i')
|
||||||
|
|
||||||
most_memfree = 0
|
target_hypervisor = findTargetHypervisor(self.zk_conn, 'mem', dom_uuid, self.this_node)
|
||||||
target_hypervisor = None
|
|
||||||
hypervisor_list = zkhandler.listchildren(self.zk_conn, '/nodes')
|
|
||||||
current_hypervisor = zkhandler.readdata(self.zk_conn, '/domains/{}/hypervisor'.format(dom_uuid))
|
|
||||||
if current_hypervisor != self.this_node:
|
|
||||||
continue
|
|
||||||
|
|
||||||
for hypervisor in hypervisor_list:
|
|
||||||
daemon_state = zkhandler.readdata(self.zk_conn, '/nodes/{}/daemonstate'.format(hypervisor))
|
|
||||||
domain_state = zkhandler.readdata(self.zk_conn, '/nodes/{}/domainstate'.format(hypervisor))
|
|
||||||
if hypervisor == current_hypervisor:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if daemon_state != 'run' or domain_state != 'ready':
|
|
||||||
continue
|
|
||||||
|
|
||||||
memfree = int(zkhandler.readdata(self.zk_conn, '/nodes/{}/memfree'.format(hypervisor)))
|
|
||||||
if memfree > most_memfree:
|
|
||||||
most_memfree = memfree
|
|
||||||
target_hypervisor = hypervisor
|
|
||||||
|
|
||||||
if target_hypervisor == None:
|
if target_hypervisor == None:
|
||||||
ansiiprint.echo('Failed to find migration target for VM "{}"; shutting down'.format(dom_uuid), '', 'e')
|
ansiiprint.echo('Failed to find migration target for VM "{}"; shutting down'.format(dom_uuid), '', 'e')
|
||||||
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(dom_uuid): 'shutdown' })
|
zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(dom_uuid): 'shutdown' })
|
||||||
|
@ -336,6 +316,44 @@ class NodeInstance():
|
||||||
ansiiprint.echo('{}Inactive nodes:{} {}'.format(ansiiprint.bold(), ansiiprint.end(), ' '.join(self.inactive_node_list)), '', 'c')
|
ansiiprint.echo('{}Inactive nodes:{} {}'.format(ansiiprint.bold(), ansiiprint.end(), ' '.join(self.inactive_node_list)), '', 'c')
|
||||||
ansiiprint.echo('{}Flushed nodes:{} {}'.format(ansiiprint.bold(), ansiiprint.end(), ' '.join(self.flushed_node_list)), '', 'c')
|
ansiiprint.echo('{}Flushed nodes:{} {}'.format(ansiiprint.bold(), ansiiprint.end(), ' '.join(self.flushed_node_list)), '', 'c')
|
||||||
|
|
||||||
|
# Find a target node
def findTargetHypervisor(zk_conn, search_field, dom_uuid, this_node):
    """
    Select a target hypervisor for migrating a VM, dispatching on search_field.

    zk_conn      - active ZooKeeper connection handle
    search_field - selection strategy; currently only 'mem' is supported
    dom_uuid     - UUID of the domain (VM) to be migrated
    this_node    - name of the node the VM is expected to be running on

    Returns the chosen hypervisor name, or None when the search_field is
    unrecognized (or the strategy finds no suitable target).
    """
    if search_field == 'mem':
        return findTargetHypervisorMem(zk_conn, dom_uuid, this_node)
    # Unknown strategy: no target can be selected
    return None
||||||
|
def findTargetHypervisorMem(zk_conn, dom_uuid, this_node):
    """
    Find the target hypervisor with the most free allocatable memory.

    zk_conn   - active ZooKeeper connection handle
    dom_uuid  - UUID of the domain (VM) to be migrated
    this_node - name of the node the VM is expected to be running on

    Returns the name of the best candidate hypervisor, or None when the VM
    is not running on this_node or no suitable candidate exists.

    NOTE: the previous signature took an unused `search_field` parameter that
    the callers (findTargetHypervisor and migrateFromFencedHost) never pass,
    which made every call raise TypeError; it is removed here to match them.
    """
    most_allocfree = 0
    target_hypervisor = None

    hypervisor_list = zkhandler.listchildren(zk_conn, '/nodes')
    current_hypervisor = zkhandler.readdata(zk_conn, '/domains/{}/hypervisor'.format(dom_uuid))

    # Only select a target for VMs currently hosted on this node
    # (was a bare `continue` outside any loop, which is a SyntaxError)
    if current_hypervisor != this_node:
        return None

    for hypervisor in hypervisor_list:
        daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(hypervisor))
        domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(hypervisor))

        # Never migrate a VM onto the node it is already on
        if hypervisor == current_hypervisor:
            continue

        # Skip nodes that are not up and ready to accept domains
        if daemon_state != 'run' or domain_state != 'ready':
            continue

        memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(hypervisor)))
        memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(hypervisor)))
        memfree = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(hypervisor)))
        memtotal = memused + memfree
        # Memory still available for allocation: total physical minus allocated
        allocfree = memtotal - memalloc

        if allocfree > most_allocfree:
            most_allocfree = allocfree
            target_hypervisor = hypervisor

    return target_hypervisor
#
|
#
|
||||||
# Fence thread entry function
|
# Fence thread entry function
|
||||||
#
|
#
|
||||||
|
@ -380,20 +398,7 @@ def migrateFromFencedHost(zk_conn, node_name):
|
||||||
ansiiprint.echo('Moving VMs from dead hypervisor "{}" to new hosts'.format(node_name), '', 'i')
|
ansiiprint.echo('Moving VMs from dead hypervisor "{}" to new hosts'.format(node_name), '', 'i')
|
||||||
dead_node_running_domains = zkhandler.readdata(zk_conn, '/nodes/{}/runningdomains'.format(node_name)).split()
|
dead_node_running_domains = zkhandler.readdata(zk_conn, '/nodes/{}/runningdomains'.format(node_name)).split()
|
||||||
for dom_uuid in dead_node_running_domains:
|
for dom_uuid in dead_node_running_domains:
|
||||||
most_memfree = 0
|
target_hypervisor = findTargetHypervisor(zk_conn, 'mem', dom_uuid, node_name)
|
||||||
hypervisor_list = zkhandler.listchildren(zk_conn, '/nodes')
|
|
||||||
current_hypervisor = zkhandler.readdata(zk_conn, '/domains/{}/hypervisor'.format(dom_uuid))
|
|
||||||
for hypervisor in hypervisor_list:
|
|
||||||
print(hypervisor)
|
|
||||||
daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(hypervisor))
|
|
||||||
domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(hypervisor))
|
|
||||||
if daemon_state != 'run' or domain_state != 'ready':
|
|
||||||
continue
|
|
||||||
|
|
||||||
memfree = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(hypervisor)))
|
|
||||||
if memfree > most_memfree:
|
|
||||||
most_memfree = memfree
|
|
||||||
target_hypervisor = hypervisor
|
|
||||||
|
|
||||||
ansiiprint.echo('Moving VM "{}" to hypervisor "{}"'.format(dom_uuid, target_hypervisor), '', 'i')
|
ansiiprint.echo('Moving VM "{}" to hypervisor "{}"'.format(dom_uuid, target_hypervisor), '', 'i')
|
||||||
zkhandler.writedata(zk_conn, {
|
zkhandler.writedata(zk_conn, {
|
||||||
|
|
Loading…
Reference in New Issue