diff --git a/api-daemon/pvcapid/flaskapi.py b/api-daemon/pvcapid/flaskapi.py
index 562a9f55..212e15a9 100755
--- a/api-daemon/pvcapid/flaskapi.py
+++ b/api-daemon/pvcapid/flaskapi.py
@@ -531,15 +531,18 @@ class API_Node_Root(Resource):
                 total:
                   type: integer
                   description: The total amount of node RAM in MB
-                allocated:
-                  type: integer
-                  description: The total amount of RAM allocated to domains in MB
                 used:
                   type: integer
                   description: The total used RAM on the node in MB
                 free:
                   type: integer
                   description: The total free RAM on the node in MB
+                allocated:
+                  type: integer
+                  description: The total amount of RAM allocated to running domains in MB
+                provisioned:
+                  type: integer
+                  description: The total amount of RAM provisioned to all domains (regardless of state) on this node in MB
         parameters:
           - in: query
             name: limit
diff --git a/client-cli/cli_lib/node.py b/client-cli/cli_lib/node.py
index 097b0ada..c2ac3c6d 100644
--- a/client-cli/cli_lib/node.py
+++ b/client-cli/cli_lib/node.py
@@ -141,32 +141,38 @@ def getOutputColours(node_information):
     else:
         mem_allocated_colour = ''
 
-    return daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour
+    if node_information['memory']['provisioned'] > node_information['memory']['total']:
+        mem_provisioned_colour = ansiprint.yellow()
+    else:
+        mem_provisioned_colour = ''
+
+    return daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour
 
 def format_info(node_information, long_output):
-    daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour = getOutputColours(node_information)
+    daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information)
 
     # Format a nice output; do this line-by-line then concat the elements at the end
     ainformation = []
     # Basic information
-    ainformation.append('{}Name:{}                {}'.format(ansiprint.purple(), ansiprint.end(), node_information['name']))
-    ainformation.append('{}Daemon State:{}        {}{}{}'.format(ansiprint.purple(), ansiprint.end(), daemon_state_colour, node_information['daemon_state'], ansiprint.end()))
-    ainformation.append('{}Coordinator State:{}   {}{}{}'.format(ansiprint.purple(), ansiprint.end(), coordinator_state_colour, node_information['coordinator_state'], ansiprint.end()))
-    ainformation.append('{}Domain State:{}        {}{}{}'.format(ansiprint.purple(), ansiprint.end(), domain_state_colour, node_information['domain_state'], ansiprint.end()))
-    ainformation.append('{}Active VM Count:{}     {}'.format(ansiprint.purple(), ansiprint.end(), node_information['domains_count']))
+    ainformation.append('{}Name:{}                  {}'.format(ansiprint.purple(), ansiprint.end(), node_information['name']))
+    ainformation.append('{}Daemon State:{}          {}{}{}'.format(ansiprint.purple(), ansiprint.end(), daemon_state_colour, node_information['daemon_state'], ansiprint.end()))
+    ainformation.append('{}Coordinator State:{}     {}{}{}'.format(ansiprint.purple(), ansiprint.end(), coordinator_state_colour, node_information['coordinator_state'], ansiprint.end()))
+    ainformation.append('{}Domain State:{}          {}{}{}'.format(ansiprint.purple(), ansiprint.end(), domain_state_colour, node_information['domain_state'], ansiprint.end()))
+    ainformation.append('{}Active VM Count:{}       {}'.format(ansiprint.purple(), ansiprint.end(), node_information['domains_count']))
     if long_output:
         ainformation.append('')
-        ainformation.append('{}Architecture:{}        {}'.format(ansiprint.purple(), ansiprint.end(), node_information['arch']))
-        ainformation.append('{}Operating System:{}    {}'.format(ansiprint.purple(), ansiprint.end(), node_information['os']))
-        ainformation.append('{}Kernel Version:{}      {}'.format(ansiprint.purple(), ansiprint.end(), node_information['kernel']))
+        ainformation.append('{}Architecture:{}          {}'.format(ansiprint.purple(), ansiprint.end(), node_information['arch']))
+        ainformation.append('{}Operating System:{}      {}'.format(ansiprint.purple(), ansiprint.end(), node_information['os']))
+        ainformation.append('{}Kernel Version:{}        {}'.format(ansiprint.purple(), ansiprint.end(), node_information['kernel']))
     ainformation.append('')
-    ainformation.append('{}Host CPUs:{}           {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['total']))
-    ainformation.append('{}vCPUs:{}               {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['allocated']))
-    ainformation.append('{}Load:{}                {}'.format(ansiprint.purple(), ansiprint.end(), node_information['load']))
-    ainformation.append('{}Total RAM (MiB):{}     {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['total']))
-    ainformation.append('{}Used RAM (MiB):{}      {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['used']))
-    ainformation.append('{}Free RAM (MiB):{}      {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['free']))
-    ainformation.append('{}Allocated RAM (MiB):{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), mem_allocated_colour, node_information['memory']['allocated'], ansiprint.end()))
+    ainformation.append('{}Host CPUs:{}             {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['total']))
+    ainformation.append('{}vCPUs:{}                 {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['allocated']))
+    ainformation.append('{}Load:{}                  {}'.format(ansiprint.purple(), ansiprint.end(), node_information['load']))
+    ainformation.append('{}Total RAM (MiB):{}       {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['total']))
+    ainformation.append('{}Used RAM (MiB):{}        {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['used']))
+    ainformation.append('{}Free RAM (MiB):{}        {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['free']))
+    ainformation.append('{}Allocated RAM (MiB):{}   {}{}{}'.format(ansiprint.purple(), ansiprint.end(), mem_allocated_colour, node_information['memory']['allocated'], ansiprint.end()))
+    ainformation.append('{}Provisioned RAM (MiB):{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), mem_provisioned_colour, node_information['memory']['provisioned'], ansiprint.end()))
 
     # Join it all together
     ainformation.append('')
@@ -196,7 +202,8 @@ def format_list(node_list, raw):
     mem_total_length = 6
     mem_used_length = 5
     mem_free_length = 5
-    mem_alloc_length = 4
+    mem_alloc_length = 6
+    mem_prov_length = 5
     for node_information in node_list:
         # node_name column
         _node_name_length = len(node_information['name']) + 1
@@ -243,12 +250,17 @@
         if _mem_alloc_length > mem_alloc_length:
            mem_alloc_length = _mem_alloc_length
 
+        # mem_prov column
+        _mem_prov_length = len(str(node_information['memory']['provisioned'])) + 1
+        if _mem_prov_length > mem_prov_length:
+            mem_prov_length = _mem_prov_length
+
     # Format the string (header)
     node_list_output.append(
         '{bold}{node_name: <{node_name_length}} \
St: {daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
Res: {node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
-Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}}{end_bold}'.format(
+Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}'.format(
             node_name_length=node_name_length,
             daemon_state_length=daemon_state_length,
             coordinator_state_length=coordinator_state_length,
@@ -260,6 +272,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
             mem_used_length=mem_used_length,
             mem_free_length=mem_free_length,
             mem_alloc_length=mem_alloc_length,
+            mem_prov_length=mem_prov_length,
             bold=ansiprint.bold(),
             end_bold=ansiprint.end(),
             daemon_state_colour='',
@@ -276,18 +289,19 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
             node_mem_total='Total',
             node_mem_used='Used',
             node_mem_free='Free',
-            node_mem_allocated='VMs'
+            node_mem_allocated='Alloc',
+            node_mem_provisioned='Prov'
         )
     )
 
    # Format the string (elements)
    for node_information in node_list:
-        daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour = getOutputColours(node_information)
+        daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information)
        node_list_output.append(
            '{bold}{node_name: <{node_name_length}} \
    {daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
     {node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
-         {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {mem_allocated_colour}{node_mem_allocated: <{mem_alloc_length}}{end_colour}{end_bold}'.format(
+         {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {mem_allocated_colour}{node_mem_allocated: <{mem_alloc_length}}{end_colour} {mem_provisioned_colour}{node_mem_provisioned: <{mem_prov_length}}{end_colour}{end_bold}'.format(
                node_name_length=node_name_length,
                daemon_state_length=daemon_state_length,
                coordinator_state_length=coordinator_state_length,
@@ -299,12 +313,14 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
                mem_used_length=mem_used_length,
                mem_free_length=mem_free_length,
                mem_alloc_length=mem_alloc_length,
+                mem_prov_length=mem_prov_length,
                bold='',
                end_bold='',
                daemon_state_colour=daemon_state_colour,
                coordinator_state_colour=coordinator_state_colour,
                domain_state_colour=domain_state_colour,
                mem_allocated_colour=mem_allocated_colour,
+                mem_provisioned_colour=mem_provisioned_colour,
                end_colour=ansiprint.end(),
                node_name=node_information['name'],
                node_daemon_state=node_information['daemon_state'],
@@ -316,7 +332,8 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
                node_mem_total=node_information['memory']['total'],
                node_mem_used=node_information['memory']['used'],
                node_mem_free=node_information['memory']['free'],
-                node_mem_allocated=node_information['memory']['allocated']
+                node_mem_allocated=node_information['memory']['allocated'],
+                node_mem_provisioned=node_information['memory']['provisioned']
            )
        )
 
diff --git a/daemon-common/node.py b/daemon-common/node.py
index 153dec56..85e335ac 100644
--- a/daemon-common/node.py
+++ b/daemon-common/node.py
@@ -54,6 +54,7 @@ def getNodeInformation(zk_conn, node_name):
     node_vcpu_allocated = int(zkhandler.readdata(zk_conn, 'nodes/{}/vcpualloc'.format(node_name)))
     node_mem_total = int(zkhandler.readdata(zk_conn, '/nodes/{}/memtotal'.format(node_name)))
     node_mem_allocated = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node_name)))
+    node_mem_provisioned = int(zkhandler.readdata(zk_conn, '/nodes/{}/memprov'.format(node_name)))
     node_mem_used = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node_name)))
     node_mem_free = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(node_name)))
     node_load = float(zkhandler.readdata(zk_conn, '/nodes/{}/cpuload'.format(node_name)))
@@ -80,6 +81,7 @@ def getNodeInformation(zk_conn, node_name):
         'memory': {
             'total': node_mem_total,
             'allocated': node_mem_allocated,
+            'provisioned': node_mem_provisioned,
             'used': node_mem_used,
             'free': node_mem_free
         }
@@ -263,24 +265,25 @@ def format_info(node_information, long_output):
     # Format a nice output; do this line-by-line then concat the elements at the end
     ainformation = []
     # Basic information
-    ainformation.append('{}Name:{}                {}'.format(ansiprint.purple(), ansiprint.end(), node_information['name']))
-    ainformation.append('{}Daemon State:{}        {}{}{}'.format(ansiprint.purple(), ansiprint.end(), daemon_state_colour, node_information['daemon_state'], ansiprint.end()))
-    ainformation.append('{}Coordinator State:{}   {}{}{}'.format(ansiprint.purple(), ansiprint.end(), coordinator_state_colour, node_information['coordinator_state'], ansiprint.end()))
-    ainformation.append('{}Domain State:{}        {}{}{}'.format(ansiprint.purple(), ansiprint.end(), domain_state_colour, node_information['domain_state'], ansiprint.end()))
-    ainformation.append('{}Active VM Count:{}     {}'.format(ansiprint.purple(), ansiprint.end(), node_information['domains_count']))
+    ainformation.append('{}Name:{}                  {}'.format(ansiprint.purple(), ansiprint.end(), node_information['name']))
+    ainformation.append('{}Daemon State:{}          {}{}{}'.format(ansiprint.purple(), ansiprint.end(), daemon_state_colour, node_information['daemon_state'], ansiprint.end()))
+    ainformation.append('{}Coordinator State:{}     {}{}{}'.format(ansiprint.purple(), ansiprint.end(), coordinator_state_colour, node_information['coordinator_state'], ansiprint.end()))
+    ainformation.append('{}Domain State:{}          {}{}{}'.format(ansiprint.purple(), ansiprint.end(), domain_state_colour, node_information['domain_state'], ansiprint.end()))
+    ainformation.append('{}Active VM Count:{}       {}'.format(ansiprint.purple(), ansiprint.end(), node_information['domains_count']))
     if long_output:
         ainformation.append('')
-        ainformation.append('{}Architecture:{}        {}'.format(ansiprint.purple(), ansiprint.end(), node_information['arch']))
-        ainformation.append('{}Operating System:{}    {}'.format(ansiprint.purple(), ansiprint.end(), node_information['os']))
-        ainformation.append('{}Kernel Version:{}      {}'.format(ansiprint.purple(), ansiprint.end(), node_information['kernel']))
+        ainformation.append('{}Architecture:{}          {}'.format(ansiprint.purple(), ansiprint.end(), node_information['arch']))
+        ainformation.append('{}Operating System:{}      {}'.format(ansiprint.purple(), ansiprint.end(), node_information['os']))
+        ainformation.append('{}Kernel Version:{}        {}'.format(ansiprint.purple(), ansiprint.end(), node_information['kernel']))
     ainformation.append('')
-    ainformation.append('{}Host CPUs:{}           {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['total']))
-    ainformation.append('{}vCPUs:{}               {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['allocated']))
-    ainformation.append('{}Load:{}                {}'.format(ansiprint.purple(), ansiprint.end(), node_information['load']))
-    ainformation.append('{}Total RAM (MiB):{}     {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['total']))
-    ainformation.append('{}Used RAM (MiB):{}      {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['used']))
-    ainformation.append('{}Free RAM (MiB):{}      {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['free']))
-    ainformation.append('{}Allocated RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['allocated']))
+    ainformation.append('{}Host CPUs:{}             {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['total']))
+    ainformation.append('{}vCPUs:{}                 {}'.format(ansiprint.purple(), ansiprint.end(), node_information['vcpu']['allocated']))
+    ainformation.append('{}Load:{}                  {}'.format(ansiprint.purple(), ansiprint.end(), node_information['load']))
+    ainformation.append('{}Total RAM (MiB):{}       {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['total']))
+    ainformation.append('{}Used RAM (MiB):{}        {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['used']))
+    ainformation.append('{}Free RAM (MiB):{}        {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['free']))
+    ainformation.append('{}Allocated RAM (MiB):{}   {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['allocated']))
+    ainformation.append('{}Provisioned RAM (MiB):{} {}'.format(ansiprint.purple(), ansiprint.end(), node_information['memory']['provisioned']))
 
     # Join it all together
     information = '\n'.join(ainformation)
@@ -303,6 +306,7 @@ def format_list(node_list):
     mem_used_length = 5
     mem_free_length = 5
     mem_alloc_length = 4
+    mem_prov_length = 4
     for node_information in node_list:
         # node_name column
         _node_name_length = len(node_information['name']) + 1
@@ -348,13 +352,18 @@ def format_list(node_list):
         _mem_alloc_length = len(str(node_information['memory']['allocated'])) + 1
         if _mem_alloc_length > mem_alloc_length:
             mem_alloc_length = _mem_alloc_length
 
+        # mem_prov column
+        _mem_prov_length = len(str(node_information['memory']['provisioned'])) + 1
+        if _mem_prov_length > mem_prov_length:
+            mem_prov_length = _mem_prov_length
+
     # Format the string (header)
     node_list_output.append(
         '{bold}{node_name: <{node_name_length}} \
St: {daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
Res: {node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
-Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}}{end_bold}'.format(
+Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}'.format(
             node_name_length=node_name_length,
             daemon_state_length=daemon_state_length,
             coordinator_state_length=coordinator_state_length,
@@ -366,6 +375,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
             mem_used_length=mem_used_length,
             mem_free_length=mem_free_length,
             mem_alloc_length=mem_alloc_length,
+            mem_prov_length=mem_prov_length,
             bold=ansiprint.bold(),
             end_bold=ansiprint.end(),
             daemon_state_colour='',
@@ -382,7 +392,8 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
             node_mem_total='Total',
             node_mem_used='Used',
             node_mem_free='Free',
-            node_mem_allocated='VMs'
+            node_mem_allocated='VMs Run',
+            node_mem_provisioned='VMs Total'
         )
     )
 
@@ -393,7 +404,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
            '{bold}{node_name: <{node_name_length}} \
    {daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
     {node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
-         {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}}{end_bold}'.format(
+         {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}'.format(
                node_name_length=node_name_length,
                daemon_state_length=daemon_state_length,
                coordinator_state_length=coordinator_state_length,
@@ -405,6 +416,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
                mem_used_length=mem_used_length,
                mem_free_length=mem_free_length,
                mem_alloc_length=mem_alloc_length,
+                mem_prov_length=mem_prov_length,
                bold='',
                end_bold='',
                daemon_state_colour=daemon_state_colour,
@@ -421,7 +433,8 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
                node_mem_total=node_information['memory']['total'],
                node_mem_used=node_information['memory']['used'],
                node_mem_free=node_information['memory']['free'],
-                node_mem_allocated=node_information['memory']['allocated']
+                node_mem_allocated=node_information['memory']['allocated'],
+                node_mem_provisioned=node_information['memory']['provisioned']
            )
        )
 
diff --git a/docs/manuals/swagger.json b/docs/manuals/swagger.json
index b91d2549..3aa29a2c 100644
--- a/docs/manuals/swagger.json
+++ b/docs/manuals/swagger.json
@@ -427,13 +427,17 @@
                 "memory": {
                     "properties": {
                         "allocated": {
-                            "description": "The total amount of RAM allocated to domains in MB",
+                            "description": "The total amount of RAM allocated to running domains in MB",
                             "type": "integer"
                         },
                         "free": {
                             "description": "The total free RAM on the node in MB",
                             "type": "integer"
                         },
+                        "provisioned": {
+                            "description": "The total amount of RAM provisioned to all domains (regardless of state) on this node in MB",
+                            "type": "integer"
+                        },
                         "total": {
                             "description": "The total amount of node RAM in MB",
                             "type": "integer"
diff --git a/node-daemon/pvcnoded/Daemon.py b/node-daemon/pvcnoded/Daemon.py
index f062fe00..a50a730d 100644
--- a/node-daemon/pvcnoded/Daemon.py
+++ b/node-daemon/pvcnoded/Daemon.py
@@ -668,6 +668,7 @@ else:
        '/nodes/{}/memfree'.format(myhostname): '0',
        '/nodes/{}/memused'.format(myhostname): '0',
        '/nodes/{}/memalloc'.format(myhostname): '0',
+        '/nodes/{}/memprov'.format(myhostname): '0',
        '/nodes/{}/vcpualloc'.format(myhostname): '0',
        '/nodes/{}/cpuload'.format(myhostname): '0.0',
        '/nodes/{}/networkscount'.format(myhostname): '0',
@@ -1322,6 +1323,7 @@ def collect_vm_stats(queue):
            return
 
    memalloc = 0
+    memprov = 0
    vcpualloc = 0
    # Toggle state management of dead VMs to restart them
    if debug:
@@ -1332,6 +1334,7 @@
        if domain in this_node.domain_list:
            # Add the allocated memory to our memalloc value
            memalloc += instance.getmemory()
+            memprov += instance.getmemory()
            vcpualloc += instance.getvcpus()
            if instance.getstate() == 'start' and instance.getnode() == this_node.name:
                if instance.getdom() != None:
@@ -1341,6 +1344,8 @@
                    except Exception as e:
                        # Toggle a state "change"
                        zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(domain): instance.getstate() })
+        elif instance.getnode() == this_node.name:
+            memprov += instance.getmemory()
 
    # Get list of running domains from Libvirt
    running_domains = lv_conn.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE)
@@ -1440,6 +1445,7 @@
 
    queue.put(len(running_domains))
    queue.put(memalloc)
+    queue.put(memprov)
    queue.put(vcpualloc)
 
    if debug:
@@ -1509,12 +1515,14 @@
        try:
            this_node.domains_count = vm_thread_queue.get()
            this_node.memalloc = vm_thread_queue.get()
+            this_node.memprov = vm_thread_queue.get()
            this_node.vcpualloc = vm_thread_queue.get()
        except:
            pass
    else:
        this_node.domains_count = 0
        this_node.memalloc = 0
+        this_node.memprov = 0
        this_node.vcpualloc = 0
 
    if enable_storage:
@@ -1537,6 +1545,7 @@
            '/nodes/{}/memused'.format(this_node.name): str(this_node.memused),
            '/nodes/{}/memfree'.format(this_node.name): str(this_node.memfree),
            '/nodes/{}/memalloc'.format(this_node.name): str(this_node.memalloc),
+            '/nodes/{}/memprov'.format(this_node.name): str(this_node.memprov),
            '/nodes/{}/vcpualloc'.format(this_node.name): str(this_node.vcpualloc),
            '/nodes/{}/cpuload'.format(this_node.name): str(this_node.cpuload),
            '/nodes/{}/domainscount'.format(this_node.name): str(this_node.domains_count),
diff --git a/node-daemon/pvcnoded/VMInstance.py b/node-daemon/pvcnoded/VMInstance.py
index a6e32b16..1b21396d 100644
--- a/node-daemon/pvcnoded/VMInstance.py
+++ b/node-daemon/pvcnoded/VMInstance.py
@@ -33,6 +33,8 @@ import pvcnoded.common as common
 
 import pvcnoded.VMConsoleWatcherInstance as VMConsoleWatcherInstance
 
+import daemon_lib.common as daemon_common
+
 def flush_locks(zk_conn, logger, dom_uuid):
     logger.out('Flushing RBD locks for VM "{}"'.format(dom_uuid), state='i')
     # Get the list of RBD images
@@ -153,7 +155,11 @@ class VMInstance(object):
     def getmemory(self):
         try:
-            memory = int(self.dom.info()[2] / 1024)
+            if self.dom is not None:
+                memory = int(self.dom.info()[2] / 1024)
+            else:
+                domain_information = daemon_common.getInformationFromXML(self.zk_conn, self.domuuid)
+                memory = int(domain_information['memory'])
         except:
             memory = 0
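
A note on the accounting added to collect_vm_stats() above: memalloc sums the memory of domains in this node's running-domain list only, while the new memprov counter additionally includes any domain assigned to this node in another state, which is why a node can report a provisioned figure above its total RAM (the CLI then colours it yellow). A minimal sketch of that accumulation, using illustrative stand-in names rather than the daemon's actual variables:

    # Sketch only: `instances` maps domain UUIDs to objects exposing the
    # getmemory()/getnode() methods used in the diff above, and
    # `running_domains` is this node's list of currently running domain UUIDs.
    def tally_memory(instances, running_domains, node_name):
        memalloc = 0  # RAM (MiB) of domains currently running on this node
        memprov = 0   # RAM (MiB) of every domain assigned to this node, any state
        for domain, instance in instances.items():
            if domain in running_domains:
                memalloc += instance.getmemory()
                memprov += instance.getmemory()
            elif instance.getnode() == node_name:
                memprov += instance.getmemory()
        return memalloc, memprov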