diff --git a/api-daemon/pvcapid/ova.py b/api-daemon/pvcapid/ova.py
index 7ea1b8df..329a644c 100755
--- a/api-daemon/pvcapid/ova.py
+++ b/api-daemon/pvcapid/ova.py
@@ -263,12 +263,10 @@ def upload_ova(pool, name, ova_size):
         disk_identifier = "sd{}".format(chr(ord('a') + idx))
         volume = "ova_{}_{}".format(name, disk_identifier)
         dev_src = disk.get('src')
-        dev_type = dev_src.split('.')[-1]
         dev_size_raw = ova_archive.getmember(dev_src).size
         vm_volume_size = disk.get('capacity')
 
         # Normalize the dev size to bytes
-        dev_size_bytes = int(pvc_ceph.format_bytes_fromhuman(dev_size_raw)[:-1])
         dev_size = pvc_ceph.format_bytes_fromhuman(dev_size_raw)
 
         def cleanup_img_maps():
@@ -311,8 +309,6 @@ def upload_ova(pool, name, ova_size):
             # Open the temporary blockdev and seek to byte 0
             blk_file = open(temp_blockdev, 'wb')
             blk_file.seek(0)
-            # Write the contents of vmdk_file into blk_file
-            bytes_written = blk_file.write(vmdk_file.read())
             # Close blk_file (and flush the buffers)
             blk_file.close()
             # Close vmdk_file
diff --git a/api-daemon/pvcapid/provisioner.py b/api-daemon/pvcapid/provisioner.py
index d5d0a566..cecf9117 100755
--- a/api-daemon/pvcapid/provisioner.py
+++ b/api-daemon/pvcapid/provisioner.py
@@ -792,7 +792,7 @@ def list_profile(limit, is_fuzzy=True):
             cur.execute(query, args)
             try:
                 name = cur.fetchone()['name']
-            except Exception as e:
+            except Exception:
                 name = "N/A"
             profile_data[etype] = name
         # Split the arguments back into a list
diff --git a/client-cli/cli_lib/ceph.py b/client-cli/cli_lib/ceph.py
index b36de0ad..1bbe958b 100644
--- a/client-cli/cli_lib/ceph.py
+++ b/client-cli/cli_lib/ceph.py
@@ -1302,7 +1302,6 @@ def ceph_benchmark_list(config, job):
 def format_list_benchmark(config, benchmark_information):
     benchmark_list_output = []
 
-    benchmark_id_length = 3
     benchmark_job_length = 20
     benchmark_bandwidth_length = dict()
     benchmark_iops_length = dict()
@@ -1422,10 +1421,6 @@ def format_list_benchmark(config, benchmark_information):
     return '\n'.join(benchmark_list_output)
 
 def format_info_benchmark(config, benchmark_information):
-    # Load information from benchmark output
-    benchmark_id = benchmark_information[0]['id']
-    benchmark_job = benchmark_information[0]['job']
-
     if benchmark_information[0]['benchmark_result'] == "Running":
         return "Benchmark test is still running."
 
diff --git a/client-cli/cli_lib/network.py b/client-cli/cli_lib/network.py
index 4523d499..df375ce7 100644
--- a/client-cli/cli_lib/network.py
+++ b/client-cli/cli_lib/network.py
@@ -494,11 +494,6 @@ def format_list(config, network_list):
         else:
             v6_flag = 'False'
 
-        if network_information['ip4']['dhcp_flag'] == "True":
-            dhcp4_range = '{} - {}'.format(network_information['ip4']['dhcp_start'], network_information['ip4']['dhcp_end'])
-        else:
-            dhcp4_range = 'N/A'
-
         network_list_output.append(
             '{bold}\
 {net_vni: <{net_vni_length}} \
diff --git a/client-cli/cli_lib/provisioner.py b/client-cli/cli_lib/provisioner.py
index c4cd44da..c70201cb 100644
--- a/client-cli/cli_lib/provisioner.py
+++ b/client-cli/cli_lib/provisioner.py
@@ -804,10 +804,7 @@ Meta: {template_node_limit: <{template_node_limit_length}} \
             template_migration_method='Migration'
         )
 
-    # Keep track of nets we found to be valid to cut down on duplicate API hits
-    valid_net_list = []
     # Format the string (elements)
-
     for template in sorted(template_data, key=lambda i: i.get('name', None)):
         template_list_output.append(
             '{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \
@@ -1069,7 +1066,6 @@ def format_list_userdata(userdata_data, lines=None):
     # Determine optimal column widths
     userdata_name_length = 5
     userdata_id_length = 3
-    userdata_useruserdata_length = 8
 
     for userdata in userdata_data:
         # userdata_name column
@@ -1144,7 +1140,6 @@ def format_list_script(script_data, lines=None):
     # Determine optimal column widths
     script_name_length = 5
     script_id_length = 3
-    script_script_length = 8
 
     for script in script_data:
         # script_name column
diff --git a/client-cli/cli_lib/zkhandler.py b/client-cli/cli_lib/zkhandler.py
index bd9e00fa..6c307432 100644
--- a/client-cli/cli_lib/zkhandler.py
+++ b/client-cli/cli_lib/zkhandler.py
@@ -43,7 +43,6 @@ def deletekey(zk_conn, key, recursive=True):
 def readdata(zk_conn, key):
     data_raw = zk_conn.get(key)
     data = data_raw[0].decode('utf8')
-    meta = data_raw[1]
     return data
 
 # Data write function
diff --git a/client-cli/pvc.py b/client-cli/pvc.py
index 8e374fff..2ea863bc 100755
--- a/client-cli/pvc.py
+++ b/client-cli/pvc.py
@@ -689,7 +689,6 @@ def vm_modify(domain, cfgfile, editor, restart):
     if not retcode and not vm_information.get('name', None):
         cleanup(False, 'ERROR: Could not find VM "{}"!'.format(domain))
 
-    dom_uuid = vm_information.get('uuid')
     dom_name = vm_information.get('name')
 
     if editor is True:
@@ -3144,7 +3143,6 @@ def provisioner_script_remove(name, confirm_flag):
         click.confirm('Remove provisioning script {}'.format(name), prompt_suffix='? ', abort=True)
     except Exception:
         exit(0)
 
-    params = dict()
     retcode, retdata = pvc_provisioner.script_remove(config, name)
     cleanup(retcode, retdata)
diff --git a/daemon-common/network.py b/daemon-common/network.py
index fe5c74aa..01a2fd26 100644
--- a/daemon-common/network.py
+++ b/daemon-common/network.py
@@ -545,11 +545,9 @@ def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):
 
     if only_static:
         full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni)
-        reservations = True
     else:
         full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni)
         full_dhcp_list += getNetworkDHCPLeases(zk_conn, net_vni)
-        reservations = False
 
     if limit:
         try:
@@ -693,6 +691,9 @@ def format_info(network_information, long_output):
         ainformation.append('{}Network firewall rules:{}'.format(ansiprint.bold(), ansiprint.end()))
         ainformation.append('')
         formatted_firewall_rules = get_list_firewall_rules(zk_conn, vni)
+        for rule in formatted_firewall_rules:
+            ainformation.append(rule)
+
     # Join it all together
     click.echo('\n'.join(ainformation))
 
@@ -771,11 +772,6 @@ def format_list(network_list):
         else:
             v6_flag = 'False'
 
-        if network_information['ip4']['dhcp_flag'] == "True":
-            dhcp4_range = '{} - {}'.format(network_information['ip4']['dhcp_start'], network_information['ip4']['dhcp_end'])
-        else:
-            dhcp4_range = 'N/A'
-
         network_list_output.append(
             '{bold}\
 {net_vni: <{net_vni_length}} \
diff --git a/daemon-common/vm.py b/daemon-common/vm.py
index 53d064cc..fed0bb72 100644
--- a/daemon-common/vm.py
+++ b/daemon-common/vm.py
@@ -427,9 +427,6 @@ def stop_vm(zk_conn, domain):
     if not dom_uuid:
         return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)
 
-    # Get state and verify we're OK to proceed
-    current_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
-
     # Set the VM to start
     lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
     lock.acquire()
diff --git a/daemon-common/zkhandler.py b/daemon-common/zkhandler.py
index 31d8b6c1..55dd123d 100644
--- a/daemon-common/zkhandler.py
+++ b/daemon-common/zkhandler.py
@@ -82,7 +82,6 @@ def renamekey(zk_conn, kv):
 def readdata(zk_conn, key):
     data_raw = zk_conn.get(key)
     data = data_raw[0].decode('utf8')
-    meta = data_raw[1]
     return data
 
 # Data write function
diff --git a/lint b/lint
new file mode 100755
index 00000000..993f919d
--- /dev/null
+++ b/lint
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+if ! which flake8 &>/dev/null; then
+    echo "Flake8 is required to lint this project"
+    exit 1
+fi
+
+flake8 \
+    --exclude=api-daemon/migrations/versions \
+    --exclude=api-daemon/provisioner/examples
diff --git a/node-daemon/pvcnoded/DNSAggregatorInstance.py b/node-daemon/pvcnoded/DNSAggregatorInstance.py
index e7256bda..bf64bd5e 100644
--- a/node-daemon/pvcnoded/DNSAggregatorInstance.py
+++ b/node-daemon/pvcnoded/DNSAggregatorInstance.py
@@ -158,10 +158,6 @@ class DNSNetworkInstance(object):
     # Add a new network to the aggregator database
     def add_network(self):
         network_domain = self.network.domain
-        if self.network.ip4_gateway != 'None':
-            network_gateway = self.network.ip4_gateway
-        else:
-            network_gateway = self.network.ip6_gateway
 
         self.logger.out(
             'Adding entry for client domain {}'.format(
@@ -330,8 +326,6 @@ class AXFRDaemonInstance(object):
         while not self.thread_stopper.is_set():
             # We do this for each network
             for network, instance in self.dns_networks.items():
-                zone_modified = False
-
                 # Set up our SQL cursor
                 try:
                     sql_curs = self.sql_conn.cursor()
diff --git a/node-daemon/pvcnoded/Daemon.py b/node-daemon/pvcnoded/Daemon.py
index 7abee370..3d9befe1 100644
--- a/node-daemon/pvcnoded/Daemon.py
+++ b/node-daemon/pvcnoded/Daemon.py
@@ -233,7 +233,7 @@ def readConfig(pvcnoded_config_file, myhostname):
             # Verify the network provided is valid
             try:
                 network = ip_network(config[network_key])
-            except Exception as e:
+            except Exception:
                 print('ERROR: Network address {} for {} is not valid!'.format(config[network_key], network_key))
                 exit(1)
 
@@ -253,7 +253,7 @@ def readConfig(pvcnoded_config_file, myhostname):
                 # Verify we're in the network
                 if floating_addr not in list(network.hosts()):
                     raise
-            except Exception as e:
+            except Exception:
                 print('ERROR: Floating address {} for {} is not valid!'.format(config[floating_key], floating_key))
                 exit(1)
 
@@ -571,15 +571,14 @@ def cleanup():
         if d_domain[domain].getnode() == myhostname:
             try:
                 d_domain[domain].console_log_instance.stop()
-            except NameError as e:
+            except NameError:
                 pass
-            except AttributeError as e:
+            except AttributeError:
                 pass
 
     # Force into secondary coordinator state if needed
     try:
         if this_node.router_state == 'primary':
-            is_primary = True
             zkhandler.writedata(zk_conn, {
                 '/primary_node': 'none'
             })
@@ -891,7 +890,7 @@ if enable_networking:
             try:
                 dns_aggregator.add_network(d_network[network])
             except Exception as e:
-                logger.out('Failed to create DNS Aggregator for network {}'.format(network), 'w')
+                logger.out('Failed to create DNS Aggregator for network {}: {}'.format(network, e), 'w')
             # Start primary functionality
             if this_node.router_state == 'primary' and d_network[network].nettype == 'managed':
                 d_network[network].createGateways()
@@ -1150,7 +1149,7 @@ def collect_ceph_stats(queue):
                 })
             except Exception as e:
                 # One or more of the status commands timed out, just continue
-                logger.out('Failed to format and send pool data', state='w')
+                logger.out('Failed to format and send pool data: {}'.format(e), state='w')
                 pass
 
     # Only grab OSD stats if there are OSDs to grab (otherwise `ceph osd df` hangs)
@@ -1341,7 +1340,7 @@ def collect_vm_stats(queue):
             try:
                 if instance.getdom().state()[0] != libvirt.VIR_DOMAIN_RUNNING:
                     raise
-            except Exception as e:
+            except Exception:
                 # Toggle a state "change"
                 zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(domain): instance.getstate() })
         elif instance.getnode() == this_node.name:
@@ -1621,11 +1620,9 @@ def node_keepalive():
     for node_name in d_node:
         try:
             node_daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node_name))
-            node_domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node_name))
            node_keepalive = int(zkhandler.readdata(zk_conn, '/nodes/{}/keepalive'.format(node_name)))
         except Exception:
             node_daemon_state = 'unknown'
-            node_domain_state = 'unknown'
             node_keepalive = 0
 
         # Handle deadtime and fencng if needed
diff --git a/node-daemon/pvcnoded/MetadataAPIInstance.py b/node-daemon/pvcnoded/MetadataAPIInstance.py
index 5aca7cfb..32fea150 100644
--- a/node-daemon/pvcnoded/MetadataAPIInstance.py
+++ b/node-daemon/pvcnoded/MetadataAPIInstance.py
@@ -174,19 +174,15 @@ class MetadataAPIInstance(object):
             pass
 
         # Get our real information on the host; now we can start querying about it
-        client_hostname = host_information.get('hostname', None)
         client_macaddr = host_information.get('mac_address', None)
-        client_ipaddr = host_information.get('ip4_address', None)
 
         # Find the VM with that MAC address - we can't assume that the hostname is actually right
         _discard, vm_list = pvc_vm.get_list(self.zk_conn, None, None, None)
-        vm_name = None
         vm_details = dict()
         for vm in vm_list:
             try:
                 for network in vm.get('networks'):
                     if network.get('mac', None) == client_macaddr:
-                        vm_name = vm.get('name')
                         vm_details = vm
             except Exception:
                 pass
diff --git a/node-daemon/pvcnoded/VMInstance.py b/node-daemon/pvcnoded/VMInstance.py
index f2868395..f75060b3 100644
--- a/node-daemon/pvcnoded/VMInstance.py
+++ b/node-daemon/pvcnoded/VMInstance.py
@@ -512,7 +512,7 @@ class VMInstance(object):
             time.sleep(0.5)  # Time for reader to acquire the lock
 
             if do_migrate_shutdown:
-                migrate_shutdown_result = migrate_shutdown()
+                migrate_shutdown()
 
             self.logger.out('Releasing write lock for synchronization phase C', state='i', prefix='Domain {}'.format(self.domuuid))
             lock.release()
@@ -547,7 +547,6 @@ class VMInstance(object):
             time.sleep(0.1)
 
         self.inreceive = True
-        live_receive = True
 
         self.logger.out('Receiving VM migration from node "{}"'.format(self.node), state='i', prefix='Domain {}'.format(self.domuuid))
 
diff --git a/node-daemon/pvcnoded/common.py b/node-daemon/pvcnoded/common.py
index 6adcc689..d39d5f85 100644
--- a/node-daemon/pvcnoded/common.py
+++ b/node-daemon/pvcnoded/common.py
@@ -149,7 +149,7 @@ def findTargetNode(zk_conn, config, logger, dom_uuid):
     # Determine VM search field
     try:
         search_field = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(dom_uuid))
-    except Exception as e:
+    except Exception:
         search_field = None
 
     # If our search field is invalid, use and set the default (for next time)
diff --git a/node-daemon/pvcnoded/zkhandler.py b/node-daemon/pvcnoded/zkhandler.py
index 1e2c1710..b7ee74f0 100644
--- a/node-daemon/pvcnoded/zkhandler.py
+++ b/node-daemon/pvcnoded/zkhandler.py
@@ -43,7 +43,6 @@ def readdata(zk_conn, key):
     try:
         data_raw = zk_conn.get(key)
         data = data_raw[0].decode('utf8')
-        meta = data_raw[1]
         return data
     except Exception:
         return None