Lint: F841 local variable '<variable>' is assigned to but never used

Author: Joshua Boniface
Date:   2020-11-06 21:13:13 -05:00
Parent: 98a573bbc7
Commit: 5da314902f

17 changed files with 23 additions and 58 deletions
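For reference, flake8's F841 check fires when a local variable is assigned a value but never read afterwards. A minimal sketch of the pattern being cleaned up, condensed from the readdata() change further down:

    def readdata(zk_conn, key):
        data_raw = zk_conn.get(key)
        data = data_raw[0].decode('utf8')
        meta = data_raw[1]    # F841: 'meta' is assigned to but never used
        return data

The fix throughout this commit is simply to drop such assignments, and likewise to drop unused "as e" bindings from except clauses that never reference the exception object.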


@@ -263,12 +263,10 @@ def upload_ova(pool, name, ova_size):
         disk_identifier = "sd{}".format(chr(ord('a') + idx))
         volume = "ova_{}_{}".format(name, disk_identifier)
         dev_src = disk.get('src')
-        dev_type = dev_src.split('.')[-1]
         dev_size_raw = ova_archive.getmember(dev_src).size
         vm_volume_size = disk.get('capacity')

         # Normalize the dev size to bytes
-        dev_size_bytes = int(pvc_ceph.format_bytes_fromhuman(dev_size_raw)[:-1])
         dev_size = pvc_ceph.format_bytes_fromhuman(dev_size_raw)

         def cleanup_img_maps():
@@ -311,8 +309,6 @@ def upload_ova(pool, name, ova_size):
         # Open the temporary blockdev and seek to byte 0
         blk_file = open(temp_blockdev, 'wb')
         blk_file.seek(0)
-        # Write the contents of vmdk_file into blk_file
-        bytes_written = blk_file.write(vmdk_file.read())
         # Close blk_file (and flush the buffers)
         blk_file.close()
         # Close vmdk_file


@@ -792,7 +792,7 @@ def list_profile(limit, is_fuzzy=True):
         cur.execute(query, args)
         try:
             name = cur.fetchone()['name']
-        except Exception as e:
+        except Exception:
             name = "N/A"
         profile_data[etype] = name
     # Split the arguments back into a list


@@ -1302,7 +1302,6 @@ def ceph_benchmark_list(config, job):

 def format_list_benchmark(config, benchmark_information):
     benchmark_list_output = []
-    benchmark_id_length = 3
     benchmark_job_length = 20
     benchmark_bandwidth_length = dict()
     benchmark_iops_length = dict()
@@ -1422,10 +1421,6 @@ def format_list_benchmark(config, benchmark_information):
     return '\n'.join(benchmark_list_output)


 def format_info_benchmark(config, benchmark_information):
-    # Load information from benchmark output
-    benchmark_id = benchmark_information[0]['id']
-    benchmark_job = benchmark_information[0]['job']
-
     if benchmark_information[0]['benchmark_result'] == "Running":
         return "Benchmark test is still running."


@@ -494,11 +494,6 @@ def format_list(config, network_list):
         else:
             v6_flag = 'False'

-        if network_information['ip4']['dhcp_flag'] == "True":
-            dhcp4_range = '{} - {}'.format(network_information['ip4']['dhcp_start'], network_information['ip4']['dhcp_end'])
-        else:
-            dhcp4_range = 'N/A'
-
         network_list_output.append(
             '{bold}\
 {net_vni: <{net_vni_length}} \


@@ -804,10 +804,7 @@ Meta: {template_node_limit: <{template_node_limit_length}} \
             template_migration_method='Migration'
         )

-    # Keep track of nets we found to be valid to cut down on duplicate API hits
-    valid_net_list = []
-
     # Format the string (elements)
     for template in sorted(template_data, key=lambda i: i.get('name', None)):
         template_list_output.append(
             '{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \
@@ -1069,7 +1066,6 @@ def format_list_userdata(userdata_data, lines=None):
     # Determine optimal column widths
     userdata_name_length = 5
     userdata_id_length = 3
-    userdata_useruserdata_length = 8

     for userdata in userdata_data:
         # userdata_name column
@@ -1144,7 +1140,6 @@ def format_list_script(script_data, lines=None):
     # Determine optimal column widths
     script_name_length = 5
     script_id_length = 3
-    script_script_length = 8

     for script in script_data:
         # script_name column


@@ -43,7 +43,6 @@ def deletekey(zk_conn, key, recursive=True):
 def readdata(zk_conn, key):
     data_raw = zk_conn.get(key)
     data = data_raw[0].decode('utf8')
-    meta = data_raw[1]
     return data

 # Data write function


@@ -689,7 +689,6 @@ def vm_modify(domain, cfgfile, editor, restart):
     if not retcode and not vm_information.get('name', None):
         cleanup(False, 'ERROR: Could not find VM "{}"!'.format(domain))

-    dom_uuid = vm_information.get('uuid')
     dom_name = vm_information.get('name')

     if editor is True:
@@ -3144,7 +3143,6 @@ def provisioner_script_remove(name, confirm_flag):
             click.confirm('Remove provisioning script {}'.format(name), prompt_suffix='? ', abort=True)
         except Exception:
             exit(0)

-    params = dict()
     retcode, retdata = pvc_provisioner.script_remove(config, name)
     cleanup(retcode, retdata)


@@ -545,11 +545,9 @@ def get_list_dhcp(zk_conn, network, limit, only_static=False, is_fuzzy=True):

     if only_static:
         full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni)
-        reservations = True
     else:
         full_dhcp_list = getNetworkDHCPReservations(zk_conn, net_vni)
         full_dhcp_list += getNetworkDHCPLeases(zk_conn, net_vni)
-        reservations = False

     if limit:
         try:
@@ -693,6 +691,9 @@ def format_info(network_information, long_output):
         ainformation.append('{}Network firewall rules:{}'.format(ansiprint.bold(), ansiprint.end()))
         ainformation.append('')
         formatted_firewall_rules = get_list_firewall_rules(zk_conn, vni)
+
+        for rule in formatted_firewall_rules:
+            ainformation.append(rule)

     # Join it all together
     click.echo('\n'.join(ainformation))
@@ -771,11 +772,6 @@ def format_list(network_list):
         else:
             v6_flag = 'False'

-        if network_information['ip4']['dhcp_flag'] == "True":
-            dhcp4_range = '{} - {}'.format(network_information['ip4']['dhcp_start'], network_information['ip4']['dhcp_end'])
-        else:
-            dhcp4_range = 'N/A'
-
         network_list_output.append(
             '{bold}\
 {net_vni: <{net_vni_length}} \


@@ -427,9 +427,6 @@ def stop_vm(zk_conn, domain):
     if not dom_uuid:
         return False, 'ERROR: Could not find VM "{}" in the cluster!'.format(domain)

-    # Get state and verify we're OK to proceed
-    current_state = zkhandler.readdata(zk_conn, '/domains/{}/state'.format(dom_uuid))
-
     # Set the VM to start
     lock = zkhandler.exclusivelock(zk_conn, '/domains/{}/state'.format(dom_uuid))
     lock.acquire()


@@ -82,7 +82,6 @@ def renamekey(zk_conn, kv):
 def readdata(zk_conn, key):
     data_raw = zk_conn.get(key)
     data = data_raw[0].decode('utf8')
-    meta = data_raw[1]
     return data

 # Data write function

lint (new executable file, 10 lines)

@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+if ! which flake8 &>/dev/null; then
+    echo "Flake8 is required to lint this project"
+    exit 1
+fi
+
+flake8 \
+    --exclude=api-daemon/migrations/versions \
+    --exclude=api-daemon/provisioner/examples
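The new lint helper is a small wrapper: it verifies that flake8 is installed and then runs it across the tree, skipping the generated database migrations and the provisioner examples. Presumably it is meant to be run from the repository root (for example as ./lint) before committing, which is how warnings like the F841 instances addressed above would be surfaced.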


@@ -158,10 +158,6 @@ class DNSNetworkInstance(object):
     # Add a new network to the aggregator database
     def add_network(self):
         network_domain = self.network.domain
-        if self.network.ip4_gateway != 'None':
-            network_gateway = self.network.ip4_gateway
-        else:
-            network_gateway = self.network.ip6_gateway

         self.logger.out(
             'Adding entry for client domain {}'.format(
@@ -330,8 +326,6 @@ class AXFRDaemonInstance(object):
         while not self.thread_stopper.is_set():
             # We do this for each network
             for network, instance in self.dns_networks.items():
-                zone_modified = False
-
                 # Set up our SQL cursor
                 try:
                     sql_curs = self.sql_conn.cursor()


@@ -233,7 +233,7 @@ def readConfig(pvcnoded_config_file, myhostname):
         # Verify the network provided is valid
         try:
             network = ip_network(config[network_key])
-        except Exception as e:
+        except Exception:
             print('ERROR: Network address {} for {} is not valid!'.format(config[network_key], network_key))
             exit(1)
@@ -253,7 +253,7 @@ def readConfig(pvcnoded_config_file, myhostname):
             # Verify we're in the network
             if floating_addr not in list(network.hosts()):
                 raise
-        except Exception as e:
+        except Exception:
             print('ERROR: Floating address {} for {} is not valid!'.format(config[floating_key], floating_key))
             exit(1)
@@ -571,15 +571,14 @@ def cleanup():
         if d_domain[domain].getnode() == myhostname:
             try:
                 d_domain[domain].console_log_instance.stop()
-            except NameError as e:
+            except NameError:
                 pass
-            except AttributeError as e:
+            except AttributeError:
                 pass

     # Force into secondary coordinator state if needed
     try:
         if this_node.router_state == 'primary':
-            is_primary = True
             zkhandler.writedata(zk_conn, {
                 '/primary_node': 'none'
             })
@@ -891,7 +890,7 @@ if enable_networking:
        try:
            dns_aggregator.add_network(d_network[network])
        except Exception as e:
-            logger.out('Failed to create DNS Aggregator for network {}'.format(network), 'w')
+            logger.out('Failed to create DNS Aggregator for network {}: {}'.format(network, e), 'w')
        # Start primary functionality
        if this_node.router_state == 'primary' and d_network[network].nettype == 'managed':
            d_network[network].createGateways()
@@ -1150,7 +1149,7 @@ def collect_ceph_stats(queue):
            })
        except Exception as e:
            # One or more of the status commands timed out, just continue
-            logger.out('Failed to format and send pool data', state='w')
+            logger.out('Failed to format and send pool data: {}'.format(e), state='w')
            pass

    # Only grab OSD stats if there are OSDs to grab (otherwise `ceph osd df` hangs)
@@ -1341,7 +1340,7 @@ def collect_vm_stats(queue):
            try:
                if instance.getdom().state()[0] != libvirt.VIR_DOMAIN_RUNNING:
                    raise
-            except Exception as e:
+            except Exception:
                # Toggle a state "change"
                zkhandler.writedata(zk_conn, { '/domains/{}/state'.format(domain): instance.getstate() })
        elif instance.getnode() == this_node.name:
@@ -1621,11 +1620,9 @@ def node_keepalive():
    for node_name in d_node:
        try:
            node_daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node_name))
-            node_domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node_name))
            node_keepalive = int(zkhandler.readdata(zk_conn, '/nodes/{}/keepalive'.format(node_name)))
        except Exception:
            node_daemon_state = 'unknown'
-            node_domain_state = 'unknown'
            node_keepalive = 0

        # Handle deadtime and fencng if needed


@@ -174,19 +174,15 @@ class MetadataAPIInstance(object):
             pass

         # Get our real information on the host; now we can start querying about it
-        client_hostname = host_information.get('hostname', None)
         client_macaddr = host_information.get('mac_address', None)
-        client_ipaddr = host_information.get('ip4_address', None)

         # Find the VM with that MAC address - we can't assume that the hostname is actually right
         _discard, vm_list = pvc_vm.get_list(self.zk_conn, None, None, None)
-        vm_name = None
         vm_details = dict()
         for vm in vm_list:
             try:
                 for network in vm.get('networks'):
                     if network.get('mac', None) == client_macaddr:
-                        vm_name = vm.get('name')
                         vm_details = vm
             except Exception:
                 pass


@@ -512,7 +512,7 @@ class VMInstance(object):
         time.sleep(0.5)  # Time for reader to acquire the lock

         if do_migrate_shutdown:
-            migrate_shutdown_result = migrate_shutdown()
+            migrate_shutdown()

         self.logger.out('Releasing write lock for synchronization phase C', state='i', prefix='Domain {}'.format(self.domuuid))
         lock.release()
@@ -547,7 +547,6 @@ class VMInstance(object):
             time.sleep(0.1)

         self.inreceive = True
-        live_receive = True

         self.logger.out('Receiving VM migration from node "{}"'.format(self.node), state='i', prefix='Domain {}'.format(self.domuuid))


@@ -149,7 +149,7 @@ def findTargetNode(zk_conn, config, logger, dom_uuid):
     # Determine VM search field
     try:
         search_field = zkhandler.readdata(zk_conn, '/domains/{}/node_selector'.format(dom_uuid))
-    except Exception as e:
+    except Exception:
         search_field = None

     # If our search field is invalid, use and set the default (for next time)


@@ -43,7 +43,6 @@ def readdata(zk_conn, key):
     try:
         data_raw = zk_conn.get(key)
         data = data_raw[0].decode('utf8')
-        meta = data_raw[1]
         return data
     except Exception:
         return None