From aecb845d6adb28275453cf468eb73e7f52c76d54 Mon Sep 17 00:00:00 2001
From: "Joshua M. Boniface"
Date: Fri, 6 Nov 2020 20:37:52 -0500
Subject: [PATCH] Lint: E713 test for membership should be 'not in'

---
 api-daemon/pvcapid/provisioner.py    |  4 ++--
 client-cli/cli_lib/vm.py             |  2 +-
 daemon-common/vm.py                  |  2 +-
 node-daemon/pvcnoded/CephInstance.py |  2 +-
 node-daemon/pvcnoded/Daemon.py       | 26 +++++++++++++-------------
 node-daemon/pvcnoded/VMInstance.py   |  2 +-
 6 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/api-daemon/pvcapid/provisioner.py b/api-daemon/pvcapid/provisioner.py
index 66d8c5a3..d5d0a566 100755
--- a/api-daemon/pvcapid/provisioner.py
+++ b/api-daemon/pvcapid/provisioner.py
@@ -1177,7 +1177,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
     cluster_networks, _discard = pvc_network.getClusterNetworkList(zk_conn)
     for network in vm_data['networks']:
         vni = str(network['vni'])
-        if not vni in cluster_networks:
+        if vni not in cluster_networks:
             raise ClusterError('The network VNI "{}" is not present on the cluster.'.format(vni))

     print("All configured networks for VM are valid")
@@ -1261,7 +1261,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
     loader.exec_module(installer_script)

     # Verify that the install() function is valid
-    if not "install" in dir(installer_script):
+    if "install" not in dir(installer_script):
         raise ProvisioningError("Specified script does not contain an install() function.")

     print("Provisioning script imported successfully")
diff --git a/client-cli/cli_lib/vm.py b/client-cli/cli_lib/vm.py
index d16a5958..89a9c76f 100644
--- a/client-cli/cli_lib/vm.py
+++ b/client-cli/cli_lib/vm.py
@@ -592,7 +592,7 @@ def format_list(config, vm_list, raw):
         net_list = []
         vm_net_colour = ''
         for net_vni in raw_net_list:
-            if not net_vni in valid_net_list:
+            if net_vni not in valid_net_list:
                 response = call_api(config, 'get', '/network/{net}'.format(net=net_vni))
                 if response.status_code != 200 and net_vni not in ['cluster', 'storage', 'upstream']:
                     vm_net_colour = ansiprint.red()
diff --git a/daemon-common/vm.py b/daemon-common/vm.py
index 8ed2807c..53d064cc 100644
--- a/daemon-common/vm.py
+++ b/daemon-common/vm.py
@@ -660,7 +660,7 @@ def get_list(zk_conn, node, state, limit, is_fuzzy=True):

     if state:
         valid_states = [ 'start', 'restart', 'shutdown', 'stop', 'disable', 'fail', 'migrate', 'unmigrate', 'provision' ]
-        if not state in valid_states:
+        if state not in valid_states:
             return False, 'VM state "{}" is not valid.'.format(state)

     full_vm_list = zkhandler.listchildren(zk_conn, '/domains')
diff --git a/node-daemon/pvcnoded/CephInstance.py b/node-daemon/pvcnoded/CephInstance.py
index d4461579..e82817e5 100644
--- a/node-daemon/pvcnoded/CephInstance.py
+++ b/node-daemon/pvcnoded/CephInstance.py
@@ -194,7 +194,7 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
     # 1. Verify the OSD is present
     retcode, stdout, stderr = common.run_os_command('ceph osd ls')
     osd_list = stdout.split('\n')
-    if not osd_id in osd_list:
+    if osd_id not in osd_list:
         logger.out('Could not find OSD {} in the cluster'.format(osd_id), state='e')
         return True

diff --git a/node-daemon/pvcnoded/Daemon.py b/node-daemon/pvcnoded/Daemon.py
index 036d131e..bdac4a13 100644
--- a/node-daemon/pvcnoded/Daemon.py
+++ b/node-daemon/pvcnoded/Daemon.py
@@ -251,7 +251,7 @@ def readConfig(pvcnoded_config_file, myhostname):
                 # Set the ipaddr
                 floating_addr = ip_address(config[floating_key].split('/')[0])
                 # Verify we're in the network
-                if not floating_addr in list(network.hosts()):
+                if floating_addr not in list(network.hosts()):
                     raise
             except Exception as e:
                 print('ERROR: Floating address {} for {} is not valid!'.format(config[floating_key], floating_key))
@@ -803,12 +803,12 @@ def update_nodes(new_node_list):

     # Add any missing nodes to the list
     for node in new_node_list:
-        if not node in node_list:
+        if node not in node_list:
             d_node[node] = NodeInstance.NodeInstance(node, myhostname, zk_conn, config, logger, d_node, d_network, d_domain, dns_aggregator, metadata_api)

     # Remove any deleted nodes from the list
     for node in node_list:
-        if not node in new_node_list:
+        if node not in new_node_list:
             # Delete the object
             del(d_node[node])

@@ -885,7 +885,7 @@ if enable_networking:

         # Add any missing networks to the list
         for network in new_network_list:
-            if not network in network_list:
+            if network not in network_list:
                 d_network[network] = VXNetworkInstance.VXNetworkInstance(network, zk_conn, config, logger, this_node, dns_aggregator)
                 if config['daemon_mode'] == 'coordinator' and d_network[network].nettype == 'managed':
                     try:
@@ -899,7 +899,7 @@ if enable_networking:

         # Remove any deleted networks from the list
         for network in network_list:
-            if not network in new_network_list:
+            if network not in new_network_list:
                 if d_network[network].nettype == 'managed':
                     # Stop primary functionality
                     if this_node.router_state == 'primary':
@@ -934,12 +934,12 @@ if enable_hypervisor:

         # Add any missing domains to the list
         for domain in new_domain_list:
-            if not domain in domain_list:
+            if domain not in domain_list:
                 d_domain[domain] = VMInstance.VMInstance(domain, zk_conn, config, logger, this_node)

         # Remove any deleted domains from the list
         for domain in domain_list:
-            if not domain in new_domain_list:
+            if domain not in new_domain_list:
                 # Delete the object
                 del(d_domain[domain])

@@ -965,12 +965,12 @@ if enable_storage:

         # Add any missing OSDs to the list
         for osd in new_osd_list:
-            if not osd in osd_list:
+            if osd not in osd_list:
                 d_osd[osd] = CephInstance.CephOSDInstance(zk_conn, this_node, osd)

         # Remove any deleted OSDs from the list
         for osd in osd_list:
-            if not osd in new_osd_list:
+            if osd not in new_osd_list:
                 # Delete the object
                 del(d_osd[osd])

@@ -985,14 +985,14 @@ if enable_storage:

         # Add any missing Pools to the list
         for pool in new_pool_list:
-            if not pool in pool_list:
+            if pool not in pool_list:
                 d_pool[pool] = CephInstance.CephPoolInstance(zk_conn, this_node, pool)
                 d_volume[pool] = dict()
                 volume_list[pool] = []

         # Remove any deleted Pools from the list
         for pool in pool_list:
-            if not pool in new_pool_list:
+            if pool not in new_pool_list:
                 # Delete the object
                 del(d_pool[pool])

@@ -1008,12 +1008,12 @@ if enable_storage:

             # Add any missing Volumes to the list
             for volume in new_volume_list:
-                if not volume in volume_list[pool]:
+                if volume not in volume_list[pool]:
                     d_volume[pool][volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)

             # Remove any deleted Volumes from the list
             for volume in volume_list[pool]:
-                if not volume in new_volume_list:
+                if volume not in new_volume_list:
                     # Delete the object
                     del(d_volume[pool][volume])

diff --git a/node-daemon/pvcnoded/VMInstance.py b/node-daemon/pvcnoded/VMInstance.py
index d46aa297..f2868395 100644
--- a/node-daemon/pvcnoded/VMInstance.py
+++ b/node-daemon/pvcnoded/VMInstance.py
@@ -180,7 +180,7 @@ class VMInstance(object):

     # Manage local node domain_list
     def addDomainToList(self):
-        if not self.domuuid in self.this_node.domain_list:
+        if self.domuuid not in self.this_node.domain_list:
             try:
                 # Add the domain to the domain_list array
                 self.this_node.domain_list.append(self.domuuid)
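
For context, flake8's E713 check flags the `not X in Y` spelling because Python
parses it as `not (X in Y)`; the dedicated `not in` operator performs the same
membership test while reading as a single operation. A minimal sketch of the
before/after pattern this patch applies everywhere (the values below are
illustrative, not taken from the PVC codebase):

    # E713: "test for membership should be 'not in'"
    cluster_networks = ['100', '200', '300']  # illustrative VNI list
    vni = '400'

    # Before: parses as `not (vni in cluster_networks)` and triggers E713
    if not vni in cluster_networks:
        print('The network VNI "{}" is not present on the cluster.'.format(vni))

    # After: identical behavior, lint-clean
    if vni not in cluster_networks:
        print('The network VNI "{}" is not present on the cluster.'.format(vni))

Because the two forms are semantically identical, the change is purely
stylistic and carries no behavioral risk, which is why it can safely touch
six files at once.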