Lint: E722 do not use bare 'except'
parent 601ab1a181
commit 63f4f9aed7
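flake8 rule E722 flags bare 'except:' clauses. A bare except catches every exception, including SystemExit and KeyboardInterrupt, so it can swallow Ctrl-C and interpreter shutdown as well as real errors. Narrowing each handler to 'except Exception:' keeps the existing catch-all behaviour for ordinary errors while letting those control-flow exceptions propagate. A minimal sketch of the pattern applied throughout this commit (the names here are illustrative, not taken from the codebase):

    raw_value = 'not-a-number'        # illustrative input
    try:
        value = int(raw_value)        # ordinary failures land in the handler
    except Exception:                 # was a bare "except:", which E722 flags
        value = 0                     # KeyboardInterrupt and SystemExit now propagate

The hunks below apply this one-line substitution at every bare-except site.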
@@ -47,7 +47,7 @@ def strtobool(stringv):
 return bool(stringv)
 try:
 return bool(dustrtobool(stringv))
-except:
+except Exception:
 return False

 #

@@ -134,13 +134,13 @@ def run_benchmark(self, pool):
 # Phase 0 - connect to databases
 try:
 db_conn, db_cur = open_database(config)
-except:
+except Exception:
 print('FATAL - failed to connect to Postgres')
 raise Exception

 try:
 zk_conn = pvc_common.startZKConnection(config['coordinators'])
-except:
+except Exception:
 print('FATAL - failed to connect to Zookeeper')
 raise Exception
@@ -53,13 +53,13 @@ def strtobool(stringv):
 return bool(stringv)
 try:
 return bool(dustrtobool(stringv))
-except:
+except Exception:
 return False

 # Parse the configuration file
 try:
 pvc_config_file = os.environ['PVC_CONFIG_FILE']
-except:
+except Exception:
 print('Error: The "PVC_CONFIG_FILE" environment variable must be set before starting pvcapid.')
 exit(1)

@@ -4205,11 +4205,11 @@ class API_Provisioner_Template_System_Root(Resource):
 # Validate arguments
 try:
 vcpus = int(reqargs.get('vcpus'))
-except:
+except Exception:
 return { "message": "A vcpus value must be an integer" }, 400
 try:
 vram = int(reqargs.get('vram'))
-except:
+except Exception:
 return { "message": "A vram value must be an integer" }, 400
 # Cast boolean arguments
 if bool(strtobool(reqargs.get('serial', 'false'))):

@@ -4345,11 +4345,11 @@ class API_Provisioner_Template_System_Element(Resource):
 # Validate arguments
 try:
 vcpus = int(reqargs.get('vcpus'))
-except:
+except Exception:
 return { "message": "A vcpus value must be an integer" }, 400
 try:
 vram = int(reqargs.get('vram'))
-except:
+except Exception:
 return { "message": "A vram value must be an integer" }, 400
 # Cast boolean arguments
 if bool(strtobool(reqargs.get('serial', False))):
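In the two template-system hunks above, the try blocks only ever fail on the int() cast, so a narrower clause such as 'except (TypeError, ValueError):' would also satisfy E722; 'except Exception:' is simply the smallest behaviour-preserving change. A hedged sketch of the narrower variant (illustrative helper, not part of this commit):

    def parse_vcpus(raw):
        try:
            return int(raw), None
        except (TypeError, ValueError):   # int() raises TypeError for None, ValueError for bad strings
            return None, ({"message": "A vcpus value must be an integer"}, 400)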
@@ -42,7 +42,7 @@ def strtobool(stringv):
 return bool(stringv)
 try:
 return bool(dustrtobool(stringv))
-except:
+except Exception:
 return False

 #

@@ -499,7 +499,7 @@ def update_vm_meta(vm, limit, selector, autostart, provisioner_profile, migratio
 if autostart is not None:
 try:
 autostart = bool(strtobool(autostart))
-except:
+except Exception:
 autostart = False
 retflag, retdata = pvc_vm.modify_vm_metadata(zk_conn, vm, limit, selector, autostart, provisioner_profile, migration_method)
 pvc_common.stopZKConnection(zk_conn)

@@ -1393,7 +1393,7 @@ def ceph_volume_upload(pool, volume, img_type):
 # Save the data to the blockdev directly
 try:
 data.save(dest_blockdev)
-except:
+except Exception:
 output = {
 'message': "Failed to write image file to volume."
 }

@@ -1457,7 +1457,7 @@ def ceph_volume_upload(pool, volume, img_type):
 def ova_stream_factory(total_content_length, filename, content_type, content_length=None):
 return open(temp_blockdev, 'wb')
 parse_form_data(flask.request.environ, stream_factory=ova_stream_factory)
-except:
+except Exception:
 output = {
 'message': "Failed to upload or write image file to temporary volume."
 }
@@ -233,7 +233,7 @@ def upload_ova(pool, name, ova_size):
 def ova_stream_factory(total_content_length, filename, content_type, content_length=None):
 return open(ova_blockdev, 'wb')
 parse_form_data(flask.request.environ, stream_factory=ova_stream_factory)
-except:
+except Exception:
 output = {
 'message': "Failed to upload or write OVA file to temporary volume."
 }

@@ -329,7 +329,7 @@ def upload_ova(pool, name, ova_size):
 vmdk_file.close()
 # Perform an OS-level sync
 pvc_common.run_os_command('sync')
-except:
+except Exception:
 output = {
 'message': "Failed to write image file '{}' to temporary volume.".format(disk.get('src'))
 }

@@ -493,7 +493,7 @@ class OVFParser(object):
 for item in hardware_list:
 try:
 item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text]
-except:
+except Exception:
 continue
 quantity = item.find("{{{rasd}}}VirtualQuantity".format(rasd=self.RASD_SCHEMA))
 if quantity is None:
@@ -49,7 +49,7 @@ def strtobool(stringv):
 return bool(stringv)
 try:
 return bool(dustrtobool(stringv))
-except:
+except Exception:
 return False

 #

@@ -370,7 +370,7 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
 if vcpu_count is not None:
 try:
 vcpu_count = int(vcpu_count)
-except:
+except Exception:
 retmsg = { 'message': 'The vcpus value must be an integer.' }
 retcode = 400
 return retmsg, retcode

@@ -379,7 +379,7 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
 if vram_mb is not None:
 try:
 vram_mb = int(vram_mb)
-except:
+except Exception:
 retmsg = { 'message': 'The vram value must be an integer.' }
 retcode = 400
 return retmsg, retcode

@@ -388,7 +388,7 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
 if serial is not None:
 try:
 serial = bool(strtobool(serial))
-except:
+except Exception:
 retmsg = { 'message': 'The serial value must be a boolean.' }
 retcode = 400
 return retmsg, retcode

@@ -397,7 +397,7 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
 if vnc is not None:
 try:
 vnc = bool(strtobool(vnc))
-except:
+except Exception:
 retmsg = { 'message': 'The vnc value must be a boolean.' }
 retcode = 400
 return retmsg, retcode

@@ -415,7 +415,7 @@ def modify_template_system(name, vcpu_count=None, vram_mb=None, serial=None, vnc
 if node_autostart is not None:
 try:
 node_autostart = bool(strtobool(node_autostart))
-except:
+except Exception:
 retmsg = { 'message': 'The node_autostart value must be a boolean.' }
 retcode = 400
 fields.append({'field': 'node_autostart', 'data': node_autostart})

@@ -1044,13 +1044,13 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
 # Phase 0 - connect to databases
 try:
 db_conn, db_cur = open_database(config)
-except:
+except Exception:
 print('FATAL - failed to connect to Postgres')
 raise Exception

 try:
 zk_conn = pvc_common.startZKConnection(config['coordinators'])
-except:
+except Exception:
 print('FATAL - failed to connect to Zookeeper')
 raise Exception

@@ -1212,7 +1212,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
 pool_information = pvc_ceph.getPoolInformation(zk_conn, pool)
 if not pool_information:
 raise
-except:
+except Exception:
 raise ClusterError('Pool "{}" is not present on the cluster.'.format(pool))
 pool_free_space_gb = int(pool_information['stats']['free_bytes'] / 1024 / 1024 / 1024)
 pool_vm_usage_gb = int(pools[pool])
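A few hunks (here in create_vm, and later in node_keepalive and VMInstance) pair the catch-all handler with a bare 'raise' that has no active exception, for example 'if not pool_information: raise'. In Python 3 such a raise produces a RuntimeError ('No active exception to re-raise'), which is an Exception subclass, so swapping 'except:' for 'except Exception:' preserves the existing control flow. A small self-contained sketch of that pattern (the lookup helper is hypothetical):

    def lookup_pool(pool):
        # hypothetical stand-in for pvc_ceph.getPoolInformation()
        return {}

    try:
        pool_information = lookup_pool('vms')
        if not pool_information:
            raise                      # no active exception, so Python raises RuntimeError
    except Exception:                  # still caught, just as the old bare except caught it
        print('Pool is not present on the cluster.')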
@@ -1471,7 +1471,7 @@ def format_info_benchmark(config, benchmark_information):
 for element in benchmark_details[test]['bandwidth']:
 try:
 _element_length = len(format_bytes_tohuman(int(float(benchmark_details[test]['bandwidth'][element]))))
-except:
+except Exception:
 _element_length = len(benchmark_details[test]['bandwidth'][element])
 if _element_length > bandwidth_column_length:
 bandwidth_column_length = _element_length

@@ -1479,7 +1479,7 @@ def format_info_benchmark(config, benchmark_information):
 for element in benchmark_details[test]['iops']:
 try:
 _element_length = len(format_ops_tohuman(int(float(benchmark_details[test]['iops'][element]))))
-except:
+except Exception:
 _element_length = len(benchmark_details[test]['iops'][element])
 if _element_length > iops_column_length:
 iops_column_length = _element_length
@@ -301,7 +301,7 @@ def follow_console_log(config, vm, lines=10):
 try:
 response = call_api(config, 'get', '/vm/{vm}/console'.format(vm=vm), params=params)
 new_console_log = response.json()['data']
-except:
+except Exception:
 break
 # Split the new and old log strings into constitutent lines
 old_console_loglines = console_log.split('\n')
@@ -400,7 +400,7 @@ def node_secondary(node, wait):
 break
 else:
 time.sleep(0.5)
-except:
+except Exception:
 time.sleep(0.5)
 cleanup(retcode, retmsg)

@@ -444,7 +444,7 @@ def node_primary(node, wait):
 break
 else:
 time.sleep(0.5)
-except:
+except Exception:
 time.sleep(0.5)
 cleanup(retcode, retmsg)

@@ -615,7 +615,7 @@ def vm_define(vmconfig, target_node, node_limit, node_selector, node_autostart,
 try:
 xml_data = etree.fromstring(vmconfig_data)
 new_cfg = etree.tostring(xml_data, pretty_print=True).decode('utf8')
-except:
+except Exception:
 cleanup(False, 'Error: XML is malformed or invalid')

 retcode, retmsg = pvc_vm.vm_define(config, new_cfg, target_node, node_limit, node_selector, node_autostart, migration_method)

@@ -771,7 +771,7 @@ def vm_undefine(domain, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Undefine VM {}'.format(domain), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_vm.vm_remove(config, domain, delete_disks=False)

@@ -797,7 +797,7 @@ def vm_remove(domain, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Undefine VM {} and remove all disks'.format(domain), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_vm.vm_remove(config, domain, delete_disks=True)

@@ -1296,7 +1296,7 @@ def net_remove(net, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove network {}'.format(net), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_network.net_remove(config, net)

@@ -1399,7 +1399,7 @@ def net_dhcp_remove(net, macaddr, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove DHCP lease for {} in network {}'.format(macaddr, net), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_network.net_dhcp_remove(config, net, macaddr)

@@ -1513,7 +1513,7 @@ def net_acl_remove(net, rule, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove ACL {} in network {}'.format(rule, net), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_network.net_acl_remove(config, net, rule)

@@ -1621,7 +1621,7 @@ def ceph_benchmark_run(pool):
 """
 try:
 click.confirm('NOTE: Storage benchmarks generate significant load on the cluster and can take a very long time to complete on slow storage. They should be run sparingly. Continue', prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_ceph.ceph_benchmark_run(config, pool)

@@ -1700,7 +1700,7 @@ def ceph_osd_add(node, device, weight, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Destroy all data and create a new OSD on {}:{}'.format(node, device), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_ceph.ceph_osd_add(config, node, device, weight)

@@ -1728,7 +1728,7 @@ def ceph_osd_remove(osdid, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove OSD {}'.format(osdid), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_ceph.ceph_osd_remove(config, osdid)

@@ -1883,7 +1883,7 @@ def ceph_pool_remove(name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove RBD pool {}'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_ceph.ceph_pool_remove(config, name)

@@ -1998,7 +1998,7 @@ def ceph_volume_remove(pool, name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove volume {}/{}'.format(pool, name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_ceph.ceph_volume_remove(config, pool, name)

@@ -2173,7 +2173,7 @@ def ceph_volume_snapshot_remove(pool, volume, name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove snapshot {} for volume {}/{}'.format(name, pool, volume), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retmsg = pvc_ceph.ceph_snapshot_remove(config, pool, volume, name)

@@ -2439,7 +2439,7 @@ def provisioner_template_system_remove(name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove system template {}'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retdata = pvc_provisioner.template_remove(config, name, template_type='system')

@@ -2542,7 +2542,7 @@ def provisioner_template_network_remove(name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove network template {}'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retdata = pvc_provisioner.template_remove(config, name, template_type='network')

@@ -2602,7 +2602,7 @@ def provisioner_template_network_vni_remove(name, vni, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove VNI {} from network template {}'.format(vni, name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retdata = pvc_provisioner.template_element_remove(config, name, vni, element_type='net', template_type='network')

@@ -2673,7 +2673,7 @@ def provisioner_template_storage_remove(name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove storage template {}'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retdata = pvc_provisioner.template_remove(config, name, template_type='storage')

@@ -2788,7 +2788,7 @@ def provisioner_template_storage_disk_remove(name, disk, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove disk {} from storage template {}'.format(disk, name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retdata = pvc_provisioner.template_element_remove(config, name, disk, element_type='disk', template_type='storage')

@@ -2972,7 +2972,7 @@ def provisioner_userdata_remove(name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove userdata document {}'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retdata = pvc_provisioner.userdata_remove(config, name)

@@ -3145,7 +3145,7 @@ def provisioner_script_remove(name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove provisioning script {}'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)
 params = dict()

@@ -3238,7 +3238,7 @@ def provisioner_ova_remove(name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove OVA image {}'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retdata = pvc_provisioner.ova_remove(config, name)

@@ -3418,7 +3418,7 @@ def provisioner_profile_remove(name, confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove profile {}'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 retcode, retdata = pvc_provisioner.profile_remove(config, name)

@@ -3614,7 +3614,7 @@ def init_cluster(confirm_flag):
 if not confirm_flag:
 try:
 click.confirm('Remove all existing cluster data from coordinators and initialize a new cluster'.format(name), prompt_suffix='? ', abort=True)
-except:
+except Exception:
 exit(0)

 # Easter-egg
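The CLI hunks above all wrap click.confirm(..., abort=True), which raises click.Abort when the user declines. click.Abort is a RuntimeError subclass, so the narrowed 'except Exception:' still catches the abort and the commands still exit(0) on a declined prompt. A short sketch of that flow (prompt text illustrative):

    import click

    try:
        click.confirm('Remove object', prompt_suffix='? ', abort=True)
    except Exception:    # click.Abort derives from RuntimeError, so it is still caught here
        exit(0)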
@@ -223,7 +223,7 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
 time.sleep(5)
 else:
 raise
-except:
+except Exception:
 break

 # 3. Stop the OSD process and wait for it to be terminated
@@ -337,7 +337,7 @@ class AXFRDaemonInstance(object):
 # Set up our SQL cursor
 try:
 sql_curs = self.sql_conn.cursor()
-except:
+except Exception:
 time.sleep(0.5)
 continue
@@ -90,7 +90,7 @@ def stopKeepaliveTimer():
 try:
 update_timer.shutdown()
 logger.out('Stopping keepalive timer', state='s')
-except:
+except Exception:
 pass

 ###############################################################################

@@ -100,7 +100,7 @@ def stopKeepaliveTimer():
 # Get the config file variable from the environment
 try:
 pvcnoded_config_file = os.environ['PVCD_CONFIG_FILE']
-except:
+except Exception:
 print('ERROR: The "PVCD_CONFIG_FILE" environment variable must be set before starting pvcnoded.')
 exit(1)

@@ -176,7 +176,7 @@ def readConfig(pvcnoded_config_file, myhostname):
 config_debug = {
 'debug': o_config['pvc']['debug']
 }
-except:
+except Exception:
 config_debug = {
 'debug': False
 }

@@ -535,7 +535,7 @@ def zk_listener(state):
 _zk_conn = kazoo.client.KazooClient(hosts=config['coordinators'])
 try:
 _zk_conn.start()
-except:
+except Exception:
 del _zk_conn
 continue

@@ -586,7 +586,7 @@ def cleanup():
 logger.out('Waiting for primary migration', state='s')
 while this_node.router_state != 'secondary':
 time.sleep(0.5)
-except:
+except Exception:
 pass

 # Stop keepalive thread

@@ -610,7 +610,7 @@ def cleanup():
 try:
 zk_conn.stop()
 zk_conn.close()
-except:
+except Exception:
 pass

 logger.out('Terminated pvc daemon', state='s')

@@ -829,7 +829,7 @@ def set_maintenance(_maintenance, stat, event=''):
 global maintenance
 try:
 maintenance = bool(strtobool(_maintenance.decode('ascii')))
-except:
+except Exception:
 maintenance = False

 # Primary node

@@ -1371,7 +1371,7 @@ def collect_vm_stats(queue):
 if debug:
 try:
 logger.out("Failed getting VM information for {}: {}".format(domain.name(), e), state='d', prefix='vm-thread')
-except:
+except Exception:
 pass
 continue

@@ -1462,7 +1462,7 @@ def node_keepalive():
 try:
 if zkhandler.readdata(zk_conn, '/upstream_ip') != config['upstream_floating_ip']:
 raise
-except:
+except Exception:
 zkhandler.writedata(zk_conn, {'/upstream_ip': config['upstream_floating_ip']})

 # Get past state and update if needed

@@ -1517,7 +1517,7 @@ def node_keepalive():
 this_node.memalloc = vm_thread_queue.get()
 this_node.memprov = vm_thread_queue.get()
 this_node.vcpualloc = vm_thread_queue.get()
-except:
+except Exception:
 pass
 else:
 this_node.domains_count = 0

@@ -1530,7 +1530,7 @@ def node_keepalive():
 ceph_health_colour = ceph_thread_queue.get()
 ceph_health = ceph_thread_queue.get()
 osds_this_node = ceph_thread_queue.get()
-except:
+except Exception:
 ceph_health_colour = fmt_cyan
 ceph_health = 'UNKNOWN'
 osds_this_node = '?'

@@ -1552,7 +1552,7 @@ def node_keepalive():
 '/nodes/{}/runningdomains'.format(this_node.name): ' '.join(this_node.domain_list),
 '/nodes/{}/keepalive'.format(this_node.name): str(keepalive_time)
 })
-except:
+except Exception:
 logger.out('Failed to set keepalive data', state='e')
 return

@@ -1623,7 +1623,7 @@ def node_keepalive():
 node_daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node_name))
 node_domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node_name))
 node_keepalive = int(zkhandler.readdata(zk_conn, '/nodes/{}/keepalive'.format(node_name)))
-except:
+except Exception:
 node_daemon_state = 'unknown'
 node_domain_state = 'unknown'
 node_keepalive = 0

@@ -1654,5 +1654,5 @@ update_timer = startKeepaliveTimer()
 while True:
 try:
 time.sleep(1)
-except:
+except Exception:
 break
@@ -170,7 +170,7 @@ class MetadataAPIInstance(object):
 try:
 if information.get('ip4_address', None) == source_address:
 host_information = information
-except:
+except Exception:
 pass

 # Get our real information on the host; now we can start querying about it

@@ -188,7 +188,7 @@ class MetadataAPIInstance(object):
 if network.get('mac', None) == client_macaddr:
 vm_name = vm.get('name')
 vm_details = vm
-except:
+except Exception:
 pass

 return vm_details
@@ -608,7 +608,7 @@ class NodeInstance(object):
 try:
 lock.acquire(timeout=60) # Don't wait forever and completely block us
 self.logger.out('Acquired read lock for synchronization phase G', state='o')
-except:
+except Exception:
 pass
 self.logger.out('Releasing read lock for synchronization phase G', state='i')
 lock.release()

@@ -698,7 +698,7 @@ class NodeInstance(object):

 try:
 last_node = zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(dom_uuid))
-except:
+except Exception:
 continue

 if last_node != self.name:
@@ -112,11 +112,11 @@ class VMInstance(object):
 self.last_lastnode = zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(self.domuuid))
 try:
 self.pinpolicy = zkhandler.readdata(self.zk_conn, '/domains/{}/pinpolicy'.format(self.domuuid))
-except:
+except Exception:
 self.pinpolicy = "none"
 try:
 self.migration_method = zkhandler.readdata(self.zk_conn, '/domains/{}/migration_method'.format(self.domuuid))
-except:
+except Exception:
 self.migration_method = 'none'

 # These will all be set later

@@ -166,7 +166,7 @@ class VMInstance(object):
 else:
 domain_information = daemon_common.getInformationFromXML(self.zk_conn, self.domuuid)
 memory = int(domain_information['memory'])
-except:
+except Exception:
 memory = 0

 return memory

@@ -174,7 +174,7 @@ class VMInstance(object):
 def getvcpus(self):
 try:
 vcpus = int(self.dom.info()[3])
-except:
+except Exception:
 vcpus = 0

 return vcpus

@@ -220,7 +220,7 @@ class VMInstance(object):
 try:
 self.dom = self.lookupByUUID(self.domuuid)
 curstate = self.dom.state()[0]
-except:
+except Exception:
 curstate = 'notstart'

 if curstate == libvirt.VIR_DOMAIN_RUNNING:

@@ -325,7 +325,7 @@ class VMInstance(object):

 try:
 lvdomstate = self.dom.state()[0]
-except:
+except Exception:
 lvdomstate = None

 if lvdomstate != libvirt.VIR_DOMAIN_RUNNING:

@@ -435,7 +435,7 @@ class VMInstance(object):
 dest_lv_conn = libvirt.open(dest_lv)
 if not dest_lv_conn:
 raise
-except:
+except Exception:
 self.logger.out('Failed to open connection to {}; aborting live migration.'.format(dest_lv), state='e', prefix='Domain {}'.format(self.domuuid))
 return False

@@ -643,7 +643,7 @@ class VMInstance(object):
 running, reason = self.dom.state()
 else:
 raise
-except:
+except Exception:
 running = libvirt.VIR_DOMAIN_NOSTATE

 self.logger.out('VM state change for "{}": {} {}'.format(self.domuuid, self.state, self.node), state='i')

@@ -761,7 +761,7 @@ class VMInstance(object):
 dom = lv_conn.lookupByUUID(buuid)

 # Fail
-except:
+except Exception:
 dom = None

 # After everything
@@ -409,7 +409,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
 try:
 os.remove(filename)
 self.dhcp_server_daemon.signal('hup')
-except:
+except Exception:
 pass

 def updateFirewallRules(self):

@@ -802,7 +802,7 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out

 try:
 os.remove(self.nftables_netconf_filename)
-except:
+except Exception:
 pass

 # Reload firewall rules
@@ -94,11 +94,11 @@ def run_os_command(command_string, background=False, environment=None, timeout=N

 try:
 stdout = command_output.stdout.decode('ascii')
-except:
+except Exception:
 stdout = ''
 try:
 stderr = command_output.stderr.decode('ascii')
-except:
+except Exception:
 stderr = ''
 return retcode, stdout, stderr

@@ -144,7 +144,7 @@ def findTargetNode(zk_conn, config, logger, dom_uuid):
 node_limit = zkhandler.readdata(zk_conn, '/domains/{}/node_limit'.format(dom_uuid)).split(',')
 if not any(node_limit):
 node_limit = ''
-except:
+except Exception:
 node_limit = ''
 zkhandler.writedata(zk_conn, { '/domains/{}/node_limit'.format(dom_uuid): '' })
@@ -47,14 +47,14 @@ def get_zookeeper_key():
 def get_lease_expiry():
 try:
 expiry = os.environ['DNSMASQ_LEASE_EXPIRES']
-except:
+except Exception:
 expiry = '0'
 return expiry

 def get_client_id():
 try:
 client_id = os.environ['DNSMASQ_CLIENT_ID']
-except:
+except Exception:
 client_id = '*'
 return client_id

@@ -62,7 +62,7 @@ def connect_zookeeper():
 # We expect the environ to contain the config file
 try:
 pvcnoded_config_file = os.environ['PVCD_CONFIG_FILE']
-except:
+except Exception:
 # Default place
 pvcnoded_config_file = '/etc/pvc/pvcnoded.yaml'
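After applying the change, the rule can be re-checked by running flake8 with '--select=E722' against the repository root; a clean run should report no remaining bare-except sites.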