Lint: W293 blank line contains whitespace

Joshua Boniface 2020-11-06 19:05:48 -05:00
parent 2deee9a329
commit ebf254f62d
16 changed files with 68 additions and 68 deletions
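For reference, W293 is the pycodestyle/flake8 check for lines that contain only whitespace; the fix is simply to strip the stray spaces and tabs so those lines become truly empty, which is why the additions and deletions balance out. A minimal sketch of that cleanup, against a hypothetical file path:

import re

def strip_whitespace_only_lines(text):
    # W293: replace lines made up solely of spaces/tabs with empty lines
    return re.sub(r'^[ \t]+$', '', text, flags=re.MULTILINE)

# Hypothetical usage against one of the touched files
with open('provisioner.py') as fh:
    cleaned = strip_whitespace_only_lines(fh.read())
with open('provisioner.py', 'w') as fh:
    fh.write(cleaned)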

View File

@@ -446,7 +446,7 @@ def run_benchmark(self, pool):
"minfault": results[150]
}
}
# Phase 3 - cleanup
self.update_state(state='RUNNING', meta={'current': 3, 'total': 3, 'status': 'Cleaning up and storing results'})
time.sleep(1)

View File

@@ -514,7 +514,7 @@ class OVFParser(object):
"{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(schema=self.OVF_SCHEMA)
)
disk_list = []
for item in hardware_list:
item_type = None
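
The findall() path above uses ElementTree/lxml Clark notation, where each tag is prefixed with its namespace URI in braces; the triple braces in the format() call produce exactly that. A small standalone sketch of the same lookup, with an illustrative OVF snippet rather than the parser's real input:

import lxml.etree

OVF_SCHEMA = 'http://schemas.dmtf.org/ovf/envelope/1'
xml = (
    '<Envelope xmlns="http://schemas.dmtf.org/ovf/envelope/1">'
    '<VirtualHardwareSection><StorageItem>disk0</StorageItem></VirtualHardwareSection>'
    '</Envelope>'
)

root = lxml.etree.fromstring(xml)
# "{{{schema}}}" renders as "{<uri>}", qualifying each tag with its namespace
items = root.findall(
    '{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem'.format(schema=OVF_SCHEMA)
)
print([item.text for item in items])  # ['disk0']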

View File

@@ -1060,13 +1060,13 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
# * Assemble a VM configuration dictionary
self.update_state(state='RUNNING', meta={'current': 1, 'total': 10, 'status': 'Collecting configuration'})
time.sleep(1)
vm_id = re.findall(r'/(\d+)$/', vm_name)
if not vm_id:
vm_id = 0
else:
vm_id = vm_id[0]
vm_data = dict()
# Get the profile information
@@ -1078,7 +1078,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
vm_data['script_arguments'] = profile_data.get('arguments').split('|')
else:
vm_data['script_arguments'] = []
if profile_data.get('profile_type') == 'ova':
is_ova_install = True
is_script_install = False # By definition
@@ -1230,7 +1230,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
continue
if volume['filesystem'] and volume['filesystem'] not in used_filesystems:
used_filesystems.append(volume['filesystem'])
for filesystem in used_filesystems:
if filesystem == 'swap':
retcode, stdout, stderr = pvc_common.run_os_command("which mkswap")
@@ -1416,7 +1416,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
# * Create each Ceph storage volume for the disks
self.update_state(state='RUNNING', meta={'current': 6, 'total': 10, 'status': 'Creating storage volumes'})
time.sleep(1)
for volume in vm_data['volumes']:
if volume.get('source_volume') is not None:
success, message = pvc_ceph.clone_volume(zk_conn, volume['pool'], "{}_{}".format(vm_name, volume['disk_id']), volume['source_volume'])
@@ -1477,7 +1477,7 @@ def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True, script_r
else:
if volume.get('source_volume') is not None:
continue
if volume.get('filesystem') is None:
continue
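
The self.update_state() calls that thread through create_vm() match the API Celery exposes on bound tasks for publishing progress metadata. A minimal sketch of that pattern, assuming a Celery app and broker URL that are illustrative rather than the project's own:

import time
from celery import Celery

celery = Celery('provisioner-example', broker='redis://localhost:6379/0')  # broker URL is an assumption

@celery.task(bind=True)
def create_vm_example(self, vm_name):
    steps = ['Collecting configuration', 'Creating storage volumes', 'Cleaning up and storing results']
    for current, status in enumerate(steps, start=1):
        self.update_state(state='RUNNING', meta={'current': current, 'total': len(steps), 'status': status})
        time.sleep(1)  # placeholder for the real provisioning work
    return {'current': len(steps), 'total': len(steps), 'status': 'Complete'}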

View File

@@ -115,7 +115,7 @@ def ceph_status(config):
return True, response.json()
else:
return False, response.json().get('message', '')
def ceph_util(config):
"""
Get utilization of the Ceph cluster
@@ -130,7 +130,7 @@ def ceph_util(config):
return True, response.json()
else:
return False, response.json().get('message', '')
def format_raw_output(status_data):
ainformation = list()
ainformation.append('{bold}Ceph cluster {stype} (primary node {end}{blue}{primary}{end}{bold}){end}\n'.format(bold=ansiprint.bold(), end=ansiprint.end(), blue=ansiprint.blue(), stype=status_data['type'], primary=status_data['primary_node']))
@@ -1272,7 +1272,7 @@ def ceph_benchmark_run(config, pool):
else:
retvalue = False
retdata = response.json().get('message', '')
return retvalue, retdata
def ceph_benchmark_list(config, job):
@@ -1303,7 +1303,7 @@ def ceph_benchmark_list(config, job):
def format_list_benchmark(config, benchmark_information):
benchmark_list_output = []
benchmark_id_length = 3
benchmark_job_length = 20
benchmark_bandwidth_length = dict()
@@ -1392,13 +1392,13 @@ def format_list_benchmark(config, benchmark_information):
benchmark_data = json.loads(benchmark['benchmark_result'])
benchmark_bandwidth[test] = format_bytes_tohuman(int(benchmark_data[test]['overall']['bandwidth']) * 1024)
benchmark_iops[test] = format_ops_tohuman(int(benchmark_data[test]['overall']['iops']))
seq_benchmark_bandwidth = "{} / {}".format(benchmark_bandwidth['seq_read'], benchmark_bandwidth['seq_write'])
seq_benchmark_iops = "{} / {}".format(benchmark_iops['seq_read'], benchmark_iops['seq_write'])
rand_benchmark_bandwidth = "{} / {}".format(benchmark_bandwidth['rand_read_4K'], benchmark_bandwidth['rand_write_4K'])
rand_benchmark_iops = "{} / {}".format(benchmark_iops['rand_read_4K'], benchmark_iops['rand_write_4K'])
benchmark_list_output.append('{bold}\
{benchmark_job: <{benchmark_job_length}} \
{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \

View File

@@ -92,7 +92,7 @@ def net_list(config, limit):
def net_add(config, vni, description, nettype, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end):
"""
Add new network
API endpoint: POST /api/v1/network
API arguments: lots
API schema: {"message":"{data}"}
@@ -123,7 +123,7 @@ def net_add(config, vni, description, nettype, domain, name_servers, ip4_network
def net_modify(config, net, description, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end):
"""
Modify a network
API endpoint: POST /api/v1/network/{net}
API arguments: lots
API schema: {"message":"{data}"}
@@ -162,7 +162,7 @@ def net_modify(config, net, description, domain, name_servers, ip4_network, ip4_
def net_remove(config, net):
"""
Remove a network
API endpoint: DELETE /api/v1/network/{net}
API arguments:
API schema: {"message":"{data}"}
@@ -221,7 +221,7 @@ def net_dhcp_list(config, net, limit, only_static=False):
def net_dhcp_add(config, net, ipaddr, macaddr, hostname):
"""
Add new network DHCP lease
API endpoint: POST /api/v1/network/{net}/lease
API arguments: macaddress=macaddr, ipaddress=ipaddr, hostname=hostname
API schema: {"message":"{data}"}
@@ -243,7 +243,7 @@ def net_dhcp_add(config, net, ipaddr, macaddr, hostname):
def net_dhcp_remove(config, net, mac):
"""
Remove a network DHCP lease
API endpoint: DELETE /api/v1/network/{vni}/lease/{mac}
API arguments:
API schema: {"message":"{data}"}
@@ -299,7 +299,7 @@ def net_acl_list(config, net, limit, direction):
def net_acl_add(config, net, direction, description, rule, order):
"""
Add new network acl
API endpoint: POST /api/v1/network/{net}/acl
API arguments: description=description, direction=direction, order=order, rule=rule
API schema: {"message":"{data}"}
@@ -323,7 +323,7 @@ def net_acl_add(config, net, direction, description, rule, order):
def net_acl_remove(config, net, description):
"""
Remove a network ACL
API endpoint: DELETE /api/v1/network/{vni}/acl/{description}
API arguments:
API schema: {"message":"{data}"}
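
Each of the client helpers documented above reduces to one HTTP call against the API endpoint named in its docstring, followed by a check of response.json().get('message', ''). A hedged sketch of that shape with requests; the base URL, expected status code, and parameter names are assumptions for illustration:

import requests

def net_add_example(base_url, vni, description, nettype):
    # POST /api/v1/network, mirroring the net_add docstring above
    response = requests.post(
        '{}/api/v1/network'.format(base_url),
        params={'vni': vni, 'description': description, 'nettype': nettype}
    )
    retvalue = response.status_code == 200
    return retvalue, response.json().get('message', '')

# Hypothetical usage:
# ok, message = net_add_example('http://pvc.local:7370', 101, 'client-net', 'managed')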

View File

@@ -293,7 +293,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
node_mem_provisioned='Prov'
)
)
# Format the string (elements)
for node_information in node_list:
daemon_state_colour, coordinator_state_colour, domain_state_colour, mem_allocated_colour, mem_provisioned_colour = getOutputColours(node_information)
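
The header template above relies on nested str.format() fields: the column widths such as {mem_total_length} are themselves substituted into the format spec before padding is applied. A tiny standalone example of the technique, with made-up values:

mem_total_length = 8
mem_used_length = 6
header = 'Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}}'.format(
    node_mem_total='Total',
    node_mem_used='Used',
    mem_total_length=mem_total_length,
    mem_used_length=mem_used_length,
)
print(header)  # each column left-aligned and padded to its configured width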

View File

@@ -84,7 +84,7 @@ def template_add(config, params, template_type=None):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def template_modify(config, params, name, template_type):
@@ -101,7 +101,7 @@ def template_modify(config, params, name, template_type):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def template_remove(config, name, template_type):
@@ -118,7 +118,7 @@ def template_remove(config, name, template_type):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def template_element_add(config, name, element_id, params, element_type=None, template_type=None):
@@ -135,7 +135,7 @@ def template_element_add(config, name, element_id, params, element_type=None, te
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def template_element_remove(config, name, element_id, element_type=None, template_type=None):
@@ -152,7 +152,7 @@ def template_element_remove(config, name, element_id, element_type=None, templat
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def userdata_info(config, userdata):
@@ -227,7 +227,7 @@ def userdata_add(config, params):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def userdata_modify(config, name, params):
@@ -252,7 +252,7 @@ def userdata_modify(config, name, params):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def userdata_remove(config, name):
@@ -269,7 +269,7 @@ def userdata_remove(config, name):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def script_info(config, script):
@@ -344,7 +344,7 @@ def script_add(config, params):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def script_modify(config, name, params):
@@ -369,7 +369,7 @@ def script_modify(config, name, params):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def script_remove(config, name):
@@ -386,7 +386,7 @@ def script_remove(config, name):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def ova_info(config, name):
@@ -469,7 +469,7 @@ def ova_remove(config, name):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def profile_info(config, profile):
@@ -520,7 +520,7 @@ def profile_add(config, params):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def profile_modify(config, name, params):
@@ -537,7 +537,7 @@ def profile_modify(config, name, params):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def profile_remove(config, name):
@@ -554,7 +554,7 @@ def profile_remove(config, name):
retvalue = True
else:
retvalue = False
return retvalue, response.json().get('message', '')
def vm_create(config, name, profile, wait_flag, define_flag, start_flag, script_args):
@@ -584,7 +584,7 @@ def vm_create(config, name, profile, wait_flag, define_flag, start_flag, script_
else:
retvalue = False
retdata = response.json().get('message', '')
return retvalue, retdata
def task_status(config, task_id=None, is_watching=False):
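
Every wrapper in this file ends with the same pattern: derive a boolean from the response status and pair it with response.json().get('message', ''). A hedged sketch of that return convention factored into a helper; the helper name and the expected status code are assumptions, not the client library's real code:

def wrap_api_response(response, expected_status=200):
    # Normalize an API response into the (success, message) tuple the callers return
    retvalue = response.status_code == expected_status
    return retvalue, response.json().get('message', '')

# Hypothetical usage inside one of the wrappers above:
# response = requests.post('{}/api/v1/provisioner/profile'.format(base_url), params=params)
# return wrap_api_response(response)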

View File

@@ -573,7 +573,7 @@ def format_list(config, vm_list, raw):
vm_migrated='Migrated'
)
)
# Keep track of nets we found to be valid to cut down on duplicate API hits
valid_net_list = []
# Format the string (elements)

View File

@@ -1722,7 +1722,7 @@ def ceph_osd_add(node, device, weight, confirm_flag):
def ceph_osd_remove(osdid, confirm_flag):
"""
Remove a Ceph OSD with ID OSDID.
DANGER: This will completely remove the OSD from the cluster. OSDs will rebalance which may negatively affect performance or available space.
"""
if not confirm_flag:
@@ -1992,7 +1992,7 @@ def ceph_volume_upload(pool, name, image_format, image_file):
def ceph_volume_remove(pool, name, confirm_flag):
"""
Remove a Ceph RBD volume with name NAME from pool POOL.
DANGER: This will completely remove the volume and all data contained in it.
"""
if not confirm_flag:
@@ -2734,7 +2734,7 @@ def provisioner_template_storage_disk_add(name, disk, pool, source_volume, size,
Add a new DISK to storage template NAME.
DISK must be a Linux-style sdX/vdX disk identifier, such as "sda" or "vdb". All disks in a template must use the same identifier format.
Disks will be added to VMs in sdX/vdX order. For disks with mountpoints, ensure this order is sensible.
"""

View File

@@ -603,7 +603,7 @@ def add_pool(zk_conn, name, pgs, replcfg):
retcode, stdout, stderr = common.run_os_command('ceph osd pool create {} {} replicated'.format(name, pgs))
if retcode:
return False, 'ERROR: Failed to create pool "{}" with {} PGs: {}'.format(name, pgs, stderr)
# 2. Set the size and minsize
retcode, stdout, stderr = common.run_os_command('ceph osd pool set {} size {}'.format(name, copies))
if retcode:
@@ -656,7 +656,7 @@ def get_list_pool(zk_conn, limit, is_fuzzy=True):
if limit:
if not is_fuzzy:
limit = '^' + limit + '$'
for pool in full_pool_list:
if limit:
try:
@@ -1231,7 +1231,7 @@ def add_snapshot(zk_conn, pool, volume, name):
'/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '',
'/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, name): '{}'
})
return True, 'Created RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
def rename_snapshot(zk_conn, pool, volume, name, new_name):
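
add_pool() and its siblings shell out through common.run_os_command() and branch on a (retcode, stdout, stderr) triple. A hedged sketch of what such a helper can look like with subprocess; this is an illustration, not the project's actual implementation:

import shlex
import subprocess

def run_os_command(command, timeout=None):
    # Run a command and return (returncode, stdout, stderr), as the callers above expect
    result = subprocess.run(
        shlex.split(command),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        timeout=timeout,
    )
    return result.returncode, result.stdout.decode(), result.stderr.decode()

# Hypothetical usage mirroring add_pool():
# retcode, stdout, stderr = run_os_command('ceph osd pool create testpool 128 replicated')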

View File

@@ -101,7 +101,7 @@ def getDomainXML(zk_conn, dom_uuid):
xml = zkhandler.readdata(zk_conn, '/domains/{}/xml'.format(dom_uuid))
except:
return None
# Parse XML using lxml.objectify
parsed_xml = lxml.objectify.fromstring(xml)
return parsed_xml
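
lxml.objectify, used just above, exposes parsed child elements as attributes on the returned object, so callers can walk the XML without explicit XPath. A small standalone example with an illustrative snippet rather than a full libvirt domain definition:

import lxml.objectify

xml = '<domain><name>test-vm</name><memory unit="MiB">2048</memory></domain>'
parsed_xml = lxml.objectify.fromstring(xml)

print(parsed_xml.name.text)              # 'test-vm'
print(int(parsed_xml.memory))            # 2048
print(parsed_xml.memory.attrib['unit'])  # 'MiB'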
@@ -208,7 +208,7 @@ def getDomainDiskList(zk_conn, dom_uuid):
disk_list = []
for disk in domain_information['disks']:
disk_list.append(disk['name'])
return disk_list
#
@@ -269,7 +269,7 @@ def getInformationFromXML(zk_conn, uuid):
domain_features = getDomainCPUFeatures(parsed_xml)
domain_disks = getDomainDisks(parsed_xml, stats_data)
domain_controllers = getDomainControllers(parsed_xml)
if domain_lastnode:
domain_migrated = 'from {}'.format(domain_lastnode)
else:

View File

@@ -175,7 +175,7 @@ def getDHCPLeaseInformation(zk_conn, vni, mac_address):
except kazoo.exceptions.NoNodeError:
zkhandler.readdata(zk_conn, '/networks/{}/dhcp4_reservations/{}'.format(vni, mac_address))
type_key = 'dhcp4_reservations'
hostname = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/hostname'.format(vni, type_key, mac_address))
ip4_address = zkhandler.readdata(zk_conn, '/networks/{}/{}/{}/ipaddr'.format(vni, type_key, mac_address))
if type_key == 'dhcp4_leases':
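
The lookups above go through the project's zkhandler wrapper and kazoo; underneath, reading a key such as /networks/{vni}/{type_key}/{mac}/hostname is a plain Zookeeper get. A hedged sketch with kazoo directly; the connection string, VNI, and MAC address are illustrative:

from kazoo.client import KazooClient

zk_conn = KazooClient(hosts='127.0.0.1:2181')
zk_conn.start()

# Equivalent of a single zkhandler.readdata() call
path = '/networks/100/dhcp4_leases/52:54:00:12:34:56/hostname'
data, _stat = zk_conn.get(path)
hostname = data.decode('utf-8')

zk_conn.stop()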

View File

@@ -170,7 +170,7 @@ def ready_node(zk_conn, node, wait=False):
return False, 'ERROR: No node named "{}" is present in the cluster.'.format(node)
retmsg = 'Restoring hypervisor {} to active service.'.format(node)
# Add the new domain to Zookeeper
zkhandler.writedata(zk_conn, {
'/nodes/{}/domainstate'.format(node): 'unflush'
@@ -396,7 +396,7 @@ Mem (M): {node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length
node_mem_provisioned='VMs Total'
)
)
# Format the string (elements)
for node_information in node_list:
daemon_state_colour, coordinator_state_colour, domain_state_colour = getOutputColours(node_information)

View File

@@ -927,7 +927,7 @@ def format_list(zk_conn, vm_list, raw):
vm_migrated='Migrated'
)
)
# Format the string (elements)
for domain_information in vm_list:
if domain_information['state'] == 'start':

View File

@@ -1143,7 +1143,7 @@ def collect_ceph_stats(queue):
'write_ops': pool['write_ops'],
'write_bytes': pool['write_bytes']
}
# Write the pool data to Zookeeper
zkhandler.writedata(zk_conn, {
'/ceph/pools/{}/stats'.format(pool['name']): str(json.dumps(pool_df))
@@ -1487,13 +1487,13 @@ def node_keepalive():
vm_thread_queue = Queue()
vm_stats_thread = Thread(target=collect_vm_stats, args=(vm_thread_queue,), kwargs={})
vm_stats_thread.start()
# Run Ceph status collection in separate thread for parallelization
if enable_storage:
ceph_thread_queue = Queue()
ceph_stats_thread = Thread(target=collect_ceph_stats, args=(ceph_thread_queue,), kwargs={})
ceph_stats_thread.start()
# Get node performance statistics
this_node.memtotal = int(psutil.virtual_memory().total / 1024 / 1024)
this_node.memused = int(psutil.virtual_memory().used / 1024 / 1024)
@@ -1627,7 +1627,7 @@ def node_keepalive():
node_daemon_state = 'unknown'
node_domain_state = 'unknown'
node_keepalive = 0
# Handle deadtime and fencing if needed
# (A node is considered dead when its keepalive timer is >6*keepalive_interval seconds
# out-of-date while in 'start' state)
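
node_keepalive() fans the expensive collection work out into threads that report back over queues, then gathers node statistics with psutil. A compact sketch of that structure; the collector body is a placeholder, not the daemon's real libvirt/Ceph logic:

from queue import Queue
from threading import Thread

import psutil

def collect_vm_stats(queue):
    # Placeholder collector; the real daemon queries libvirt here
    queue.put({'domain_count': 0})

vm_thread_queue = Queue()
vm_stats_thread = Thread(target=collect_vm_stats, args=(vm_thread_queue,), kwargs={})
vm_stats_thread.start()

# Node performance statistics, as in the keepalive above
memtotal = int(psutil.virtual_memory().total / 1024 / 1024)
memused = int(psutil.virtual_memory().used / 1024 / 1024)

vm_stats_thread.join()
vm_stats = vm_thread_queue.get()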

View File

@@ -49,33 +49,33 @@ class MetadataAPIInstance(object):
@self.mdapi.route('/', methods=['GET'])
def api_root():
return flask.jsonify({"message": "PVC Provisioner Metadata API version 1"}), 209
@self.mdapi.route('/<version>/meta-data/', methods=['GET'])
def api_metadata_root(version):
metadata = """instance-id\nname\nprofile"""
return metadata, 200
@self.mdapi.route('/<version>/meta-data/instance-id', methods=['GET'])
def api_metadata_instanceid(version):
source_address = flask.request.__dict__['environ']['REMOTE_ADDR']
vm_details = self.get_vm_details(source_address)
instance_id = vm_details.get('uuid', None)
return instance_id, 200
@self.mdapi.route('/<version>/meta-data/name', methods=['GET'])
def api_metadata_hostname(version):
source_address = flask.request.__dict__['environ']['REMOTE_ADDR']
vm_details = self.get_vm_details(source_address)
vm_name = vm_details.get('name', None)
return vm_name, 200
@self.mdapi.route('/<version>/meta-data/profile', methods=['GET'])
def api_metadata_profile(version):
source_address = flask.request.__dict__['environ']['REMOTE_ADDR']
vm_details = self.get_vm_details(source_address)
vm_profile = vm_details.get('profile', None)
return vm_profile, 200
@self.mdapi.route('/<version>/user-data', methods=['GET'])
def api_userdata(version):
source_address = flask.request.__dict__['environ']['REMOTE_ADDR']
@@ -88,7 +88,7 @@ class MetadataAPIInstance(object):
else:
userdata = None
return flask.Response(userdata)
def launch_wsgi(self):
try:
self.md_http_server = gevent.pywsgi.WSGIServer(
@@ -159,7 +159,7 @@ class MetadataAPIInstance(object):
def get_vm_details(self, source_address):
# Start connection to Zookeeper
_discard, networks = pvc_network.get_list(self.zk_conn, None)
# Figure out which server this is via the DHCP address
host_information = dict()
networks_managed = (x for x in networks if x.get('type') == 'managed')
@@ -172,12 +172,12 @@ class MetadataAPIInstance(object):
host_information = information
except Exception:
pass
# Get our real information on the host; now we can start querying about it
client_hostname = host_information.get('hostname', None)
client_macaddr = host_information.get('mac_address', None)
client_ipaddr = host_information.get('ip4_address', None)
# Find the VM with that MAC address - we can't assume that the hostname is actually right
_discard, vm_list = pvc_vm.get_list(self.zk_conn, None, None, None)
vm_name = None
@@ -190,6 +190,6 @@ class MetadataAPIInstance(object):
vm_details = vm
except Exception:
pass
return vm_details
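
The routes above implement a cloud-init style metadata service keyed on the caller's source address. A minimal standalone sketch of the same route shape with Flask; the lookup function, its return values, and the listen address are stand-ins for the real Zookeeper-backed implementation:

import flask

mdapi = flask.Flask('pvc-metadata-example')

def get_vm_details(source_address):
    # Stand-in for the DHCP-lease and VM lookups performed above
    return {'uuid': '00000000-0000-0000-0000-000000000000', 'name': 'test-vm', 'profile': 'default'}

@mdapi.route('/<version>/meta-data/', methods=['GET'])
def api_metadata_root(version):
    return 'instance-id\nname\nprofile', 200

@mdapi.route('/<version>/meta-data/instance-id', methods=['GET'])
def api_metadata_instanceid(version):
    vm_details = get_vm_details(flask.request.remote_addr)
    return vm_details.get('uuid', ''), 200

if __name__ == '__main__':
    # The conventional link-local metadata address; binding to it here is an assumption
    mdapi.run(host='169.254.169.254', port=80)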