Remove extra whitespaces on blank lines
parent 08cb16bfbc
commit 3e591bd09e
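This commit is a whitespace-only cleanup: every blank line that still carried spaces or tabs is rewritten as a truly empty line, so each hunk below differs only in invisible trailing whitespace. As a rough sketch of how such a cleanup can be reproduced (the script below is illustrative, not the tool used for this commit; the name strip_blank_lines.py and its helper are hypothetical):

    # strip_blank_lines.py: hypothetical helper that rewrites files in place,
    # replacing whitespace-only lines with truly empty lines.
    import re
    import sys

    def strip_blank_whitespace(path):
        # Read the whole file as one string
        with open(path) as f:
            text = f.read()
        # (?m) makes ^ and $ match at each line; [ \t]+$ catches lines
        # containing only spaces or tabs, which are replaced with nothing
        cleaned = re.sub(r'(?m)^[ \t]+$', '', text)
        with open(path, 'w') as f:
            f.write(cleaned)

    if __name__ == '__main__':
        for path in sys.argv[1:]:
            strip_blank_whitespace(path)

Usage would be along the lines of: python3 strip_blank_lines.py <files>. In the hunks that follow, each removed (-) line is a blank line with leftover indentation whitespace, and each added (+) line is the same line fully emptied.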
@@ -123,7 +123,7 @@ def add_osd(zk_conn, logger, node, device, weight):
         for line in stdout.split('\n'):
             if 'osd fsid' in line:
                 osd_fsid = line.split()[-1]
-        
+
         if not osd_fsid:
             print('ceph-volume lvm list')
             print('Could not find OSD fsid in data:')
@@ -199,7 +199,7 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
         if not osd_id in osd_list:
             logger.out('Could not find OSD {} in the cluster'.format(osd_id), state='e')
             return True
-        
+
         # 1. Set the OSD out so it will flush
         logger.out('Setting out OSD disk with ID {}'.format(osd_id), state='i')
         retcode, stdout, stderr = common.run_os_command('ceph osd out {}'.format(osd_id))
@@ -208,7 +208,7 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
             print(stdout)
             print(stderr)
             raise
-        
+
         # 2. Wait for the OSD to flush
         logger.out('Flushing OSD disk with ID {}'.format(osd_id), state='i')
         osd_string = str()
@@ -258,7 +258,7 @@ def remove_osd(zk_conn, logger, osd_id, osd_obj):
             print(stdout)
             print(stderr)
             raise
-        
+
         # 6. Purge the OSD from Ceph
         logger.out('Purging OSD disk with ID {}'.format(osd_id), state='i')
         retcode, stdout, stderr = common.run_os_command('ceph osd purge {} --yes-i-really-mean-it'.format(osd_id))
@@ -135,7 +135,7 @@ class PowerDNSInstance(object):
             'Successfully started PowerDNS zone aggregator',
             state='o'
         )
-    
+
 
     def stop(self):
         if self.dns_server_daemon:
@@ -232,7 +232,7 @@ class DNSNetworkInstance(object):
             """,
             (domain_id, network_domain, ns_server, 'NS', 86400, 0)
         )
-        
+
         self.sql_conn.commit()
         self.sql_conn.close()
         self.sql_conn = None
@@ -315,7 +315,7 @@ class AXFRDaemonInstance(object):
                 self.config['pdns_postgresql_password']
             )
         )
-        
+
         # Start the thread
         self.thread.start()
 
@@ -471,7 +471,7 @@ class AXFRDaemonInstance(object):
 
                 # Commit all the previous changes
                 self.sql_conn.commit()
-                
+
                 # Reload the domain
                 common.run_os_command(
                     '/usr/bin/pdns_control --socket-dir={} reload {}'.format(
@@ -848,12 +848,12 @@ if enable_storage:
         @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
         def update_volumes(new_volume_list):
             global volume_list, d_volume
-            
+
             # Add any missing Volumes to the list
             for volume in new_volume_list:
                 if not volume in volume_list[pool]:
                     d_volume[pool][volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)
-            
+
             # Remove any deleted Volumes from the list
             for volume in volume_list[pool]:
                 if not volume in new_volume_list:
@@ -141,7 +141,7 @@ class DomainInstance(object):
             self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}:'.format(self.domuuid))
             self.instart = False
             return
-        
+
         # Try to get the current state in case it's already running
         try:
             self.dom = self.lookupByUUID(self.domuuid)
@@ -172,7 +172,7 @@ class DomainInstance(object):
         lv_conn.close()
 
         self.instart = False
-    
+
     # Restart the VM
     def restart_vm(self):
         self.logger.out('Restarting VM', state='i', prefix='Domain {}:'.format(self.domuuid))
@@ -185,7 +185,7 @@ class DomainInstance(object):
             self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}:'.format(self.domuuid))
             self.inrestart = False
             return
-        
+
         self.shutdown_vm()
         time.sleep(0.2)
         self.start_vm()
@@ -227,7 +227,7 @@ class DomainInstance(object):
         self.logger.out('Successfully stopped VM', state='o', prefix='Domain {}:'.format(self.domuuid))
         self.dom = None
         self.instop = False
-        
+
         # Stop the log watcher
         self.console_log_instance.stop()
 
@@ -320,7 +320,7 @@ class DomainInstance(object):
             # Wait 1 second and increment the tick
             time.sleep(1)
             tick += 1
-            
+
             # Get zookeeper state and look for the VM in the local libvirt database
             self.state = zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(self.domuuid))
             self.dom = self.lookupByUUID(self.domuuid)
@@ -348,7 +348,7 @@ class DomainInstance(object):
                 live_receive = False
                 self.logger.out('Send failed on remote end', state='w', prefix='Domain {}:'.format(self.domuuid))
                 break
-            
+
             # If we've already been waiting 90s for a receive
             # HARDCODE: 90s should be plenty of time for even extremely large VMs on reasonable networks
             if tick > 90:
@@ -365,7 +365,7 @@ class DomainInstance(object):
             # Wait 1 second and increment the tick
             time.sleep(1)
             tick += 1
-            
+
             # Get zookeeper state and look for the VM in the local libvirt database
             self.state = zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(self.domuuid))
 
@@ -387,7 +387,7 @@ class DomainInstance(object):
                 })
                 self.logger.out('Shutdown timed out without state change', state='e', prefix='Domain {}:'.format(self.domuuid))
                 break
-        
+
         self.inreceive = False
 
     #
@@ -474,7 +474,7 @@ class DomainInstance(object):
                     self.removeDomainFromList()
                     # Stop the log watcher
                     self.console_log_instance.stop()
-                
+
                 else:
                     # Conditional pass three - Is this VM currently running on this node
                     if running == libvirt.VIR_DOMAIN_RUNNING:
@@ -499,10 +499,10 @@ class DomainInstance(object):
 
         lv_conn = None
         libvirt_name = "qemu:///system"
-        
+
         # Convert the text UUID to bytes
         buuid = uuid.UUID(tuuid).bytes
-        
+
         # Try
         try:
             # Open a libvirt connection
@@ -510,19 +510,19 @@ class DomainInstance(object):
             if lv_conn == None:
                 self.logger.out('Failed to open local libvirt connection', state='e', prefix='Domain {}:'.format(self.domuuid))
                 return None
-            
+
             # Lookup the UUID
             dom = lv_conn.lookupByUUID(buuid)
-        
+
         # Fail
         except:
             dom = None
-        
+
         # After everything
         finally:
             # Close the libvirt connection
             if lv_conn != None:
                 lv_conn.close()
 
         # Return the dom object (or None)
         return dom
@@ -158,7 +158,7 @@ class NodeInstance(object):
 
             if data != self.memfree:
                 self.memfree = data
-        
+
         @self.zk_conn.DataWatch('/nodes/{}/memused'.format(self.name))
         def watch_node_memused(data, stat, event=''):
             if event and event.type == 'DELETED':
@@ -173,7 +173,7 @@ class NodeInstance(object):
 
             if data != self.memused:
                 self.memused = data
-        
+
         @self.zk_conn.DataWatch('/nodes/{}/memalloc'.format(self.name))
         def watch_node_memalloc(data, stat, event=''):
             if event and event.type == 'DELETED':
@@ -188,7 +188,7 @@ class NodeInstance(object):
 
             if data != self.memalloc:
                 self.memalloc = data
-        
+
         @self.zk_conn.DataWatch('/nodes/{}/vcpualloc'.format(self.name))
         def watch_node_vcpualloc(data, stat, event=''):
             if event and event.type == 'DELETED':
@@ -203,7 +203,7 @@ class NodeInstance(object):
 
             if data != self.vcpualloc:
                 self.vcpualloc = data
-        
+
         @self.zk_conn.DataWatch('/nodes/{}/runningdomains'.format(self.name))
        def watch_node_runningdomains(data, stat, event=''):
             if event and event.type == 'DELETED':
@@ -233,7 +233,7 @@ class NodeInstance(object):
 
             if data != self.domains_count:
                 self.domains_count = data
-    
+
     # Update value functions
     def update_node_list(self, d_node):
         self.d_node = d_node
@@ -456,7 +456,7 @@ def getHypervisors(zk_conn, dom_uuid):
             valid_node_list.append(node)
 
     return valid_node_list
-    
+
# via free memory (relative to allocated memory)
def findTargetHypervisorMem(zk_conn, dom_uuid):
    most_allocfree = 0
@@ -380,12 +380,12 @@ add rule inet filter forward ip6 saddr {netaddr6} counter jump {vxlannic}-out
         for acl in self.firewall_rules_out:
             order = zkhandler.readdata(self.zk_conn, '/networks/{}/firewall_rules/out/{}/order'.format(self.vni, acl))
             ordered_acls_out[order] = acl
-        
+
         for order in sorted(ordered_acls_in.keys()):
             sorted_acl_list['in'].append(ordered_acls_in[order])
         for order in sorted(ordered_acls_out.keys()):
             sorted_acl_list['out'].append(ordered_acls_out[order])
-        
+
         for direction in 'in', 'out':
             for acl in sorted_acl_list[direction]:
                 rule_prefix = "add rule inet filter vxlan{}-{} counter".format(self.vni, direction)
@@ -106,7 +106,7 @@ def read_lease_database(zk_conn, zk_leases_key):
 
    # Output list
    print('\n'.join(output_list))
-    
+
def add_lease(zk_conn, zk_leases_key, expiry, macaddr, ipaddr, hostname, clientid):
    transaction = zk_conn.transaction()
    transaction.create('{}/{}'.format(zk_leases_key, macaddr), ''.encode('ascii'))
@@ -63,7 +63,7 @@ def fenceNode(node_name, zk_conn, config, logger):
         zkhandler.writedata(zk_conn, { '/nodes/{}/routerstate'.format(node_name): 'secondary' })
         if zkhandler.readdata(zk_conn, '/primary_node') == node_name:
             zkhandler.writedata(zk_conn, { '/primary_node': 'none' })
-    
+
     # If the fence succeeded and successful_fence is migrate
     if fence_status == True and config['successful_fence'] == 'migrate':
         migrateFromFencedNode(zk_conn, node_name, logger)
|
|
|
@@ -47,7 +47,7 @@ class Logger(object):
                 self.last_colour = self.fmt_cyan
             else:
                 self.last_colour = ""
-        
+
     # Provide a hup function to close and reopen the writer
     def hup(self):
         self.writer.close()
@@ -90,11 +90,11 @@ class Logger(object):
             date = ''
             colour = self.last_colour
             prompt = '>>> '
-        
+
         # Append space to prefix
         if prefix != '':
             prefix = prefix + ' - '
-        
+
         message = colour + prompt + endc + date + prefix + message
         print(message)
         if self.config['file_logging'] == 'True':