Fix several small bugs
commit a66b834ae4
parent b17b7bf22b
@@ -66,7 +66,7 @@ class NodeInstance(object):
         self.memfree = 0
         self.memalloc = 0
         self.vcpualloc = 0
-        # Floating upstreams
+        # Floating IP configurations
         if self.config['enable_networking']:
             self.vni_dev = self.config['vni_dev']
             self.vni_ipaddr, self.vni_cidrnetmask = self.config['vni_floating_ip'].split('/')
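The vni_floating_ip value split on '/' in this hunk's context is a bare "address/prefix" string. A minimal sketch of the same parse using the standard-library ipaddress module, which additionally validates the input (the example value is illustrative, not from this codebase):

import ipaddress

vni_floating_ip = '10.0.0.1/24'  # illustrative value for config['vni_floating_ip']
# Equivalent to the .split('/') above, but raises ValueError on malformed input
iface = ipaddress.ip_interface(vni_floating_ip)
vni_ipaddr = str(iface.ip)                      # '10.0.0.1'
vni_cidrnetmask = str(iface.network.prefixlen)  # '24'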
@@ -118,8 +118,6 @@ class NodeInstance(object):
                 self.router_state = data
                 if self.config['enable_networking']:
                     if self.router_state == 'primary':
-                        # Skip becoming primary unless already running
-                        if self.daemon_state == 'run':
                         self.logger.out('Setting node {} to primary state'.format(self.name), state='i')
                         #self.become_primary()
                         transition_thread = threading.Thread(target=self.become_primary, args=(), kwargs={})
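This hunk drops the daemon-state guard, and the surrounding context shows why the transition runs through threading.Thread rather than a direct self.become_primary() call: the code sits inside a Zookeeper watch callback, which must return promptly or it delays delivery of further watch events. A self-contained sketch of that pattern, with illustrative stand-in functions (assuming a kazoo-style DataWatch callback signature):

import threading

def become_primary():
    # Long-running transition work happens here, off the callback thread
    print('transitioning to primary')

def watch_router_state(data, stat, event=None):
    # Watch callbacks must return quickly; hand slow work to a thread
    if data == b'primary':
        transition_thread = threading.Thread(target=become_primary, args=(), kwargs={})
        transition_thread.start()

watch_router_state(b'primary', None)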
@@ -433,6 +431,7 @@ class NodeInstance(object):
         # 5. Transition Patroni primary
         self.logger.out('Setting Patroni leader to this node', state='i')
         tick = 1
+        patroni_failed = True
         # As long as we're primary, keep trying to set the Patroni leader to us
         while self.router_state == 'primary':
             # Switch Patroni leader to the local instance
@@ -470,18 +469,22 @@ class NodeInstance(object):
             # Otherwise, we succeeded
             else:
                 self.logger.out('Successfully switched Patroni leader\n{}'.format(stdout), state='o')
+                patroni_failed = False
                 time.sleep(0.2)
                 break
         # 6. Start DHCP servers
         for network in self.d_network:
             self.d_network[network].startDHCPServer()
-        # 7. Start DNS aggregator
-        self.dns_aggregator.start_aggregator()
-        # 8. Start metadata API
+        # 7. Start DNS aggregator; just continue if we fail
+        if not patroni_failed:
+            self.dns_aggregator.start_aggregator()
+        else:
+            self.logger.out('Not starting DNS aggregator due to Patroni failures', state='e')
+        # 8. Start metadata API; just continue if we fail
         self.metadata_api.start()
         # 9. Start client API (and provisioner worker)
         if self.config['enable_api']:
-            self.logger.out('Stopping PVC API client service', state='i')
+            self.logger.out('Starting PVC API client service', state='i')
             common.run_os_command("systemctl start pvc-api.service")
             self.logger.out('Starting PVC Provisioner Worker service', state='i')
             common.run_os_command("systemctl start pvc-provisioner-worker.service")
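Together with the patroni_failed = True initialization added two hunks above, this is a set-before-loop, clear-on-success flag: it stays True unless the retry loop actually switches the Patroni leader, and the DNS aggregator is started only once it is cleared. A self-contained sketch of the same control flow (try_switch_leader and the print calls are illustrative stand-ins, not code from this repository):

import time

def try_switch_leader():
    # Stand-in for the Patroni leader switch; True means success
    return True

patroni_failed = True
while True:  # the real loop condition is `while self.router_state == 'primary'`
    if try_switch_leader():
        patroni_failed = False
        time.sleep(0.2)
        break
    time.sleep(1)  # keep retrying until we succeed or stop being primary

if not patroni_failed:
    print('starting DNS aggregator')  # dependent service gated on success
else:
    print('not starting DNS aggregator due to Patroni failures')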
@@ -603,8 +606,11 @@ class NodeInstance(object):
         # Synchronize nodes G (I am reader)
         lock = zkhandler.readlock(self.zk_conn, '/locks/primary_node')
         self.logger.out('Acquiring read lock for synchronization G', state='i')
-        lock.acquire()
-        self.logger.out('Acquired read lock for synchronization G', state='o')
+        try:
+            lock.acquire(timeout=60) # Don't wait forever and completely block us
+            self.logger.out('Acquired read lock for synchronization G', state='o')
+        except:
+            pass
         self.logger.out('Releasing read lock for synchronization G', state='i')
         lock.release()
         self.logger.out('Released read lock for synchronization G', state='o')
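The bounded acquire keeps a dead or stuck peer from blocking the transition forever. Assuming zkhandler.readlock wraps a kazoo-style ReadLock, acquire(timeout=...) raises on expiry rather than returning False, which is why the change wraps it in try/except: the bare except/pass means "proceed anyway after 60 seconds". A minimal standalone sketch of the narrower form (the ZooKeeper endpoint is illustrative; kazoo's release() simply returns False when the lock was never acquired):

from kazoo.client import KazooClient
from kazoo.exceptions import LockTimeout

zk = KazooClient(hosts='127.0.0.1:2181')  # illustrative ZooKeeper endpoint
zk.start()

lock = zk.ReadLock('/locks/primary_node')
try:
    # Don't wait forever and completely block us
    lock.acquire(timeout=60)
except LockTimeout:
    pass  # proceed anyway; the holder of the write lock may have died
lock.release()  # returns False harmlessly if the acquire timed out
zk.stop()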