Convert NodeInstance from having an internal "daemon" to using the APScheduler library
parent 8f31f49252
commit 66fe258655
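In outline, the change drops NodeInstance's threading.Thread machinery (run(), stop(), the stop_thread Event and the interruptible sleep loop) and instead exposes a plain update_zookeeper() method that pvcd.py hands to an APScheduler background job firing every two seconds. A minimal sketch of that pattern, assuming APScheduler 3.x; the standalone update_zookeeper() stub here stands in for the real bound method taken from t_node[myhostname]:

import time
import apscheduler.schedulers.background

def update_zookeeper():
    # Stand-in for NodeInstance.update_zookeeper(); the real method gathers
    # free memory, CPU load and running-domain data and writes it to Zookeeper.
    print("updating node state in Zookeeper")

# The scheduler runs jobs on its own worker threads, replacing the hand-rolled
# daemon thread this commit removes.
update_timer = apscheduler.schedulers.background.BackgroundScheduler()
update_timer.add_job(update_zookeeper, 'interval', seconds=2)
update_timer.start()

try:
    # Lightweight tick loop, as in pvcd.py below; the interrupt is handled
    # inline here, whereas pvcd.py breaks out and relies on its atexit cleanup().
    while True:
        time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
    update_timer.shutdown()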
@@ -20,17 +20,15 @@
 #
 ###############################################################################

-import os, sys, socket, time, threading, libvirt, kazoo.client
+import os, sys, socket, time, libvirt, kazoo.client

-class NodeInstance(threading.Thread):
+class NodeInstance():
     def __init__(self, name, t_node, s_domain, zk):
-        super(NodeInstance, self).__init__()
         # Passed-in variables on creation
         self.zkey = '/nodes/%s' % name
         self.zk = zk
         self.name = name
         self.state = 'stop'
-        self.stop_thread = threading.Event()
         self.t_node = t_node
         self.active_node_list = []
         self.flushed_node_list = []
@@ -88,10 +86,6 @@ class NodeInstance(threading.Thread):
     def updatedomainlist(self, s_domain):
         self.s_domain = s_domain

-    # Shutdown the thread
-    def stop(self):
-        self.stop_thread.set()
-
     # Flush all VMs on the host
     def flush(self):
         for domain in self.domain_list:
@@ -143,11 +137,7 @@ class NodeInstance(threading.Thread):

         self.zk.set("/nodes/" + self.name + "/state", 'start'.encode('ascii'))

-    def run(self):
-        if self.name == socket.gethostname():
-            self.setup_local_node()
-
-    def setup_local_node(self):
+    def update_zookeeper(self):
         # Connect to libvirt
         libvirt_name = "qemu:///system"
         conn = libvirt.open(libvirt_name)
@@ -170,7 +160,6 @@ class NodeInstance(threading.Thread):
         else:
             self.state = 'flush'

-        while True:
         # Toggle state management of all VMs and remove any non-running VMs
         for domain, instance in self.s_domain.items():
             if instance.inshutdown == False and domain in self.domain_list:
@@ -200,7 +189,6 @@ class NodeInstance(threading.Thread):
             self.zk.set(self.zkey + '/cpuload', str(self.cpuload).encode('ascii'))
             self.zk.set(self.zkey + '/runningdomains', ' '.join(self.domain_list).encode('ascii'))
         except:
-            if self.stop_thread.is_set():
             return

         print(">>> %s - Free memory: %s | Load: %s" % ( time.strftime("%d/%m/%Y %H:%M:%S"), self.memfree, self.cpuload ))
@@ -248,9 +236,3 @@ class NodeInstance(threading.Thread):
         print('Active nodes: %s' % self.active_node_list)
         print('Flushed nodes: %s' % self.flushed_node_list)
         print('Inactive nodes: %s' % self.inactive_node_list)
-
-        # Sleep for 9s but with quick interruptability
-        for x in range(0,90):
-            time.sleep(0.1)
-            if self.stop_thread.is_set():
-                sys.exit(0)
pvcd.py (32 changed lines)
@@ -20,8 +20,7 @@
 #
 ###############################################################################

-from kazoo.client import KazooClient
-from kazoo.client import KazooState
+import kazoo.client
 import libvirt
 import sys
 import socket
@@ -29,8 +28,8 @@ import uuid
 import VMInstance
 import NodeInstance
 import time
-import threading
 import atexit
+import apscheduler.schedulers.background

 def help():
     print("pvcd - Parallel Virtual Cluster management daemon")
@@ -38,8 +37,8 @@ def help():

     help()

-# Connect to zookeeper
-zk = KazooClient(hosts='127.0.0.1:2181')
+# Connect to local zookeeper
+zk = kazoo.client.KazooClient(hosts='127.0.0.1:2181')
 try:
     zk.start()
 except:
@@ -47,10 +46,10 @@ except:
     exit(1)

 def zk_listener(state):
-    if state == KazooState.LOST:
+    if state == kazoo.client.KazooState.LOST:
         cleanup()
         exit(2)
-    elif state == KazooState.SUSPENDED:
+    elif state == kazoo.client.KazooState.SUSPENDED:
         cleanup()
         exit(2)
     else:
@@ -63,9 +62,8 @@ myhostname = socket.gethostname()
 mynodestring = '/nodes/%s' % myhostname

 def cleanup():
-    t_node[myhostname].stop()
-    time.sleep(0.2)
     try:
+        update_timer.shutdown()
         if t_node[myhostname].getstate() != 'flush':
             zk.set('/nodes/' + myhostname + '/state', 'stop'.encode('ascii'))
         zk.stop()
@@ -75,6 +73,7 @@ def cleanup():

 atexit.register(cleanup)

+
 # Check if our node exists in Zookeeper, and create it if not
 if zk.exists('%s' % mynodestring):
     print("Node is present in Zookeeper")
@@ -114,13 +113,18 @@ def updatedomains(new_domain_list):
         if node in t_node:
             t_node[node].updatedomainlist(s_domain)

-t_node[myhostname].start()
-time.sleep(0.2)
+# Set up our update function
+this_node = t_node[myhostname]
+update_zookeeper = this_node.update_zookeeper
+
+# Create timer to update this node in Zookeeper
+update_timer = apscheduler.schedulers.background.BackgroundScheduler()
+update_timer.add_job(update_zookeeper, 'interval', seconds=2)
+update_timer.start()

-while True:
 # Tick loop
+while True:
     try:
         time.sleep(0.1)
     except:
-        cleanup()
-        exit(0)
+        break
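Two details of the new wiring are easy to miss in the hunks above: add_job() receives a bound method (update_zookeeper = this_node.update_zookeeper), so the scheduler gets a zero-argument callable with self already attached, and cleanup() now calls update_timer.shutdown() before closing the Zookeeper session. A self-contained sketch of both points, assuming APScheduler 3.x; the Node class and the "hv1" name are hypothetical stand-ins for NodeInstance and the real hostname:

import time
import apscheduler.schedulers.background

class Node():
    # Hypothetical stand-in for NodeInstance, reduced to the scheduled method.
    def __init__(self, name):
        self.name = name

    def update_zookeeper(self):
        print("updating %s" % self.name)

this_node = Node("hv1")
update_timer = apscheduler.schedulers.background.BackgroundScheduler()
# A bound method is already a zero-argument callable, so no wrapper or args=
# parameter is needed when handing it to the scheduler.
update_timer.add_job(this_node.update_zookeeper, 'interval', seconds=2)
update_timer.start()

time.sleep(5)
# shutdown() defaults to wait=True, letting an in-flight job finish before it
# returns, which is why cleanup() can close the Zookeeper session right after.
update_timer.shutdown()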