#!/usr/bin/env python3

# NodeInstance.py - Class implementing a PVC node in pvcd
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import os
import sys
import psutil
import socket
import time
import libvirt
import threading
import subprocess

import pvcd.log as log
import pvcd.zkhandler as zkhandler
import pvcd.common as common

class NodeInstance(object):
    # Initialization function
    def __init__(self, name, this_node, zk_conn, config, logger, d_node, d_network, d_domain, dns_aggregator):
        # Passed-in variables on creation
        self.name = name
        self.this_node = this_node
        self.zk_conn = zk_conn
        self.config = config
        self.logger = logger
        # The IPMI hostname for fencing
        self.ipmi_hostname = self.config['ipmi_hostname']
        # Which node is primary
        self.primary_node = None
        # States
        self.daemon_mode = zkhandler.readdata(self.zk_conn, '/nodes/{}/daemonmode'.format(self.name))
        self.daemon_state = 'stop'
        self.router_state = 'client'
        self.domain_state = 'ready'
        # Object lists
        self.d_node = d_node
        self.d_network = d_network
        self.d_domain = d_domain
        self.dns_aggregator = dns_aggregator
        # Printable lists
        self.active_node_list = []
        self.flushed_node_list = []
        self.inactive_node_list = []
        self.network_list = []
        self.domain_list = []
        # Node resources
        self.networks_count = 0
        self.domains_count = 0
        self.memused = 0
        self.memfree = 0
        self.memalloc = 0
        self.vcpualloc = 0
        # Floating upstreams
        self.vni_dev = self.config['vni_dev']
        self.vni_ipaddr, self.vni_cidrnetmask = self.config['vni_floating_ip'].split('/')
        self.upstream_dev = self.config['upstream_dev']
        self.upstream_ipaddr, self.upstream_cidrnetmask = self.config['upstream_floating_ip'].split('/')
        # Flags
        self.inflush = False
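
        # The watchers below all use the Kazoo DataWatch pattern: the decorated
        # function runs once immediately and again on every change to the key,
        # and returning False deregisters it. A minimal standalone sketch of the
        # pattern (assuming a connected kazoo.client.KazooClient named zk_conn;
        # '/some/key' is a hypothetical path used only for illustration):
        #
        #   @zk_conn.DataWatch('/some/key')
        #   def watcher(data, stat, event=None):
        #       if event and event.type == 'DELETED':
        #           return False  # stop watching once the key is gone
        #       value = data.decode('ascii') if data else None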

        # Zookeeper handlers for changed states
        @self.zk_conn.DataWatch('/nodes/{}/daemonstate'.format(self.name))
        def watch_node_daemonstate(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 'stop'

            if data != self.daemon_state:
                self.daemon_state = data

        @self.zk_conn.DataWatch('/nodes/{}/routerstate'.format(self.name))
        def watch_node_routerstate(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 'client'

            if self.name == self.this_node and self.daemon_mode == 'coordinator':
                # We're a coordinator so we care about networking
                if data != self.router_state:
                    self.router_state = data
                    if self.router_state == 'primary':
                        self.become_primary()
                    else:
                        self.become_secondary()

        @self.zk_conn.DataWatch('/nodes/{}/domainstate'.format(self.name))
        def watch_node_domainstate(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 'unknown'

            if data != self.domain_state:
                self.domain_state = data

            # Toggle state management of this node
            if self.name == self.this_node:
                if self.domain_state == 'flush' and self.inflush == False:
                    # Do flushing in a thread so it doesn't block the migrates out
                    flush_thread = threading.Thread(target=self.flush, args=(), kwargs={})
                    flush_thread.start()
                if self.domain_state == 'unflush' and self.inflush == False:
                    self.unflush()

        @self.zk_conn.DataWatch('/primary_node')
        def watch_primary_node(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 'none'

            if data != self.primary_node:
                if self.daemon_mode == 'coordinator':
                    # We're a coordinator so we care about networking
                    if data == 'none':
                        # Toggle state management of routing functions
                        if self.name == self.this_node:
                            if self.daemon_state == 'run' and self.router_state != 'primary':
                                # Contend for primary
                                self.logger.out('Contending for primary routing state', state='i')
                                zkhandler.writedata(self.zk_conn, { '/primary_node': self.name })
                    elif data == self.this_node:
                        if self.name == self.this_node:
                            zkhandler.writedata(self.zk_conn, { '/nodes/{}/routerstate'.format(self.name): 'primary' })
                        self.primary_node = data
                    else:
                        if self.name == self.this_node:
                            zkhandler.writedata(self.zk_conn, { '/nodes/{}/routerstate'.format(self.name): 'secondary' })
                        self.primary_node = data
                else:
                    self.primary_node = data

        @self.zk_conn.DataWatch('/nodes/{}/memfree'.format(self.name))
        def watch_node_memfree(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 0

            if data != self.memfree:
                self.memfree = data

        @self.zk_conn.DataWatch('/nodes/{}/memused'.format(self.name))
        def watch_node_memused(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 0

            if data != self.memused:
                self.memused = data

        @self.zk_conn.DataWatch('/nodes/{}/memalloc'.format(self.name))
        def watch_node_memalloc(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 0

            if data != self.memalloc:
                self.memalloc = data

        @self.zk_conn.DataWatch('/nodes/{}/vcpualloc'.format(self.name))
        def watch_node_vcpualloc(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 0

            if data != self.vcpualloc:
                self.vcpualloc = data

        @self.zk_conn.DataWatch('/nodes/{}/runningdomains'.format(self.name))
        def watch_node_runningdomains(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii').split()
            except AttributeError:
                data = []

            if data != self.domain_list:
                self.domain_list = data

        @self.zk_conn.DataWatch('/nodes/{}/networkscount'.format(self.name))
        def watch_node_networkscount(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 0

            if data != self.networks_count:
                self.networks_count = data

        @self.zk_conn.DataWatch('/nodes/{}/domainscount'.format(self.name))
        def watch_node_domainscount(data, stat, event=''):
            if event and event.type == 'DELETED':
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode('ascii')
            except AttributeError:
                data = 0

            if data != self.domains_count:
                self.domains_count = data

    # Update value functions
    def update_node_list(self, d_node):
        self.d_node = d_node

    def update_network_list(self, d_network):
        self.d_network = d_network
        network_list = []
        for network in self.d_network:
            network_list.append(d_network[network].vni)
        self.network_list = network_list

    def update_domain_list(self, d_domain):
        self.d_domain = d_domain

    # Routing primary/secondary states
    def become_secondary(self):
        self.logger.out('Setting router {} to secondary state'.format(self.name), state='i')
        self.logger.out('Network list: {}'.format(', '.join(self.network_list)))
        time.sleep(1)
        for network in self.d_network:
            self.d_network[network].stopDHCPServer()
            self.d_network[network].removeGatewayAddress()
        self.dns_aggregator.stop_aggregator()
        self.removeFloatingAddresses()

    def become_primary(self):
        self.logger.out('Setting router {} to primary state'.format(self.name), state='i')
        self.logger.out('Network list: {}'.format(', '.join(self.network_list)))
        self.createFloatingAddresses()
        self.dns_aggregator.start_aggregator()
        time.sleep(0.5)
        # Start up the gateways and DHCP servers
        for network in self.d_network:
            self.d_network[network].createGatewayAddress()
            self.d_network[network].startDHCPServer()
        time.sleep(0.5)
        # Handle AXFRs after to avoid slowdowns
        for network in self.d_network:
            self.dns_aggregator.get_axfr(network)
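
    # A hedged sketch of how a primary/secondary transition is normally triggered
    # from outside this class (assuming a connected zk_conn and pvcd.zkhandler;
    # 'pvc0' is a hypothetical coordinator name used only for illustration):
    # writing 'primary' to a coordinator's routerstate key fires the
    # watch_node_routerstate DataWatch above and calls become_primary() on that
    # node; its keepalive then updates /primary_node, and the other coordinators'
    # watch_primary_node handlers converge the rest of the cluster to secondary.
    #
    #   zkhandler.writedata(zk_conn, { '/nodes/{}/routerstate'.format('pvc0'): 'primary' })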

    def createFloatingAddresses(self):
        # VNI floating IP
        self.logger.out(
            'Creating floating management IP {}/{} on interface {}'.format(
                self.vni_ipaddr,
                self.vni_cidrnetmask,
                self.vni_dev
            ),
            state='o'
        )
        common.run_os_command(
            'ip address add {}/{} dev {}'.format(
                self.vni_ipaddr,
                self.vni_cidrnetmask,
                self.vni_dev
            )
        )
        common.run_os_command(
            'arping -A -c2 -I {} {}'.format(
                self.vni_dev,
                self.vni_ipaddr
            ),
            background=True
        )
        # Upstream floating IP
        self.logger.out(
            'Creating floating upstream IP {}/{} on interface {}'.format(
                self.upstream_ipaddr,
                self.upstream_cidrnetmask,
                self.upstream_dev
            ),
            state='o'
        )
        common.run_os_command(
            'ip address add {}/{} dev {}'.format(
                self.upstream_ipaddr,
                self.upstream_cidrnetmask,
                self.upstream_dev
            )
        )
        common.run_os_command(
            'arping -A -c2 -I {} {}'.format(
                self.upstream_dev,
                self.upstream_ipaddr
            ),
            background=True
        )

    def removeFloatingAddresses(self):
        # VNI floating IP
        self.logger.out(
            'Removing floating management IP {}/{} from interface {}'.format(
                self.vni_ipaddr,
                self.vni_cidrnetmask,
                self.vni_dev
            ),
            state='o'
        )
        common.run_os_command(
            'ip address delete {}/{} dev {}'.format(
                self.vni_ipaddr,
                self.vni_cidrnetmask,
                self.vni_dev
            )
        )
        # Upstream floating IP
        self.logger.out(
            'Removing floating upstream IP {}/{} from interface {}'.format(
                self.upstream_ipaddr,
                self.upstream_cidrnetmask,
                self.upstream_dev
            ),
            state='o'
        )
        common.run_os_command(
            'ip address delete {}/{} dev {}'.format(
                self.upstream_ipaddr,
                self.upstream_cidrnetmask,
                self.upstream_dev
            )
        )

    # Flush all VMs on the host
    def flush(self):
        self.inflush = True
        self.logger.out('Flushing node "{}" of running VMs'.format(self.name), state='i')
        self.logger.out('Domain list: {}'.format(', '.join(self.domain_list)))
        # Iterate a snapshot; self.domain_list can be replaced by the
        # runningdomains watcher while this loop runs as VMs migrate away
        fixed_domain_list = self.domain_list.copy()
        for dom_uuid in fixed_domain_list:
            self.logger.out('Selecting target to migrate VM "{}"'.format(dom_uuid), state='i')

            current_node = zkhandler.readdata(self.zk_conn, '/domains/{}/node'.format(dom_uuid))
            target_node = findTargetHypervisor(self.zk_conn, 'mem', dom_uuid)
            if target_node == None:
                self.logger.out('Failed to find migration target for VM "{}"; shutting down'.format(dom_uuid), state='e')
                zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(dom_uuid): 'shutdown' })
            else:
                self.logger.out('Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node), state='i')
                zkhandler.writedata(self.zk_conn, {
                    '/domains/{}/state'.format(dom_uuid): 'migrate',
                    '/domains/{}/node'.format(dom_uuid): target_node,
                    '/domains/{}/lastnode'.format(dom_uuid): current_node
                })

                # Wait for the VM to migrate so the next VM's free RAM count is accurate (they migrate in serial anyways)
                while True:
                    time.sleep(1)
                    vm_current_state = zkhandler.readdata(self.zk_conn, '/domains/{}/state'.format(dom_uuid))
                    if vm_current_state == "start":
                        break

        zkhandler.writedata(self.zk_conn, { '/nodes/{}/runningdomains'.format(self.name): '' })
        zkhandler.writedata(self.zk_conn, { '/nodes/{}/domainstate'.format(self.name): 'flushed' })
        self.inflush = False

    def unflush(self):
        self.inflush = True
        self.logger.out('Restoring node {} to active service.'.format(self.name), state='i')
        zkhandler.writedata(self.zk_conn, { '/nodes/{}/domainstate'.format(self.name): 'ready' })
        fixed_domain_list = self.d_domain.copy()
        for dom_uuid in fixed_domain_list:
            try:
                last_node = zkhandler.readdata(self.zk_conn, '/domains/{}/lastnode'.format(dom_uuid))
            except:
                continue

            if last_node != self.name:
                continue

            self.logger.out('Setting unmigration for VM "{}"'.format(dom_uuid), state='i')
            zkhandler.writedata(self.zk_conn, {
                '/domains/{}/state'.format(dom_uuid): 'migrate',
                '/domains/{}/node'.format(dom_uuid): self.name,
                '/domains/{}/lastnode'.format(dom_uuid): ''
            })

        self.inflush = False

    def update_zookeeper(self):
        # Connect to libvirt
        libvirt_name = "qemu:///system"
        lv_conn = libvirt.open(libvirt_name)
        if lv_conn == None:
            self.logger.out('Failed to open connection to "{}"'.format(libvirt_name), state='e')
            return

        # Get past state and update if needed
        past_state = zkhandler.readdata(self.zk_conn, '/nodes/{}/daemonstate'.format(self.name))
        if past_state != 'run':
            self.daemon_state = 'run'
            zkhandler.writedata(self.zk_conn, { '/nodes/{}/daemonstate'.format(self.name): 'run' })
        else:
            self.daemon_state = 'run'

        # Ensure the primary key is properly set
        if self.name == self.this_node:
            if self.router_state == 'primary':
                if zkhandler.readdata(self.zk_conn, '/primary_node') != self.name:
                    zkhandler.writedata(self.zk_conn, { '/primary_node': self.name })

        # Toggle state management of dead VMs to restart them
        memalloc = 0
        vcpualloc = 0
        for domain, instance in self.d_domain.items():
            if domain in self.domain_list:
                # Add the allocated memory to our memalloc value
                memalloc += instance.getmemory()
                vcpualloc += instance.getvcpus()
                if instance.getstate() == 'start' and instance.getnode() == self.name:
                    if instance.getdom() != None:
                        try:
                            if instance.getdom().state()[0] != libvirt.VIR_DOMAIN_RUNNING:
                                raise
                        except Exception as e:
                            # Toggle a state "change"
                            zkhandler.writedata(self.zk_conn, { '/domains/{}/state'.format(domain): instance.getstate() })

        # Ensure that any running VMs are readded to the domain_list
        running_domains = lv_conn.listAllDomains(libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE)
        for domain in running_domains:
            domain_uuid = domain.UUIDString()
            if domain_uuid not in self.domain_list:
                self.domain_list.append(domain_uuid)

        # Set our information in zookeeper
        #self.name = lv_conn.getHostname()
        self.memused = int(psutil.virtual_memory().used / 1024 / 1024)
        self.memfree = int(psutil.virtual_memory().free / 1024 / 1024)
        self.memalloc = memalloc
        self.vcpualloc = vcpualloc
        self.cpuload = os.getloadavg()[0]
        self.domains_count = len(lv_conn.listDomainsID())
        keepalive_time = int(time.time())
        try:
            zkhandler.writedata(self.zk_conn, {
                '/nodes/{}/memused'.format(self.name): str(self.memused),
                '/nodes/{}/memfree'.format(self.name): str(self.memfree),
                '/nodes/{}/memalloc'.format(self.name): str(self.memalloc),
                '/nodes/{}/vcpualloc'.format(self.name): str(self.vcpualloc),
                '/nodes/{}/cpuload'.format(self.name): str(self.cpuload),
                '/nodes/{}/networkscount'.format(self.name): str(self.networks_count),
                '/nodes/{}/domainscount'.format(self.name): str(self.domains_count),
                '/nodes/{}/runningdomains'.format(self.name): ' '.join(self.domain_list),
                '/nodes/{}/keepalive'.format(self.name): str(keepalive_time)
            })
        except:
            self.logger.out('Failed to set keepalive data', state='e')
            return

        # Close the Libvirt connection
        lv_conn.close()

        # Display node information to the terminal
        self.logger.out('{}{} keepalive{}'.format(self.logger.fmt_purple, self.name, self.logger.fmt_end), state='t')
        self.logger.out(
            '{bold}Domains:{nobold} {domcount} '
            '{bold}Networks:{nobold} {netcount} '
            '{bold}VM memory [MiB]:{nobold} {allocmem} '
            '{bold}Free memory [MiB]:{nobold} {freemem} '
            '{bold}Used memory [MiB]:{nobold} {usedmem} '
            '{bold}Load:{nobold} {load}'.format(
                bold=self.logger.fmt_bold,
                nobold=self.logger.fmt_end,
                domcount=self.domains_count,
                freemem=self.memfree,
                usedmem=self.memused,
                load=self.cpuload,
                allocmem=self.memalloc,
                netcount=self.networks_count
            )
        )

        # Update our local node lists
        for node_name in self.d_node:
            try:
                node_daemon_state = zkhandler.readdata(self.zk_conn, '/nodes/{}/daemonstate'.format(node_name))
                node_domain_state = zkhandler.readdata(self.zk_conn, '/nodes/{}/domainstate'.format(node_name))
                node_keepalive = int(zkhandler.readdata(self.zk_conn, '/nodes/{}/keepalive'.format(node_name)))
            except:
                node_daemon_state = 'unknown'
                node_domain_state = 'unknown'
                node_keepalive = 0

            # Handle deadtime and fencing if needed
            # (A node is considered dead when its keepalive timer is >6*keepalive_interval seconds
            # out-of-date while in 'start' state)
            node_deadtime = int(time.time()) - ( int(self.config['keepalive_interval']) * int(self.config['fence_intervals']) )
            if node_keepalive < node_deadtime and node_daemon_state == 'run':
                self.logger.out('Node {} seems dead - starting monitor for fencing'.format(node_name), state='w')
                zkhandler.writedata(self.zk_conn, { '/nodes/{}/daemonstate'.format(node_name): 'dead' })
                fence_thread = threading.Thread(target=fenceNode, args=(node_name, self.zk_conn, self.config, self.logger), kwargs={})
                fence_thread.start()

            # Update the arrays
            if node_daemon_state == 'run' and node_domain_state != 'flushed' and node_name not in self.active_node_list:
                self.active_node_list.append(node_name)
                try:
                    self.flushed_node_list.remove(node_name)
                except ValueError:
                    pass
                try:
                    self.inactive_node_list.remove(node_name)
                except ValueError:
                    pass
            if node_daemon_state != 'run' and node_domain_state != 'flushed' and node_name not in self.inactive_node_list:
                self.inactive_node_list.append(node_name)
                try:
                    self.active_node_list.remove(node_name)
                except ValueError:
                    pass
                try:
                    self.flushed_node_list.remove(node_name)
                except ValueError:
                    pass
            if node_domain_state == 'flushed' and node_name not in self.flushed_node_list:
                self.flushed_node_list.append(node_name)
                try:
                    self.active_node_list.remove(node_name)
                except ValueError:
                    pass
                try:
                    self.inactive_node_list.remove(node_name)
                except ValueError:
                    pass

        # List of the non-primary coordinators
        secondary_node_list = self.config['coordinators'].split(',')
        if secondary_node_list:
            secondary_node_list.remove(self.primary_node)
            for node in secondary_node_list:
                if node in self.inactive_node_list:
                    secondary_node_list.remove(node)

        # Display cluster information to the terminal
        self.logger.out('{}Cluster status{}'.format(self.logger.fmt_purple, self.logger.fmt_end), state='t')
        self.logger.out('{}Primary coordinator:{} {}'.format(self.logger.fmt_bold, self.logger.fmt_end, self.primary_node))
        self.logger.out('{}Secondary coordinators:{} {}'.format(self.logger.fmt_bold, self.logger.fmt_end, ' '.join(secondary_node_list)))
        self.logger.out('{}Active hypervisors:{} {}'.format(self.logger.fmt_bold, self.logger.fmt_end, ' '.join(self.active_node_list)))
        self.logger.out('{}Flushed hypervisors:{} {}'.format(self.logger.fmt_bold, self.logger.fmt_end, ' '.join(self.flushed_node_list)))
        self.logger.out('{}Inactive nodes:{} {}'.format(self.logger.fmt_bold, self.logger.fmt_end, ' '.join(self.inactive_node_list)))
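
# For reference, the per-node keys the keepalive above maintains under
# /nodes/<name> are: memused, memfree, memalloc, vcpualloc, cpuload,
# networkscount, domainscount, runningdomains and keepalive (a Unix timestamp);
# the migration target helpers below read several of these back when scoring nodes.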

#
# Find a migration target
#
def findTargetHypervisor(zk_conn, search_field, dom_uuid):
    if search_field == 'mem':
        return findTargetHypervisorMem(zk_conn, dom_uuid)
    if search_field == 'load':
        return findTargetHypervisorLoad(zk_conn, dom_uuid)
    if search_field == 'vcpus':
        return findTargetHypervisorVCPUs(zk_conn, dom_uuid)
    if search_field == 'vms':
        return findTargetHypervisorVMs(zk_conn, dom_uuid)
    return None
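
# A minimal usage sketch (assuming an established Kazoo connection in zk_conn and
# an existing domain UUID in dom_uuid): pick a target by most unallocated memory
# and, if one is found, request a migration by writing the domain's Zookeeper keys
# (flush() above additionally records lastnode for later unmigration).
#
#   target = findTargetHypervisor(zk_conn, 'mem', dom_uuid)
#   if target:
#       zkhandler.writedata(zk_conn, {
#           '/domains/{}/state'.format(dom_uuid): 'migrate',
#           '/domains/{}/node'.format(dom_uuid): target
#       })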

# Get the list of valid target nodes
def getHypervisors(zk_conn, dom_uuid):
    valid_node_list = []
    full_node_list = zkhandler.listchildren(zk_conn, '/nodes')
    current_node = zkhandler.readdata(zk_conn, '/domains/{}/node'.format(dom_uuid))

    for node in full_node_list:
        daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node))
        domain_state = zkhandler.readdata(zk_conn, '/nodes/{}/domainstate'.format(node))

        if node == current_node:
            continue

        if daemon_state != 'run' or domain_state != 'ready':
            continue

        valid_node_list.append(node)

    return valid_node_list

# via free memory (relative to allocated memory)
def findTargetHypervisorMem(zk_conn, dom_uuid):
    most_allocfree = 0
    target_node = None

    node_list = getHypervisors(zk_conn, dom_uuid)
    for node in node_list:
        memalloc = int(zkhandler.readdata(zk_conn, '/nodes/{}/memalloc'.format(node)))
        memused = int(zkhandler.readdata(zk_conn, '/nodes/{}/memused'.format(node)))
        memfree = int(zkhandler.readdata(zk_conn, '/nodes/{}/memfree'.format(node)))
        memtotal = memused + memfree
        allocfree = memtotal - memalloc

        if allocfree > most_allocfree:
            most_allocfree = allocfree
            target_node = node

    return target_node
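
# Worked example of the selection metric above (illustrative numbers only): a node
# reporting memused=4096, memfree=12288 and memalloc=8192 (all MiB) scores
# allocfree = (4096 + 12288) - 8192 = 8192, so it wins over a node scoring 4096,
# even if that other node currently shows more free memory, because allocation is
# measured against total memory rather than free memory.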

# via load average
def findTargetHypervisorLoad(zk_conn, dom_uuid):
    least_load = 9999
    target_node = None

    node_list = getHypervisors(zk_conn, dom_uuid)
    for node in node_list:
        # The keepalive publishes the load average as the 'cpuload' key (a float)
        load = float(zkhandler.readdata(zk_conn, '/nodes/{}/cpuload'.format(node)))

        if load < least_load:
            least_load = load
            target_node = node

    return target_node

# via total vCPUs
def findTargetHypervisorVCPUs(zk_conn, dom_uuid):
    least_vcpus = 9999
    target_node = None

    node_list = getHypervisors(zk_conn, dom_uuid)
    for node in node_list:
        vcpus = int(zkhandler.readdata(zk_conn, '/nodes/{}/vcpualloc'.format(node)))

        if vcpus < least_vcpus:
            least_vcpus = vcpus
            target_node = node

    return target_node

# via total VMs
def findTargetHypervisorVMs(zk_conn, dom_uuid):
    least_vms = 9999
    target_node = None

    node_list = getHypervisors(zk_conn, dom_uuid)
    for node in node_list:
        vms = int(zkhandler.readdata(zk_conn, '/nodes/{}/domainscount'.format(node)))

        if vms < least_vms:
            least_vms = vms
            target_node = node

    return target_node

#
# Fence thread entry function
#
def fenceNode(node_name, zk_conn, config, logger):
    failcount = 0
    # We allow exactly 3 saving throws for the host to come back online
    while failcount < 3:
        # Wait 5 seconds
        time.sleep(5)
        # Get the state
        node_daemon_state = zkhandler.readdata(zk_conn, '/nodes/{}/daemonstate'.format(node_name))
        # Is it still 'dead'
        if node_daemon_state == 'dead':
            failcount += 1
            logger.out('Node "{}" failed {} saving throws'.format(node_name, failcount), state='w')
        # It changed back to something else so it must be alive
        else:
            logger.out('Node "{}" passed a saving throw; canceling fence'.format(node_name), state='o')
            return

    logger.out('Fencing node "{}" via IPMI reboot signal'.format(node_name), state='e')

    # Get IPMI information
    ipmi_hostname = zkhandler.readdata(zk_conn, '/nodes/{}/ipmihostname'.format(node_name))
    ipmi_username = zkhandler.readdata(zk_conn, '/nodes/{}/ipmiusername'.format(node_name))
    ipmi_password = zkhandler.readdata(zk_conn, '/nodes/{}/ipmipassword'.format(node_name))

    # Shoot it in the head
    fence_status = rebootViaIPMI(ipmi_hostname, ipmi_username, ipmi_password, logger)
    # Hold to ensure the fence takes effect
    time.sleep(3)

    # Force into secondary network state if needed
    if node_name in config['coordinators'].split(','):
        zkhandler.writedata(zk_conn, { '/nodes/{}/routerstate'.format(node_name): 'secondary' })
        if zkhandler.readdata(zk_conn, '/primary_node') == node_name:
            zkhandler.writedata(zk_conn, { '/primary_node': 'none' })

    # If the fence succeeded and successful_fence is migrate
    if fence_status == True and config['successful_fence'] == 'migrate':
        migrateFromFencedNode(zk_conn, node_name, logger)
    # If the fence failed and failed_fence is migrate
    if fence_status == False and config['failed_fence'] == 'migrate' and config['suicide_intervals'] != '0':
        migrateFromFencedNode(zk_conn, node_name, logger)
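
# Fencing timeline sketch (illustrative configuration values, not defaults taken
# from this file): with keepalive_interval=5 and fence_intervals=6, a node whose
# keepalive is more than 5 * 6 = 30 seconds stale while in 'run' state is marked
# dead by update_zookeeper(); fenceNode() then allows 3 saving throws of 5 seconds
# each (15 more seconds) before issuing the IPMI chassis power reset.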

# Migrate hosts away from a fenced node
def migrateFromFencedNode(zk_conn, node_name, logger):
    logger.out('Moving VMs from dead node "{}" to new hosts'.format(node_name), state='i')
    dead_node_running_domains = zkhandler.readdata(zk_conn, '/nodes/{}/runningdomains'.format(node_name)).split()
    for dom_uuid in dead_node_running_domains:
        target_node = findTargetHypervisor(zk_conn, 'mem', dom_uuid)

        logger.out('Moving VM "{}" to node "{}"'.format(dom_uuid, target_node), state='i')
        zkhandler.writedata(zk_conn, {
            '/domains/{}/state'.format(dom_uuid): 'start',
            '/domains/{}/node'.format(dom_uuid): target_node,
            '/domains/{}/lastnode'.format(dom_uuid): node_name
        })

    # Set node in flushed state for easy remigrating when it comes back
    zkhandler.writedata(zk_conn, { '/nodes/{}/domainstate'.format(node_name): 'flushed' })

#
# Perform an IPMI fence
#
def rebootViaIPMI(ipmi_hostname, ipmi_user, ipmi_password, logger):
    ipmi_command = ['/usr/bin/ipmitool', '-I', 'lanplus', '-H', ipmi_hostname, '-U', ipmi_user, '-P', ipmi_password, 'chassis', 'power', 'reset']
    ipmi_command_output = subprocess.run(ipmi_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if ipmi_command_output.returncode == 0:
        logger.out('Successfully rebooted dead node', state='o')
        return True
    else:
        logger.out('Failed to reboot dead node', state='e')
        return False
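
# For manual testing, the same reset the function above issues can be run by hand
# (hypothetical BMC hostname and credentials, shown for illustration only):
#
#   /usr/bin/ipmitool -I lanplus -H pvc0-lom -U admin -P secret chassis power reset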