pvc/node-daemon/pvcnoded/fencing.py

#!/usr/bin/env python3
# fencing.py - PVC daemon function library, node fencing functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2021 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################
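
# This module implements the node fencing flow for the PVC node daemon:
#   1. Give a node marked 'dead' a series of "saving throws" to recover (fenceNode).
#   2. If it stays dead, fence it via an IPMI chassis power reset (rebootViaIPMI).
#   3. If the fenced node is a coordinator, force it out of the primary router role.
#   4. Depending on the successful_fence/failed_fence configuration, migrate its
#      VMs to other nodes (migrateFromFencedNode).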

import time

import daemon_lib.common as common

import pvcnoded.VMInstance as VMInstance


#
# Fence thread entry function
#
def fenceNode(node_name, zkhandler, config, logger):
    # We allow exactly 6 saving throws (30 seconds) for the host to come back online before we kill it
    failcount_limit = 6
    failcount = 0
    while failcount < failcount_limit:
        # Wait one keepalive interval (5 seconds by default)
        time.sleep(config['keepalive_interval'])
        # Get the state
        node_daemon_state = zkhandler.read('/nodes/{}/daemonstate'.format(node_name))
        # Is it still 'dead'?
        if node_daemon_state == 'dead':
            failcount += 1
            logger.out('Node "{}" failed {}/{} saving throws'.format(node_name, failcount, failcount_limit), state='w')
        # It changed back to something else, so it must be alive
        else:
            logger.out('Node "{}" passed a saving throw; canceling fence'.format(node_name), state='o')
            return
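
    # The node failed all of its saving throws; proceed to fence it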
    logger.out('Fencing node "{}" via IPMI reboot signal'.format(node_name), state='w')

    # Get IPMI information
    ipmi_hostname = zkhandler.read('/nodes/{}/ipmihostname'.format(node_name))
    ipmi_username = zkhandler.read('/nodes/{}/ipmiusername'.format(node_name))
    ipmi_password = zkhandler.read('/nodes/{}/ipmipassword'.format(node_name))

    # Shoot it in the head
    fence_status = rebootViaIPMI(ipmi_hostname, ipmi_username, ipmi_password, logger)

    # Hold to ensure the fence takes effect and the system stabilizes
    time.sleep(config['keepalive_interval'] * 2)

    # Force into secondary network state if needed
    if node_name in config['coordinators']:
        logger.out('Forcing secondary status for node "{}"'.format(node_name), state='i')
        zkhandler.write([
            ('/nodes/{}/routerstate'.format(node_name), 'secondary')
        ])
        if zkhandler.read('/config/primary_node') == node_name:
            zkhandler.write([
                ('/config/primary_node', 'none')
            ])

    # If the fence succeeded and successful_fence is migrate
    if fence_status and config['successful_fence'] == 'migrate':
        migrateFromFencedNode(zkhandler, node_name, config, logger)

    # If the fence failed and failed_fence is migrate
    if not fence_status and config['failed_fence'] == 'migrate' and config['suicide_intervals'] != '0':
        migrateFromFencedNode(zkhandler, node_name, config, logger)


# Migrate hosts away from a fenced node
def migrateFromFencedNode(zkhandler, node_name, config, logger):
    logger.out('Migrating VMs from dead node "{}" to new hosts'.format(node_name), state='i')

    # Get the list of VMs
    dead_node_running_domains = zkhandler.read('/nodes/{}/runningdomains'.format(node_name)).split()

    # Set the node to a custom domainstate so we know what's happening
    zkhandler.write([
        ('/nodes/{}/domainstate'.format(node_name), 'fence-flush')
    ])
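
    # The helper below handles one VM at a time: it clears any stale disk locks,
    # finds a new target node, and either relocates the VM there or marks it
    # stopped with autostart so it returns when its original node comes back.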
    # Migrate a VM after a flush
    def fence_migrate_vm(dom_uuid):
        VMInstance.flush_locks(zkhandler, logger, dom_uuid)

        target_node = common.findTargetNode(zkhandler, config, logger, dom_uuid)

        if target_node is not None:
            logger.out('Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node), state='i')
            zkhandler.write([
                ('/domains/{}/state'.format(dom_uuid), 'start'),
                ('/domains/{}/node'.format(dom_uuid), target_node),
                ('/domains/{}/lastnode'.format(dom_uuid), node_name)
            ])
        else:
            logger.out('No target node found for VM "{}"; VM will autostart on next unflush/ready of current node'.format(dom_uuid), state='i')
            zkhandler.write([
                ('/domains/{}/state'.format(dom_uuid), 'stopped'),
                ('/domains/{}/node_autostart'.format(dom_uuid), 'True')
            ])
    # Loop through the VMs
    for dom_uuid in dead_node_running_domains:
        fence_migrate_vm(dom_uuid)

    # Set node in flushed state for easy remigrating when it comes back
    zkhandler.write([
        ('/nodes/{}/domainstate'.format(node_name), 'flushed')
    ])


#
# Perform an IPMI fence
#
def rebootViaIPMI(ipmi_hostname, ipmi_user, ipmi_password, logger):
    # Forcibly reboot the node
    ipmi_command_reset = '/usr/bin/ipmitool -I lanplus -H {} -U {} -P {} chassis power reset'.format(
        ipmi_hostname, ipmi_user, ipmi_password
    )
    ipmi_reset_retcode, ipmi_reset_stdout, ipmi_reset_stderr = common.run_os_command(ipmi_command_reset)

    if ipmi_reset_retcode != 0:
        logger.out('Failed to reboot dead node', state='e')
        print(ipmi_reset_stderr)
        return False

    time.sleep(2)
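
    # A chassis power reset may not bring up a host that was already powered off
    # (behaviour varies by BMC), so confirm the power state below and issue an
    # explicit power-on if required.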
    # Ensure the node is powered on
    ipmi_command_status = '/usr/bin/ipmitool -I lanplus -H {} -U {} -P {} chassis power status'.format(
        ipmi_hostname, ipmi_user, ipmi_password
    )
    ipmi_status_retcode, ipmi_status_stdout, ipmi_status_stderr = common.run_os_command(ipmi_command_status)

    # Trigger a power start if needed
    if ipmi_status_stdout.strip() != "Chassis Power is on":
        ipmi_command_start = '/usr/bin/ipmitool -I lanplus -H {} -U {} -P {} chassis power on'.format(
            ipmi_hostname, ipmi_user, ipmi_password
        )
        ipmi_start_retcode, ipmi_start_stdout, ipmi_start_stderr = common.run_os_command(ipmi_command_start)

        if ipmi_start_retcode != 0:
            logger.out('Failed to start powered-off dead node', state='e')
            print(ipmi_start_stderr)
            return False

    # Declare success
    logger.out('Successfully rebooted dead node', state='o')
    return True


#
# Verify that IPMI connectivity to this host exists (used during node init)
#
def verifyIPMI(ipmi_hostname, ipmi_user, ipmi_password):
    ipmi_command_status = '/usr/bin/ipmitool -I lanplus -H {} -U {} -P {} chassis power status'.format(
        ipmi_hostname, ipmi_user, ipmi_password
    )
    ipmi_status_retcode, ipmi_status_stdout, ipmi_status_stderr = common.run_os_command(ipmi_command_status, timeout=2)
    # The status query must succeed, and the running node must report its own chassis power as on
    if ipmi_status_retcode == 0 and ipmi_status_stdout.strip() == "Chassis Power is on":
        return True
    else:
        return False
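

# Illustrative only: a rough sketch of how a caller such as the node daemon might
# launch the fence handler when a peer's daemonstate goes 'dead'. The names and
# wiring below are assumptions for illustration, not taken from this file:
#
#     from threading import Thread
#     import pvcnoded.fencing as fencing
#
#     fence_thread = Thread(target=fencing.fenceNode, args=(node_name, zkhandler, config, logger))
#     fence_thread.start()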