Remove old CLI code
This commit is contained in:
parent 1306054a98
commit 198d083ea6
@@ -1,33 +0,0 @@
#!/usr/bin/env python3

# pvc.py - PVC client command-line interface (stub testing interface)
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import pvc.pvc


#
# Main entry point
#
def main():
    return pvc.pvc.cli(obj={})


if __name__ == "__main__":
    main()
@@ -1,97 +0,0 @@
#!/usr/bin/env python3

# ansiprint.py - Printing function for formatted messages
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import datetime


# ANSII colours for output
def red():
    return "\033[91m"


def blue():
    return "\033[94m"


def cyan():
    return "\033[96m"


def green():
    return "\033[92m"


def yellow():
    return "\033[93m"


def purple():
    return "\033[95m"


def bold():
    return "\033[1m"


def end():
    return "\033[0m"


# Print function
def echo(message, prefix, state):
    # Get the date
    date = "{} - ".format(datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S.%f"))
    endc = end()

    # Continuation
    if state == "c":
        date = ""
        colour = ""
        prompt = " "
    # OK
    elif state == "o":
        colour = green()
        prompt = ">>> "
    # Error
    elif state == "e":
        colour = red()
        prompt = ">>> "
    # Warning
    elif state == "w":
        colour = yellow()
        prompt = ">>> "
    # Tick
    elif state == "t":
        colour = purple()
        prompt = ">>> "
    # Information
    elif state == "i":
        colour = blue()
        prompt = ">>> "
    else:
        colour = bold()
        prompt = ">>> "

    # Append space to prefix
    if prefix != "":
        prefix = prefix + " "

    print(colour + prompt + endc + date + prefix + message)
File diff suppressed because it is too large
@@ -1,313 +0,0 @@
#!/usr/bin/env python3

# cluster.py - PVC CLI client function library, cluster management
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import json

import pvc.lib.ansiprint as ansiprint
from pvc.lib.common import call_api


def initialize(config, overwrite=False):
    """
    Initialize the PVC cluster

    API endpoint: GET /api/v1/initialize
    API arguments: overwrite, yes-i-really-mean-it
    API schema: {json_data_object}
    """
    params = {"yes-i-really-mean-it": "yes", "overwrite": overwrite}
    response = call_api(config, "post", "/initialize", params=params)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def backup(config):
    """
    Get a JSON backup of the cluster

    API endpoint: GET /api/v1/backup
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(config, "get", "/backup")

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get("message", "")


def restore(config, cluster_data):
    """
    Restore a JSON backup to the cluster

    API endpoint: POST /api/v1/restore
    API arguments: yes-i-really-mean-it
    API schema: {json_data_object}
    """
    cluster_data_json = json.dumps(cluster_data)

    params = {"yes-i-really-mean-it": "yes"}
    data = {"cluster_data": cluster_data_json}
    response = call_api(config, "post", "/restore", params=params, data=data)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def maintenance_mode(config, state):
    """
    Enable or disable PVC cluster maintenance mode

    API endpoint: POST /api/v1/status
    API arguments: {state}={state}
    API schema: {json_data_object}
    """
    params = {"state": state}
    response = call_api(config, "post", "/status", params=params)

    if response.status_code == 200:
        retstatus = True
    else:
        retstatus = False

    return retstatus, response.json().get("message", "")


def get_info(config):
    """
    Get status of the PVC cluster

    API endpoint: GET /api/v1/status
    API arguments:
    API schema: {json_data_object}
    """
    response = call_api(config, "get", "/status")

    if response.status_code == 200:
        return True, response.json()
    else:
        return False, response.json().get("message", "")


def format_info(cluster_information, oformat):
    if oformat == "json":
        return json.dumps(cluster_information)

    if oformat == "json-pretty":
        return json.dumps(cluster_information, indent=4)

    # Plain formatting, i.e. human-readable
    if (
        cluster_information.get("maintenance") == "true"
        or cluster_information.get("cluster_health", {}).get("health", "N/A") == "N/A"
    ):
        health_colour = ansiprint.blue()
    elif cluster_information.get("cluster_health", {}).get("health", 100) > 90:
        health_colour = ansiprint.green()
    elif cluster_information.get("cluster_health", {}).get("health", 100) > 50:
        health_colour = ansiprint.yellow()
    else:
        health_colour = ansiprint.red()

    ainformation = []

    ainformation.append(
        "{}PVC cluster status:{}".format(ansiprint.bold(), ansiprint.end())
    )
    ainformation.append("")

    health_text = (
        f"{cluster_information.get('cluster_health', {}).get('health', 'N/A')}"
    )
    if health_text != "N/A":
        health_text += "%"
    if cluster_information.get("maintenance") == "true":
        health_text += " (maintenance on)"

    ainformation.append(
        "{}Cluster health:{} {}{}{}".format(
            ansiprint.purple(),
            ansiprint.end(),
            health_colour,
            health_text,
            ansiprint.end(),
        )
    )
    if cluster_information.get("cluster_health", {}).get("messages"):
        health_messages = "\n > ".join(
            sorted(cluster_information["cluster_health"]["messages"])
        )
        ainformation.append(
            "{}Health messages:{} > {}".format(
                ansiprint.purple(),
                ansiprint.end(),
                health_messages,
            )
        )
    else:
        ainformation.append(
            "{}Health messages:{} N/A".format(
                ansiprint.purple(),
                ansiprint.end(),
            )
        )

    if oformat == "short":
        return "\n".join(ainformation)

    ainformation.append("")
    ainformation.append(
        "{}Primary node:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["primary_node"]
        )
    )
    ainformation.append(
        "{}PVC version:{} {}".format(
            ansiprint.purple(),
            ansiprint.end(),
            cluster_information.get("pvc_version", "N/A"),
        )
    )
    ainformation.append(
        "{}Cluster upstream IP:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["upstream_ip"]
        )
    )
    ainformation.append("")
    ainformation.append(
        "{}Total nodes:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["nodes"]["total"]
        )
    )
    ainformation.append(
        "{}Total VMs:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["vms"]["total"]
        )
    )
    ainformation.append(
        "{}Total networks:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["networks"]
        )
    )
    ainformation.append(
        "{}Total OSDs:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["osds"]["total"]
        )
    )
    ainformation.append(
        "{}Total pools:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["pools"]
        )
    )
    ainformation.append(
        "{}Total volumes:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["volumes"]
        )
    )
    ainformation.append(
        "{}Total snapshots:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["snapshots"]
        )
    )

    nodes_string = "{}Nodes:{} {}/{} {}ready,run{}".format(
        ansiprint.purple(),
        ansiprint.end(),
        cluster_information["nodes"].get("run,ready", 0),
        cluster_information["nodes"].get("total", 0),
        ansiprint.green(),
        ansiprint.end(),
    )
    for state, count in cluster_information["nodes"].items():
        if state == "total" or state == "run,ready":
            continue

        nodes_string += " {}/{} {}{}{}".format(
            count,
            cluster_information["nodes"]["total"],
            ansiprint.yellow(),
            state,
            ansiprint.end(),
        )

    ainformation.append("")
    ainformation.append(nodes_string)

    vms_string = "{}VMs:{} {}/{} {}start{}".format(
        ansiprint.purple(),
        ansiprint.end(),
        cluster_information["vms"].get("start", 0),
        cluster_information["vms"].get("total", 0),
        ansiprint.green(),
        ansiprint.end(),
    )
    for state, count in cluster_information["vms"].items():
        if state == "total" or state == "start":
            continue

        if state in ["disable", "migrate", "unmigrate", "provision"]:
            colour = ansiprint.blue()
        else:
            colour = ansiprint.yellow()

        vms_string += " {}/{} {}{}{}".format(
            count, cluster_information["vms"]["total"], colour, state, ansiprint.end()
        )

    ainformation.append("")
    ainformation.append(vms_string)

    if cluster_information["osds"]["total"] > 0:
        osds_string = "{}Ceph OSDs:{} {}/{} {}up,in{}".format(
            ansiprint.purple(),
            ansiprint.end(),
            cluster_information["osds"].get("up,in", 0),
            cluster_information["osds"].get("total", 0),
            ansiprint.green(),
            ansiprint.end(),
        )
        for state, count in cluster_information["osds"].items():
            if state == "total" or state == "up,in":
                continue

            osds_string += " {}/{} {}{}{}".format(
                count,
                cluster_information["osds"]["total"],
                ansiprint.yellow(),
                state,
                ansiprint.end(),
            )

        ainformation.append("")
        ainformation.append(osds_string)

    ainformation.append("")
    return "\n".join(ainformation)
@@ -1,201 +0,0 @@
#!/usr/bin/env python3

# common.py - PVC CLI client function library, Common functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import os
import math
import time
import requests
import click
from urllib3 import disable_warnings


def format_bytes(size_bytes):
    byte_unit_matrix = {
        "B": 1,
        "K": 1024,
        "M": 1024 * 1024,
        "G": 1024 * 1024 * 1024,
        "T": 1024 * 1024 * 1024 * 1024,
        "P": 1024 * 1024 * 1024 * 1024 * 1024,
    }
    human_bytes = "0B"
    for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get):
        formatted_bytes = int(math.ceil(size_bytes / byte_unit_matrix[unit]))
        if formatted_bytes < 10000:
            human_bytes = "{}{}".format(formatted_bytes, unit)
            break
    return human_bytes


def format_metric(integer):
    integer_unit_matrix = {
        "": 1,
        "K": 1000,
        "M": 1000 * 1000,
        "B": 1000 * 1000 * 1000,
        "T": 1000 * 1000 * 1000 * 1000,
        "Q": 1000 * 1000 * 1000 * 1000 * 1000,
    }
    human_integer = "0"
    for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get):
        formatted_integer = int(math.ceil(integer / integer_unit_matrix[unit]))
        if formatted_integer < 10000:
            human_integer = "{}{}".format(formatted_integer, unit)
            break
    return human_integer


class UploadProgressBar(object):
    def __init__(self, filename, end_message="", end_nl=True):
        file_size = os.path.getsize(filename)
        file_size_human = format_bytes(file_size)
        click.echo("Uploading file (total size {})...".format(file_size_human))

        self.length = file_size
        self.time_last = int(round(time.time() * 1000)) - 1000
        self.bytes_last = 0
        self.bytes_diff = 0
        self.is_end = False

        self.end_message = end_message
        self.end_nl = end_nl
        if not self.end_nl:
            self.end_suffix = " "
        else:
            self.end_suffix = ""

        self.bar = click.progressbar(length=self.length, show_eta=True)

    def update(self, monitor):
        bytes_cur = monitor.bytes_read
        self.bytes_diff += bytes_cur - self.bytes_last
        if self.bytes_last == bytes_cur:
            self.is_end = True
        self.bytes_last = bytes_cur

        time_cur = int(round(time.time() * 1000))
        if (time_cur - 1000) > self.time_last:
            self.time_last = time_cur
            self.bar.update(self.bytes_diff)
            self.bytes_diff = 0

        if self.is_end:
            self.bar.update(self.bytes_diff)
            self.bytes_diff = 0
            click.echo()
            click.echo()
            if self.end_message:
                click.echo(self.end_message + self.end_suffix, nl=self.end_nl)


class ErrorResponse(requests.Response):
    def __init__(self, json_data, status_code):
        self.json_data = json_data
        self.status_code = status_code

    def json(self):
        return self.json_data


def call_api(
    config,
    operation,
    request_uri,
    headers={},
    params=None,
    data=None,
    files=None,
):
    # Set the connect timeout to 2 seconds but extremely long (48 hour) data timeout
    timeout = (2.05, 172800)

    # Craft the URI
    uri = "{}://{}{}{}".format(
        config["api_scheme"], config["api_host"], config["api_prefix"], request_uri
    )

    # Craft the authentication header if required
    if config["api_key"]:
        headers["X-Api-Key"] = config["api_key"]

    # Determine the request type and hit the API
    disable_warnings()
    try:
        if operation == "get":
            response = requests.get(
                uri,
                timeout=timeout,
                headers=headers,
                params=params,
                data=data,
                verify=config["verify_ssl"],
            )
        if operation == "post":
            response = requests.post(
                uri,
                timeout=timeout,
                headers=headers,
                params=params,
                data=data,
                files=files,
                verify=config["verify_ssl"],
            )
        if operation == "put":
            response = requests.put(
                uri,
                timeout=timeout,
                headers=headers,
                params=params,
                data=data,
                files=files,
                verify=config["verify_ssl"],
            )
        if operation == "patch":
            response = requests.patch(
                uri,
                timeout=timeout,
                headers=headers,
                params=params,
                data=data,
                verify=config["verify_ssl"],
            )
        if operation == "delete":
            response = requests.delete(
                uri,
                timeout=timeout,
                headers=headers,
                params=params,
                data=data,
                verify=config["verify_ssl"],
            )
    except Exception as e:
        message = "Failed to connect to the API: {}".format(e)
        response = ErrorResponse({"message": message}, 500)

    # Display debug output
    if config["debug"]:
        click.echo("API endpoint: {}".format(uri), err=True)
        click.echo("Response code: {}".format(response.status_code), err=True)
        click.echo("Response headers: {}".format(response.headers), err=True)
        click.echo(err=True)

    # Return the response object
    return response
File diff suppressed because it is too large
@@ -1,709 +0,0 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
# node.py - PVC CLI client function library, node management
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
import time
|
||||
|
||||
import pvc.lib.ansiprint as ansiprint
|
||||
from pvc.lib.common import call_api
|
||||
|
||||
|
||||
#
|
||||
# Primary functions
|
||||
#
|
||||
def node_coordinator_state(config, node, action):
|
||||
"""
|
||||
Set node coordinator state state (primary/secondary)
|
||||
|
||||
API endpoint: POST /api/v1/node/{node}/coordinator-state
|
||||
API arguments: action={action}
|
||||
API schema: {"message": "{data}"}
|
||||
"""
|
||||
params = {"state": action}
|
||||
response = call_api(
|
||||
config,
|
||||
"post",
|
||||
"/node/{node}/coordinator-state".format(node=node),
|
||||
params=params,
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
retstatus = True
|
||||
else:
|
||||
retstatus = False
|
||||
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def node_domain_state(config, node, action, wait):
|
||||
"""
|
||||
Set node domain state state (flush/ready)
|
||||
|
||||
API endpoint: POST /api/v1/node/{node}/domain-state
|
||||
API arguments: action={action}, wait={wait}
|
||||
API schema: {"message": "{data}"}
|
||||
"""
|
||||
params = {"state": action, "wait": str(wait).lower()}
|
||||
response = call_api(
|
||||
config, "post", "/node/{node}/domain-state".format(node=node), params=params
|
||||
)
|
||||
|
||||
if response.status_code == 200:
|
||||
retstatus = True
|
||||
else:
|
||||
retstatus = False
|
||||
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def view_node_log(config, node, lines=100):
|
||||
"""
|
||||
Return node log lines from the API (and display them in a pager in the main CLI)
|
||||
|
||||
API endpoint: GET /node/{node}/log
|
||||
API arguments: lines={lines}
|
||||
API schema: {"name":"{node}","data":"{node_log}"}
|
||||
"""
|
||||
params = {"lines": lines}
|
||||
response = call_api(
|
||||
config, "get", "/node/{node}/log".format(node=node), params=params
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
node_log = response.json()["data"]
|
||||
|
||||
# Shrink the log buffer to length lines
|
||||
shrunk_log = node_log.split("\n")[-lines:]
|
||||
loglines = "\n".join(shrunk_log)
|
||||
|
||||
return True, loglines
|
||||
|
||||
|
||||
def follow_node_log(config, node, lines=10):
|
||||
"""
|
||||
Return and follow node log lines from the API
|
||||
|
||||
API endpoint: GET /node/{node}/log
|
||||
API arguments: lines={lines}
|
||||
API schema: {"name":"{nodename}","data":"{node_log}"}
|
||||
"""
|
||||
# We always grab 200 to match the follow call, but only _show_ `lines` number
|
||||
params = {"lines": 200}
|
||||
response = call_api(
|
||||
config, "get", "/node/{node}/log".format(node=node), params=params
|
||||
)
|
||||
|
||||
if response.status_code != 200:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
# Shrink the log buffer to length lines
|
||||
node_log = response.json()["data"]
|
||||
shrunk_log = node_log.split("\n")[-int(lines) :]
|
||||
loglines = "\n".join(shrunk_log)
|
||||
|
||||
# Print the initial data and begin following
|
||||
print(loglines, end="")
|
||||
print("\n", end="")
|
||||
|
||||
while True:
|
||||
# Grab the next line set (200 is a reasonable number of lines per half-second; any more are skipped)
|
||||
try:
|
||||
params = {"lines": 200}
|
||||
response = call_api(
|
||||
config, "get", "/node/{node}/log".format(node=node), params=params
|
||||
)
|
||||
new_node_log = response.json()["data"]
|
||||
except Exception:
|
||||
break
|
||||
# Split the new and old log strings into constitutent lines
|
||||
old_node_loglines = node_log.split("\n")
|
||||
new_node_loglines = new_node_log.split("\n")
|
||||
|
||||
# Set the node log to the new log value for the next iteration
|
||||
node_log = new_node_log
|
||||
|
||||
# Get the difference between the two sets of lines
|
||||
old_node_loglines_set = set(old_node_loglines)
|
||||
diff_node_loglines = [
|
||||
x for x in new_node_loglines if x not in old_node_loglines_set
|
||||
]
|
||||
|
||||
# If there's a difference, print it out
|
||||
if len(diff_node_loglines) > 0:
|
||||
print("\n".join(diff_node_loglines), end="")
|
||||
print("\n", end="")
|
||||
|
||||
# Wait half a second
|
||||
time.sleep(0.5)
|
||||
|
||||
return True, ""
|
||||
|
||||
|
||||
def node_info(config, node):
|
||||
"""
|
||||
Get information about node
|
||||
|
||||
API endpoint: GET /api/v1/node/{node}
|
||||
API arguments:
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
response = call_api(config, "get", "/node/{node}".format(node=node))
|
||||
|
||||
if response.status_code == 200:
|
||||
if isinstance(response.json(), list) and len(response.json()) != 1:
|
||||
# No exact match, return not found
|
||||
return False, "Node not found."
|
||||
else:
|
||||
# Return a single instance if the response is a list
|
||||
if isinstance(response.json(), list):
|
||||
return True, response.json()[0]
|
||||
# This shouldn't happen, but is here just in case
|
||||
else:
|
||||
return True, response.json()
|
||||
else:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
|
||||
def node_list(
|
||||
config, limit, target_daemon_state, target_coordinator_state, target_domain_state
|
||||
):
|
||||
"""
|
||||
Get list information about nodes (limited by {limit})
|
||||
|
||||
API endpoint: GET /api/v1/node
|
||||
API arguments: limit={limit}
|
||||
API schema: [{json_data_object},{json_data_object},etc.]
|
||||
"""
|
||||
params = dict()
|
||||
if limit:
|
||||
params["limit"] = limit
|
||||
if target_daemon_state:
|
||||
params["daemon_state"] = target_daemon_state
|
||||
if target_coordinator_state:
|
||||
params["coordinator_state"] = target_coordinator_state
|
||||
if target_domain_state:
|
||||
params["domain_state"] = target_domain_state
|
||||
|
||||
response = call_api(config, "get", "/node", params=params)
|
||||
|
||||
if response.status_code == 200:
|
||||
return True, response.json()
|
||||
else:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
|
||||
#
|
||||
# Output display functions
|
||||
#
|
||||
def getOutputColours(node_information):
|
||||
node_health = node_information.get("health", "N/A")
|
||||
if isinstance(node_health, int):
|
||||
if node_health <= 50:
|
||||
health_colour = ansiprint.red()
|
||||
elif node_health <= 90:
|
||||
health_colour = ansiprint.yellow()
|
||||
elif node_health <= 100:
|
||||
health_colour = ansiprint.green()
|
||||
else:
|
||||
health_colour = ansiprint.blue()
|
||||
else:
|
||||
health_colour = ansiprint.blue()
|
||||
|
||||
if node_information["daemon_state"] == "run":
|
||||
daemon_state_colour = ansiprint.green()
|
||||
elif node_information["daemon_state"] == "stop":
|
||||
daemon_state_colour = ansiprint.red()
|
||||
elif node_information["daemon_state"] == "shutdown":
|
||||
daemon_state_colour = ansiprint.yellow()
|
||||
elif node_information["daemon_state"] == "init":
|
||||
daemon_state_colour = ansiprint.yellow()
|
||||
elif node_information["daemon_state"] == "dead":
|
||||
daemon_state_colour = ansiprint.red() + ansiprint.bold()
|
||||
else:
|
||||
daemon_state_colour = ansiprint.blue()
|
||||
|
||||
if node_information["coordinator_state"] == "primary":
|
||||
coordinator_state_colour = ansiprint.green()
|
||||
elif node_information["coordinator_state"] == "secondary":
|
||||
coordinator_state_colour = ansiprint.blue()
|
||||
else:
|
||||
coordinator_state_colour = ansiprint.cyan()
|
||||
|
||||
if node_information["domain_state"] == "ready":
|
||||
domain_state_colour = ansiprint.green()
|
||||
else:
|
||||
domain_state_colour = ansiprint.blue()
|
||||
|
||||
if node_information["memory"]["allocated"] > node_information["memory"]["total"]:
|
||||
mem_allocated_colour = ansiprint.yellow()
|
||||
else:
|
||||
mem_allocated_colour = ""
|
||||
|
||||
if node_information["memory"]["provisioned"] > node_information["memory"]["total"]:
|
||||
mem_provisioned_colour = ansiprint.yellow()
|
||||
else:
|
||||
mem_provisioned_colour = ""
|
||||
|
||||
return (
|
||||
health_colour,
|
||||
daemon_state_colour,
|
||||
coordinator_state_colour,
|
||||
domain_state_colour,
|
||||
mem_allocated_colour,
|
||||
mem_provisioned_colour,
|
||||
)
|
||||
|
||||
|
||||
def format_info(node_information, long_output):
|
||||
(
|
||||
health_colour,
|
||||
daemon_state_colour,
|
||||
coordinator_state_colour,
|
||||
domain_state_colour,
|
||||
mem_allocated_colour,
|
||||
mem_provisioned_colour,
|
||||
) = getOutputColours(node_information)
|
||||
|
||||
# Format a nice output; do this line-by-line then concat the elements at the end
|
||||
ainformation = []
|
||||
# Basic information
|
||||
ainformation.append(
|
||||
"{}Name:{} {}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
node_information["name"],
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}PVC Version:{} {}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
node_information["pvc_version"],
|
||||
)
|
||||
)
|
||||
|
||||
node_health = node_information.get("health", "N/A")
|
||||
if isinstance(node_health, int):
|
||||
node_health_text = f"{node_health}%"
|
||||
else:
|
||||
node_health_text = node_health
|
||||
ainformation.append(
|
||||
"{}Health:{} {}{}{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
health_colour,
|
||||
node_health_text,
|
||||
ansiprint.end(),
|
||||
)
|
||||
)
|
||||
|
||||
node_health_details = node_information.get("health_details", [])
|
||||
if long_output:
|
||||
node_health_messages = "\n ".join(
|
||||
[f"{plugin['name']}: {plugin['message']}" for plugin in node_health_details]
|
||||
)
|
||||
else:
|
||||
node_health_messages = "\n ".join(
|
||||
[
|
||||
f"{plugin['name']}: {plugin['message']}"
|
||||
for plugin in node_health_details
|
||||
if int(plugin.get("health_delta", 0)) > 0
|
||||
]
|
||||
)
|
||||
|
||||
if len(node_health_messages) > 0:
|
||||
ainformation.append(
|
||||
"{}Health Plugin Details:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_health_messages
|
||||
)
|
||||
)
|
||||
ainformation.append("")
|
||||
|
||||
ainformation.append(
|
||||
"{}Daemon State:{} {}{}{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
daemon_state_colour,
|
||||
node_information["daemon_state"],
|
||||
ansiprint.end(),
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Coordinator State:{} {}{}{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
coordinator_state_colour,
|
||||
node_information["coordinator_state"],
|
||||
ansiprint.end(),
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Domain State:{} {}{}{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
domain_state_colour,
|
||||
node_information["domain_state"],
|
||||
ansiprint.end(),
|
||||
)
|
||||
)
|
||||
if long_output:
|
||||
ainformation.append("")
|
||||
ainformation.append(
|
||||
"{}Architecture:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["arch"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Operating System:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["os"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Kernel Version:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["kernel"]
|
||||
)
|
||||
)
|
||||
ainformation.append("")
|
||||
ainformation.append(
|
||||
"{}Active VM Count:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["domains_count"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Host CPUs:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["vcpu"]["total"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}vCPUs:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["vcpu"]["allocated"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Load:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["load"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Total RAM (MiB):{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["memory"]["total"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Used RAM (MiB):{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["memory"]["used"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Free RAM (MiB):{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), node_information["memory"]["free"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Allocated RAM (MiB):{} {}{}{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
mem_allocated_colour,
|
||||
node_information["memory"]["allocated"],
|
||||
ansiprint.end(),
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Provisioned RAM (MiB):{} {}{}{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
mem_provisioned_colour,
|
||||
node_information["memory"]["provisioned"],
|
||||
ansiprint.end(),
|
||||
)
|
||||
)
|
||||
|
||||
# Join it all together
|
||||
ainformation.append("")
|
||||
return "\n".join(ainformation)
|
||||
|
||||
|
||||
def format_list(node_list, raw):
|
||||
if raw:
|
||||
ainformation = list()
|
||||
for node in sorted(item["name"] for item in node_list):
|
||||
ainformation.append(node)
|
||||
return "\n".join(ainformation)
|
||||
|
||||
node_list_output = []
|
||||
|
||||
# Determine optimal column widths
|
||||
node_name_length = 5
|
||||
pvc_version_length = 8
|
||||
health_length = 7
|
||||
daemon_state_length = 7
|
||||
coordinator_state_length = 12
|
||||
domain_state_length = 7
|
||||
domains_count_length = 4
|
||||
cpu_count_length = 6
|
||||
load_length = 5
|
||||
mem_total_length = 6
|
||||
mem_used_length = 5
|
||||
mem_free_length = 5
|
||||
mem_alloc_length = 6
|
||||
mem_prov_length = 5
|
||||
for node_information in node_list:
|
||||
# node_name column
|
||||
_node_name_length = len(node_information["name"]) + 1
|
||||
if _node_name_length > node_name_length:
|
||||
node_name_length = _node_name_length
|
||||
# node_pvc_version column
|
||||
_pvc_version_length = len(node_information.get("pvc_version", "N/A")) + 1
|
||||
if _pvc_version_length > pvc_version_length:
|
||||
pvc_version_length = _pvc_version_length
|
||||
# node_health column
|
||||
node_health = node_information.get("health", "N/A")
|
||||
if isinstance(node_health, int):
|
||||
node_health_text = f"{node_health}%"
|
||||
else:
|
||||
node_health_text = node_health
|
||||
_health_length = len(node_health_text) + 1
|
||||
if _health_length > health_length:
|
||||
health_length = _health_length
|
||||
# daemon_state column
|
||||
_daemon_state_length = len(node_information["daemon_state"]) + 1
|
||||
if _daemon_state_length > daemon_state_length:
|
||||
daemon_state_length = _daemon_state_length
|
||||
# coordinator_state column
|
||||
_coordinator_state_length = len(node_information["coordinator_state"]) + 1
|
||||
if _coordinator_state_length > coordinator_state_length:
|
||||
coordinator_state_length = _coordinator_state_length
|
||||
# domain_state column
|
||||
_domain_state_length = len(node_information["domain_state"]) + 1
|
||||
if _domain_state_length > domain_state_length:
|
||||
domain_state_length = _domain_state_length
|
||||
# domains_count column
|
||||
_domains_count_length = len(str(node_information["domains_count"])) + 1
|
||||
if _domains_count_length > domains_count_length:
|
||||
domains_count_length = _domains_count_length
|
||||
# cpu_count column
|
||||
_cpu_count_length = len(str(node_information["cpu_count"])) + 1
|
||||
if _cpu_count_length > cpu_count_length:
|
||||
cpu_count_length = _cpu_count_length
|
||||
# load column
|
||||
_load_length = len(str(node_information["load"])) + 1
|
||||
if _load_length > load_length:
|
||||
load_length = _load_length
|
||||
# mem_total column
|
||||
_mem_total_length = len(str(node_information["memory"]["total"])) + 1
|
||||
if _mem_total_length > mem_total_length:
|
||||
mem_total_length = _mem_total_length
|
||||
# mem_used column
|
||||
_mem_used_length = len(str(node_information["memory"]["used"])) + 1
|
||||
if _mem_used_length > mem_used_length:
|
||||
mem_used_length = _mem_used_length
|
||||
# mem_free column
|
||||
_mem_free_length = len(str(node_information["memory"]["free"])) + 1
|
||||
if _mem_free_length > mem_free_length:
|
||||
mem_free_length = _mem_free_length
|
||||
# mem_alloc column
|
||||
_mem_alloc_length = len(str(node_information["memory"]["allocated"])) + 1
|
||||
if _mem_alloc_length > mem_alloc_length:
|
||||
mem_alloc_length = _mem_alloc_length
|
||||
|
||||
# mem_prov column
|
||||
_mem_prov_length = len(str(node_information["memory"]["provisioned"])) + 1
|
||||
if _mem_prov_length > mem_prov_length:
|
||||
mem_prov_length = _mem_prov_length
|
||||
|
||||
# Format the string (header)
|
||||
node_list_output.append(
|
||||
"{bold}{node_header: <{node_header_length}} {state_header: <{state_header_length}} {resource_header: <{resource_header_length}} {memory_header: <{memory_header_length}}{end_bold}".format(
|
||||
node_header_length=node_name_length
|
||||
+ pvc_version_length
|
||||
+ health_length
|
||||
+ 2,
|
||||
state_header_length=daemon_state_length
|
||||
+ coordinator_state_length
|
||||
+ domain_state_length
|
||||
+ 2,
|
||||
resource_header_length=domains_count_length
|
||||
+ cpu_count_length
|
||||
+ load_length
|
||||
+ 2,
|
||||
memory_header_length=mem_total_length
|
||||
+ mem_used_length
|
||||
+ mem_free_length
|
||||
+ mem_alloc_length
|
||||
+ mem_prov_length
|
||||
+ 4,
|
||||
bold=ansiprint.bold(),
|
||||
end_bold=ansiprint.end(),
|
||||
node_header="Nodes "
|
||||
+ "".join(
|
||||
[
|
||||
"-"
|
||||
for _ in range(
|
||||
6, node_name_length + pvc_version_length + health_length + 1
|
||||
)
|
||||
]
|
||||
),
|
||||
state_header="States "
|
||||
+ "".join(
|
||||
[
|
||||
"-"
|
||||
for _ in range(
|
||||
7,
|
||||
daemon_state_length
|
||||
+ coordinator_state_length
|
||||
+ domain_state_length
|
||||
+ 1,
|
||||
)
|
||||
]
|
||||
),
|
||||
resource_header="Resources "
|
||||
+ "".join(
|
||||
[
|
||||
"-"
|
||||
for _ in range(
|
||||
10, domains_count_length + cpu_count_length + load_length + 1
|
||||
)
|
||||
]
|
||||
),
|
||||
memory_header="Memory (M) "
|
||||
+ "".join(
|
||||
[
|
||||
"-"
|
||||
for _ in range(
|
||||
11,
|
||||
mem_total_length
|
||||
+ mem_used_length
|
||||
+ mem_free_length
|
||||
+ mem_alloc_length
|
||||
+ mem_prov_length
|
||||
+ 3,
|
||||
)
|
||||
]
|
||||
),
|
||||
)
|
||||
)
|
||||
|
||||
node_list_output.append(
|
||||
"{bold}{node_name: <{node_name_length}} {node_pvc_version: <{pvc_version_length}} {node_health: <{health_length}} \
|
||||
{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
|
||||
{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
|
||||
{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}".format(
|
||||
node_name_length=node_name_length,
|
||||
pvc_version_length=pvc_version_length,
|
||||
health_length=health_length,
|
||||
daemon_state_length=daemon_state_length,
|
||||
coordinator_state_length=coordinator_state_length,
|
||||
domain_state_length=domain_state_length,
|
||||
domains_count_length=domains_count_length,
|
||||
cpu_count_length=cpu_count_length,
|
||||
load_length=load_length,
|
||||
mem_total_length=mem_total_length,
|
||||
mem_used_length=mem_used_length,
|
||||
mem_free_length=mem_free_length,
|
||||
mem_alloc_length=mem_alloc_length,
|
||||
mem_prov_length=mem_prov_length,
|
||||
bold=ansiprint.bold(),
|
||||
end_bold=ansiprint.end(),
|
||||
daemon_state_colour="",
|
||||
coordinator_state_colour="",
|
||||
domain_state_colour="",
|
||||
end_colour="",
|
||||
node_name="Name",
|
||||
node_pvc_version="Version",
|
||||
node_health="Health",
|
||||
node_daemon_state="Daemon",
|
||||
node_coordinator_state="Coordinator",
|
||||
node_domain_state="Domain",
|
||||
node_domains_count="VMs",
|
||||
node_cpu_count="vCPUs",
|
||||
node_load="Load",
|
||||
node_mem_total="Total",
|
||||
node_mem_used="Used",
|
||||
node_mem_free="Free",
|
||||
node_mem_allocated="Alloc",
|
||||
node_mem_provisioned="Prov",
|
||||
)
|
||||
)
|
||||
|
||||
# Format the string (elements)
|
||||
for node_information in sorted(node_list, key=lambda n: n["name"]):
|
||||
(
|
||||
health_colour,
|
||||
daemon_state_colour,
|
||||
coordinator_state_colour,
|
||||
domain_state_colour,
|
||||
mem_allocated_colour,
|
||||
mem_provisioned_colour,
|
||||
) = getOutputColours(node_information)
|
||||
|
||||
node_health = node_information.get("health", "N/A")
|
||||
if isinstance(node_health, int):
|
||||
node_health_text = f"{node_health}%"
|
||||
else:
|
||||
node_health_text = node_health
|
||||
|
||||
node_list_output.append(
|
||||
"{bold}{node_name: <{node_name_length}} {node_pvc_version: <{pvc_version_length}} {health_colour}{node_health: <{health_length}}{end_colour} \
|
||||
{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \
|
||||
{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \
|
||||
{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {mem_allocated_colour}{node_mem_allocated: <{mem_alloc_length}}{end_colour} {mem_provisioned_colour}{node_mem_provisioned: <{mem_prov_length}}{end_colour}{end_bold}".format(
|
||||
node_name_length=node_name_length,
|
||||
pvc_version_length=pvc_version_length,
|
||||
health_length=health_length,
|
||||
daemon_state_length=daemon_state_length,
|
||||
coordinator_state_length=coordinator_state_length,
|
||||
domain_state_length=domain_state_length,
|
||||
domains_count_length=domains_count_length,
|
||||
cpu_count_length=cpu_count_length,
|
||||
load_length=load_length,
|
||||
mem_total_length=mem_total_length,
|
||||
mem_used_length=mem_used_length,
|
||||
mem_free_length=mem_free_length,
|
||||
mem_alloc_length=mem_alloc_length,
|
||||
mem_prov_length=mem_prov_length,
|
||||
bold="",
|
||||
end_bold="",
|
||||
health_colour=health_colour,
|
||||
daemon_state_colour=daemon_state_colour,
|
||||
coordinator_state_colour=coordinator_state_colour,
|
||||
domain_state_colour=domain_state_colour,
|
||||
mem_allocated_colour=mem_allocated_colour,
|
||||
mem_provisioned_colour=mem_allocated_colour,
|
||||
end_colour=ansiprint.end(),
|
||||
node_name=node_information["name"],
|
||||
node_pvc_version=node_information.get("pvc_version", "N/A"),
|
||||
node_health=node_health_text,
|
||||
node_daemon_state=node_information["daemon_state"],
|
||||
node_coordinator_state=node_information["coordinator_state"],
|
||||
node_domain_state=node_information["domain_state"],
|
||||
node_domains_count=node_information["domains_count"],
|
||||
node_cpu_count=node_information["vcpu"]["allocated"],
|
||||
node_load=node_information["load"],
|
||||
node_mem_total=node_information["memory"]["total"],
|
||||
node_mem_used=node_information["memory"]["used"],
|
||||
node_mem_free=node_information["memory"]["free"],
|
||||
node_mem_allocated=node_information["memory"]["allocated"],
|
||||
node_mem_provisioned=node_information["memory"]["provisioned"],
|
||||
)
|
||||
)
|
||||
|
||||
return "\n".join(node_list_output)
|
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,102 +0,0 @@
#!/usr/bin/env python3

# zkhandler.py - Secure versioned ZooKeeper updates
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import uuid


# Exists function
def exists(zk_conn, key):
    stat = zk_conn.exists(key)
    if stat:
        return True
    else:
        return False


# Child list function
def listchildren(zk_conn, key):
    children = zk_conn.get_children(key)
    return children


# Delete key function
def deletekey(zk_conn, key, recursive=True):
    zk_conn.delete(key, recursive=recursive)


# Data read function
def readdata(zk_conn, key):
    data_raw = zk_conn.get(key)
    data = data_raw[0].decode("utf8")
    return data


# Data write function
def writedata(zk_conn, kv):
    # Start up a transaction
    zk_transaction = zk_conn.transaction()

    # Proceed one KV pair at a time
    for key in sorted(kv):
        data = kv[key]

        # Check if this key already exists or not
        if not zk_conn.exists(key):
            # We're creating a new key
            zk_transaction.create(key, str(data).encode("utf8"))
        else:
            # We're updating a key with version validation
            orig_data = zk_conn.get(key)
            version = orig_data[1].version

            # Set what we expect the new version to be
            new_version = version + 1

            # Update the data
            zk_transaction.set_data(key, str(data).encode("utf8"))

            # Set up the check
            try:
                zk_transaction.check(key, new_version)
            except TypeError:
                print('Zookeeper key "{}" does not match expected version'.format(key))
                return False

    # Commit the transaction
    try:
        zk_transaction.commit()
        return True
    except Exception:
        return False


# Write lock function
def writelock(zk_conn, key):
    lock_id = str(uuid.uuid1())
    lock = zk_conn.WriteLock("{}".format(key), lock_id)
    return lock


# Read lock function
def readlock(zk_conn, key):
    lock_id = str(uuid.uuid1())
    lock = zk_conn.ReadLock("{}".format(key), lock_id)
    return lock
File diff suppressed because it is too large
@@ -1,32 +0,0 @@
# PVC helper scripts

These helper scripts are included with the PVC client to aid administrators in some meta-functions.

The following scripts are provided for use:

## `migrate_vm`

Migrates a VM, with downtime, from one PVC cluster to another.

`migrate_vm <vm> <source_cluster> <destination_cluster>`

### Arguments

* `vm`: The virtual machine to migrate
* `source_cluster`: The source PVC cluster; must be a valid cluster to the local PVC client
* `destination_cluster`: The destination PVC cluster; must be a valid cluster to the local PVC client

## `import_vm`

Imports a VM from another platform into a PVC cluster.

## `export_vm`

Exports a (stopped) VM from a PVC cluster to another platform.

`export_vm <vm> <source_cluster>`

### Arguments

* `vm`: The virtual machine to migrate
* `source_cluster`: The source PVC cluster; must be a valid cluster to the local PVC client
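For context on how the removed helpers above were meant to be combined, here is a minimal hypothetical example; the VM and cluster names are placeholders, not taken from this commit.

```bash
# Hypothetical invocations of the helper scripts documented above;
# "myvm", "oldcluster" and "newcluster" are placeholder names.
migrate_vm myvm oldcluster newcluster   # move the VM between clusters, with downtime
export_vm myvm newcluster               # dump its XML config and disk images to local files
```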
@@ -1,98 +0,0 @@
#!/usr/bin/env bash

# export_vm - Exports a VM from a PVC cluster to local files
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

set -o errexit
set -o pipefail

usage() {
    echo -e "Export a VM from a PVC cluster to local files."
    echo -e "Usage:"
    echo -e "  $0 <vm> <source_cluster> [<destination_directory>]"
    echo -e ""
    echo -e "Important information:"
    echo -e " * The local user must have valid SSH access to the primary coordinator in the source_cluster."
    echo -e " * The user on the cluster primary coordinator must have 'sudo' access."
    echo -e " * If the VM is not in 'stop' state, it will be shut down."
    echo -e " * Do not switch the cluster primary coordinator while the script is running."
    echo -e " * Ensure you have enough space in <destination_directory> to store all VM disk images."
}

fail() {
    echo -e "$@"
    exit 1
}

# Arguments
if [[ -z ${1} || -z ${2} ]]; then
    usage
    exit 1
fi
source_vm="${1}"
source_cluster="${2}"
if [[ -n "${3}" ]]; then
    destination_directory="${3}"
else
    destination_directory="."
fi

# Verify the cluster is reachable
pvc -c ${source_cluster} status &>/dev/null || fail "Specified source_cluster is not accessible"

# Determine the connection IP
cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${source_cluster}" | awk '{ print $2 }' )"

# Attempt to connect to the cluster address
ssh ${cluster_address} which pvc &>/dev/null || fail "Could not SSH to source_cluster primary coordinator host"

# Verify that the VM exists
pvc -c ${source_cluster} vm info ${source_vm} &>/dev/null || fail "Specified VM is not present on the cluster"

echo "Verification complete."

# Shut down the VM
echo -n "Shutting down VM..."
set +o errexit
pvc -c ${source_cluster} vm shutdown ${source_vm} &>/dev/null
shutdown_success=$?
while ! pvc -c ${source_cluster} vm info ${source_vm} 2>/dev/null | grep '^State' | grep -q -E 'stop|disable'; do
    sleep 1
    echo -n "."
done
set -o errexit
echo " done."

# Dump the XML file
echo -n "Exporting VM configuration file... "
pvc -c ${source_cluster} vm dump ${source_vm} 1> ${destination_directory}/${source_vm}.xml 2>/dev/null
echo "done".

# Determine the list of volumes in this VM
volume_list="$( pvc -c ${source_cluster} vm info --long ${source_vm} 2>/dev/null | grep -w 'rbd' | awk '{ print $3 }' )"
for volume in ${volume_list}; do
    volume_pool="$( awk -F '/' '{ print $1 }' <<<"${volume}" )"
    volume_name="$( awk -F '/' '{ print $2 }' <<<"${volume}" )"
    volume_size="$( pvc -c ${source_cluster} storage volume list -p ${volume_pool} ${volume_name} 2>/dev/null | grep "^${volume_name}" | awk '{ print $3 }' )"
    echo -n "Exporting disk ${volume_name} (${volume_size})... "
    ssh ${cluster_address} sudo rbd map ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume}"
    ssh ${cluster_address} sudo dd if="/dev/rbd/${volume_pool}/${volume_name}" bs=1M 2>/dev/null | dd bs=1M of="${destination_directory}/${volume_name}.img" 2>/dev/null
    ssh ${cluster_address} sudo rbd unmap ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume}"
    echo "done."
done
@ -1,118 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# force_single_node - Manually promote a single coordinator node from a degraded cluster
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
usage() {
|
||||
echo -e "Manually promote a single coordinator node from a degraded cluster"
|
||||
echo -e ""
|
||||
echo -e "DANGER: This action will cause a permanent split-brain within the cluster"
|
||||
echo -e " which will have to be corrected manually upon cluster restoration."
|
||||
echo -e ""
|
||||
echo -e "This script is primarily designed for small clusters in situations where 2"
|
||||
echo -e "of the 3 coordinators have become unreachable or shut down. It will promote"
|
||||
echo -e "the remaining lone_node to act as a standalone coordinator, allowing basic"
|
||||
echo -e "cluster functionality to continue in a heavily degraded state until the"
|
||||
echo -e "situation can be rectified. This should only be done in exceptional cases"
|
||||
echo -e "as a disaster recovery mechanism when the remaining nodes will remain down"
|
||||
echo -e "for a significant amount of time but some VMs are required to run. In general,"
|
||||
echo -e "use of this script is not advisable."
|
||||
echo -e ""
|
||||
echo -e "Usage:"
|
||||
echo -e " $0 <target_cluster> <lone_node>"
|
||||
echo -e ""
|
||||
echo -e "Important information:"
|
||||
echo -e " * The lone_node must be a fully-qualified name that is directly reachable from"
|
||||
echo -e " the local system via SSH."
|
||||
echo -e " * The local user must have valid SSH access to the lone_node in the cluster."
|
||||
echo -e " * The user on the cluster node must have 'sudo' access."
|
||||
}
|
||||
|
||||
fail() {
|
||||
echo -e "$@"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Arguments
|
||||
if [[ -z ${1} || -z ${2} ]]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
target_cluster="${1}"
|
||||
lone_node="${2}"
|
||||
lone_node_shortname="${lone_node%%.*}"
|
||||
|
||||
# Attempt to connect to the node
|
||||
ssh ${lone_node} which pvc &>/dev/null || fail "Could not SSH to the lone_node host"
|
||||
|
||||
echo "Verification complete."
|
||||
|
||||
echo -n "Allowing Ceph single-node operation... "
|
||||
temp_monmap="$( ssh ${lone_node} mktemp )"
|
||||
ssh ${lone_node} "sudo systemctl stop ceph-mon@${lone_node_shortname}" &>/dev/null
|
||||
ssh ${lone_node} "sudo ceph-mon -i ${lone_node_shortname} --extract-monmap ${temp_monmap}" &>/dev/null
|
||||
ssh ${lone_node} "sudo cp ${temp_monmap} /etc/ceph/monmap.orig" &>/dev/null
|
||||
mon_list="$( ssh ${lone_node} strings ${temp_monmap} | sort | uniq )"
|
||||
for mon in ${mon_list}; do
|
||||
if [[ ${mon} == ${lone_node_shortname} ]]; then
|
||||
continue
|
||||
fi
|
||||
ssh ${lone_node} "sudo monmaptool ${temp_monmap} --rm ${mon}" &>/dev/null
|
||||
done
|
||||
ssh ${lone_node} "sudo ceph-mon -i ${lone_node_shortname} --inject-monmap ${temp_monmap}" &>/dev/null
|
||||
ssh ${lone_node} "sudo systemctl start ceph-mon@${lone_node_shortname}" &>/dev/null
|
||||
sleep 5
|
||||
ssh ${lone_node} "sudo ceph osd set noout" &>/dev/null
|
||||
echo "done."
|
||||
echo -e "Restoration steps:"
|
||||
echo -e " sudo systemctl stop ceph-mon@${lone_node_shortname}"
|
||||
echo -e " sudo ceph-mon -i ${lone_node_shortname} --inject-monmap /etc/ceph/monmap.orig"
|
||||
echo -e " sudo systemctl start ceph-mon@${lone_node_shortname}"
|
||||
echo -e " sudo ceph osd unset noout"
|
||||
|
||||
echo -n "Allowing Zookeeper single-node operation... "
|
||||
temp_zoocfg="$( ssh ${lone_node} mktemp )"
|
||||
ssh ${lone_node} "sudo systemctl stop zookeeper"
|
||||
ssh ${lone_node} "sudo awk -v lone_node=${lone_node_shortname} '{
|
||||
FS=\"=|:\"
|
||||
if ( \$1 ~ /^server/ ){
|
||||
if (\$2 == lone_node) {
|
||||
print \$0
|
||||
} else {
|
||||
print \"#\" \$0
|
||||
}
|
||||
} else {
|
||||
print \$0
|
||||
}
|
||||
}' /etc/zookeeper/conf/zoo.cfg > ${temp_zoocfg}"
|
||||
ssh ${lone_node} "sudo mv /etc/zookeeper/conf/zoo.cfg /etc/zookeeper/conf/zoo.cfg.orig"
|
||||
ssh ${lone_node} "sudo mv ${temp_zoocfg} /etc/zookeeper/conf/zoo.cfg"
|
||||
ssh ${lone_node} "sudo systemctl start zookeeper"
|
||||
echo "done."
|
||||
echo -e "Restoration steps:"
|
||||
echo -e " sudo systemctl stop zookeeper"
|
||||
echo -e " sudo mv /etc/zookeeper/conf/zoo.cfg.orig /etc/zookeeper/conf/zoo.cfg"
|
||||
echo -e " sudo systemctl start zookeeper"
|
||||
ssh ${lone_node} "sudo systemctl stop ceph-mon@${lone_node_shortname}"
|
||||
|
||||
echo ""
|
||||
ssh ${lone_node} "sudo pvc status 2>/dev/null"
|
|
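For reference, a hypothetical invocation of this script; the cluster name and node FQDN are placeholders:

# Promote the sole surviving coordinator of a degraded cluster
./force_single_node mycluster hv1.example.com

The awk rewrite above only comments out the Zookeeper server entries that do not belong to the lone node. Assuming a stock three-node zoo.cfg (hostnames are illustrative), it would turn

server.1=hv1:2888:3888
server.2=hv2:2888:3888
server.3=hv3:2888:3888

into

server.1=hv1:2888:3888
#server.2=hv2:2888:3888
#server.3=hv3:2888:3888

leaving only the lone node's entry active so Zookeeper can form a quorum of one.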
@ -1,80 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# import_vm - Imports a VM to a PVC cluster from local files
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
usage() {
|
||||
echo -e "Import a VM to a PVC cluster from local files."
|
||||
echo -e "Usage:"
|
||||
echo -e " $0 <destination_cluster> <destination_pool> <vm_configuration_file> <vm_disk_file_1> [<vm_disk_file_2>] [...]"
|
||||
echo -e ""
|
||||
echo -e "Important information:"
|
||||
echo -e " * At least one disk must be specified; all disks that are present in vm_configuration_file"
|
||||
echo -e "  should be specified, though this is not strictly required."
|
||||
echo -e " * Do not switch the cluster primary coordinator while the script is running."
|
||||
echo -e " * Ensure you have enough space on the destination cluster to store all VM disks."
|
||||
}
|
||||
|
||||
fail() {
|
||||
echo -e "$@"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Arguments
|
||||
if [[ -z ${1} || -z ${2} || -z ${3} || -z ${4} ]]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
destination_cluster="${1}"; shift
|
||||
destination_pool="${1}"; shift
|
||||
vm_config_file="${1}"; shift
|
||||
vm_disk_files=( ${@} )
|
||||
|
||||
# Verify the cluster is reachable
|
||||
pvc -c ${destination_cluster} status &>/dev/null || fail "Specified destination_cluster is not accessible"
|
||||
|
||||
# Determine the connection IP
|
||||
cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${destination_cluster}" | awk '{ print $2 }' )"
|
||||
|
||||
echo "Verification complete."
|
||||
|
||||
# Determine information about the VM from the config file
|
||||
parse_xml_field() {
|
||||
field="${1}"
|
||||
line="$( grep -F "<${field}>" ${vm_config_file} )"
|
||||
awk -F '>|<' '{ print $3 }' <<<"${line}"
|
||||
}
|
||||
vm_name="$( parse_xml_field name )"
|
||||
echo "Importing VM ${vm_name}..."
|
||||
pvc -c ${destination_cluster} vm define ${vm_config_file} 2>/dev/null
|
||||
|
||||
# Create the disks on the cluster
|
||||
for disk_file in ${vm_disk_files[@]}; do
|
||||
disk_file_basename="$( basename ${disk_file} )"
|
||||
disk_file_ext="${disk_file_basename##*.}"
|
||||
disk_file_name="$( basename ${disk_file_basename} .${disk_file_ext} )"
|
||||
disk_file_size="$( stat --format="%s" ${disk_file} )"
|
||||
|
||||
echo "Importing disk ${disk_file_name}... "
|
||||
pvc -c ${destination_cluster} storage volume add ${destination_pool} ${disk_file_name} ${disk_file_size}B 2>/dev/null
|
||||
pvc -c ${destination_cluster} storage volume upload ${destination_pool} ${disk_file_name} ${disk_file} 2>/dev/null
|
||||
done
|
|
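A hypothetical run of this script, with placeholder cluster, pool, and file names:

# Define myvm from its libvirt XML and upload one raw disk image into pool 'vms'
./import_vm mycluster vms ./myvm.xml ./myvm_disk0.img

The parse_xml_field helper is a plain grep/awk extraction: given a line such as <name>myvm</name> in the configuration file, splitting on '>' or '<' makes the tag contents the third field, so it prints myvm.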
@ -1,115 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# migrate_vm - Exports a VM from a PVC cluster to another PVC cluster
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
set -o errexit
|
||||
set -o pipefail
|
||||
|
||||
usage() {
|
||||
echo -e "Export a VM from a PVC cluster to another PVC cluster."
|
||||
echo -e "Usage:"
|
||||
echo -e " $0 <vm> <source_cluster> <destination_cluster> <destination_pool>"
|
||||
echo -e ""
|
||||
echo -e "Important information:"
|
||||
echo -e " * The local user must have valid SSH access to the primary coordinator in the source_cluster."
|
||||
echo -e " * The user on the cluster primary coordinator must have 'sudo' access."
|
||||
echo -e " * If the VM is not in 'stop' state, it will be shut down."
|
||||
echo -e " * Do not switch the cluster primary coordinator on either cluster while the script is running."
|
||||
echo -e " * Ensure you have enough space on the target cluster to store all VM disks."
|
||||
}
|
||||
|
||||
fail() {
|
||||
echo -e "$@"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Arguments
|
||||
if [[ -z ${1} || -z ${2} || -z ${3} || -z ${4} ]]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
source_vm="${1}"
|
||||
source_cluster="${2}"
|
||||
destination_cluster="${3}"
|
||||
destination_pool="${4}"
|
||||
|
||||
# Verify each cluster is reachable
|
||||
pvc -c ${source_cluster} status &>/dev/null || fail "Specified source_cluster is not accessible"
|
||||
pvc -c ${destination_cluster} status &>/dev/null || fail "Specified destination_cluster is not accessible"
|
||||
|
||||
# Determine the connection IPs
|
||||
source_cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${source_cluster}" | awk '{ print $2 }' )"
|
||||
destination_cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${destination_cluster}" | awk '{ print $2 }' )"
|
||||
|
||||
# Attempt to connect to the cluster addresses
|
||||
ssh ${source_cluster_address} which pvc &>/dev/null || fail "Could not SSH to source_cluster primary coordinator host"
|
||||
ssh ${destination_cluster_address} which pvc &>/dev/null || fail "Could not SSH to destination_cluster primary coordinator host"
|
||||
|
||||
# Verify that the VM exists
|
||||
pvc -c ${source_cluster} vm info ${source_vm} &>/dev/null || fail "Specified VM is not present on the source cluster"
|
||||
|
||||
echo "Verification complete."
|
||||
|
||||
# Shut down the VM
|
||||
echo -n "Shutting down VM..."
|
||||
set +o errexit
|
||||
pvc -c ${source_cluster} vm shutdown ${source_vm} &>/dev/null
|
||||
shutdown_success=$?
|
||||
while ! pvc -c ${source_cluster} vm info ${source_vm} 2>/dev/null | grep '^State' | grep -q -E 'stop|disable'; do
|
||||
sleep 1
|
||||
echo -n "."
|
||||
done
|
||||
set -o errexit
|
||||
echo " done."
|
||||
|
||||
tempfile="$( mktemp )"
|
||||
|
||||
# Dump the XML file
|
||||
echo -n "Exporting VM configuration file from source cluster... "
|
||||
pvc -c ${source_cluster} vm dump ${source_vm} 1> ${tempfile} 2>/dev/null
|
||||
echo "done."
|
||||
|
||||
# Import the XML file
|
||||
echo -n "Importing VM configuration file to destination cluster... "
|
||||
pvc -c ${destination_cluster} vm define ${tempfile}
|
||||
echo "done."
|
||||
|
||||
rm -f ${tempfile}
|
||||
|
||||
# Determine the list of volumes in this VM
|
||||
volume_list="$( pvc -c ${source_cluster} vm info --long ${source_vm} 2>/dev/null | grep -w 'rbd' | awk '{ print $3 }' )"
|
||||
|
||||
# Parse and migrate each volume
|
||||
for volume in ${volume_list}; do
|
||||
volume_pool="$( awk -F '/' '{ print $1 }' <<<"${volume}" )"
|
||||
volume_name="$( awk -F '/' '{ print $2 }' <<<"${volume}" )"
|
||||
volume_size="$( pvc -c ${source_cluster} storage volume list -p ${volume_pool} ${volume_name} 2>/dev/null | grep "^${volume_name}" | awk '{ print $3 }' )"
|
||||
echo "Transferring disk ${volume_name} (${volume_size})... "
|
||||
pvc -c ${destination_cluster} storage volume add ${destination_pool} ${volume_name} ${volume_size} 2>/dev/null
|
||||
ssh ${source_cluster_address} sudo rbd map ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume} on source cluster"
|
||||
ssh ${destination_cluster_address} sudo rbd map ${destination_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume} on destination cluster"
|
||||
ssh ${source_cluster_address} sudo dd if="/dev/rbd/${volume_pool}/${volume_name}" bs=1M 2>/dev/null | pv | ssh ${destination_cluster_address} sudo dd bs=1M of="/dev/rbd/${destination_pool}/${volume_name}" 2>/dev/null
|
||||
ssh ${source_cluster_address} sudo rbd unmap ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume} on source cluster"
|
||||
ssh ${destination_cluster_address} sudo rbd unmap ${destination_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume} on destination cluster"
|
||||
done
|
||||
|
||||
if [[ ${shutdown_success} -eq 0 ]]; then
|
||||
pvc -c ${destination_cluster} vm start ${source_vm}
|
||||
fi
|
|
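A hypothetical invocation of this script; the VM, cluster, and pool names are placeholders:

# Move 'myvm' from 'oldcluster' into pool 'vms' on 'newcluster'
./migrate_vm myvm oldcluster newcluster vms

The volume data is streamed block-for-block: dd reads the mapped RBD device on the source coordinator, pv reports the throughput of the stream in between, and a second dd writes to the mapped device on the destination. The VM is only started on the destination if the initial shutdown call returned success (shutdown_success).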
@ -1,20 +0,0 @@
|
|||
from setuptools import setup
|
||||
|
||||
setup(
|
||||
name="pvc",
|
||||
version="0.9.63",
|
||||
packages=["pvc", "pvc.lib"],
|
||||
install_requires=[
|
||||
"Click",
|
||||
"PyYAML",
|
||||
"lxml",
|
||||
"colorama",
|
||||
"requests",
|
||||
"requests-toolbelt",
|
||||
],
|
||||
entry_points={
|
||||
"console_scripts": [
|
||||
"pvc = pvc.pvc:cli",
|
||||
],
|
||||
},
|
||||
)
|
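For context, this setup.py exposed the Click CLI as a pvc console script. A rough sketch of how it would have been installed and invoked (commands are illustrative):

# From the directory containing setup.py
pip install .
pvc --help    # dispatches to pvc.pvc:cli via the console_scripts entry point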