diff --git a/client-cli-old/pvc.py b/client-cli-old/pvc.py deleted file mode 100755 index 6102ddcf..00000000 --- a/client-cli-old/pvc.py +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env python3 - -# pvc.py - PVC client command-line interface (stub testing interface) -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -import pvc.pvc - - -# -# Main entry point -# -def main(): - return pvc.pvc.cli(obj={}) - - -if __name__ == "__main__": - main() diff --git a/client-cli-old/pvc/__init__.py b/client-cli-old/pvc/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/client-cli-old/pvc/lib/__init__.py b/client-cli-old/pvc/lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/client-cli-old/pvc/lib/ansiprint.py b/client-cli-old/pvc/lib/ansiprint.py deleted file mode 100644 index 3a7ce394..00000000 --- a/client-cli-old/pvc/lib/ansiprint.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python3 - -# ansiprint.py - Printing function for formatted messages -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -import datetime - - -# ANSII colours for output -def red(): - return "\033[91m" - - -def blue(): - return "\033[94m" - - -def cyan(): - return "\033[96m" - - -def green(): - return "\033[92m" - - -def yellow(): - return "\033[93m" - - -def purple(): - return "\033[95m" - - -def bold(): - return "\033[1m" - - -def end(): - return "\033[0m" - - -# Print function -def echo(message, prefix, state): - # Get the date - date = "{} - ".format(datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S.%f")) - endc = end() - - # Continuation - if state == "c": - date = "" - colour = "" - prompt = " " - # OK - elif state == "o": - colour = green() - prompt = ">>> " - # Error - elif state == "e": - colour = red() - prompt = ">>> " - # Warning - elif state == "w": - colour = yellow() - prompt = ">>> " - # Tick - elif state == "t": - colour = purple() - prompt = ">>> " - # Information - elif state == "i": - colour = blue() - prompt = ">>> " - else: - colour = bold() - prompt = ">>> " - - # Append space to prefix - if prefix != "": - prefix = prefix + " " - - print(colour + prompt + endc + date + prefix + message) diff --git a/client-cli-old/pvc/lib/ceph.py b/client-cli-old/pvc/lib/ceph.py deleted file mode 100644 index 7738f3f2..00000000 --- a/client-cli-old/pvc/lib/ceph.py +++ /dev/null @@ -1,2606 +0,0 @@ -#!/usr/bin/env python3 - -# ceph.py - PVC CLI client function library, Ceph cluster functions -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. 
Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -import math - -from json import dumps, loads -from requests_toolbelt.multipart.encoder import ( - MultipartEncoder, - MultipartEncoderMonitor, -) - -import pvc.lib.ansiprint as ansiprint -from pvc.lib.common import UploadProgressBar, call_api - -# -# Supplemental functions -# - -# Matrix of human-to-byte values -byte_unit_matrix = { - "B": 1, - "K": 1024, - "M": 1024 * 1024, - "G": 1024 * 1024 * 1024, - "T": 1024 * 1024 * 1024 * 1024, - "P": 1024 * 1024 * 1024 * 1024 * 1024, -} - -# Matrix of human-to-metric values -ops_unit_matrix = { - "": 1, - "K": 1000, - "M": 1000 * 1000, - "G": 1000 * 1000 * 1000, - "T": 1000 * 1000 * 1000 * 1000, - "P": 1000 * 1000 * 1000 * 1000 * 1000, -} - - -# Format byte sizes to/from human-readable units -def format_bytes_tohuman(databytes): - datahuman = "" - for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get, reverse=True): - new_bytes = int(math.ceil(databytes / byte_unit_matrix[unit])) - # Round up if 5 or more digits - if new_bytes > 9999: - # We can jump down another level - continue - else: - # We're at the end, display with this size - datahuman = "{}{}".format(new_bytes, unit) - - return datahuman - - -def format_bytes_fromhuman(datahuman): - # Trim off human-readable character - dataunit = datahuman[-1] - datasize = int(datahuman[:-1]) - databytes = datasize * 
byte_unit_matrix[dataunit] - return "{}B".format(databytes) - - -# Format ops sizes to/from human-readable units -def format_ops_tohuman(dataops): - datahuman = "" - for unit in sorted(ops_unit_matrix, key=ops_unit_matrix.get, reverse=True): - new_ops = int(math.ceil(dataops / ops_unit_matrix[unit])) - # Round up if 6 or more digits - if new_ops > 99999: - # We can jump down another level - continue - else: - # We're at the end, display with this size - datahuman = "{}{}".format(new_ops, unit) - - return datahuman - - -def format_ops_fromhuman(datahuman): - # Trim off human-readable character - dataunit = datahuman[-1] - datasize = int(datahuman[:-1]) - dataops = datasize * ops_unit_matrix[dataunit] - return "{}".format(dataops) - - -def format_pct_tohuman(datapct): - datahuman = "{0:.1f}".format(float(datapct * 100.0)) - return datahuman - - -# -# Status functions -# -def ceph_status(config): - """ - Get status of the Ceph cluster - - API endpoint: GET /api/v1/storage/ceph/status - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/storage/ceph/status") - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_util(config): - """ - Get utilization of the Ceph cluster - - API endpoint: GET /api/v1/storage/ceph/utilization - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/storage/ceph/utilization") - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def format_raw_output(status_data): - ainformation = list() - ainformation.append( - "{bold}Ceph cluster {stype} (primary node {end}{blue}{primary}{end}{bold}){end}\n".format( - bold=ansiprint.bold(), - end=ansiprint.end(), - blue=ansiprint.blue(), - stype=status_data["type"], - primary=status_data["primary_node"], - ) - ) - ainformation.append(status_data["ceph_data"]) - 
ainformation.append("") - - return "\n".join(ainformation) - - -# -# OSD DB VG functions -# -def ceph_osd_db_vg_add(config, node, device): - """ - Add new Ceph OSD database volume group - - API endpoint: POST /api/v1/storage/ceph/osddb - API arguments: node={node}, device={device} - API schema: {"message":"{data}"} - """ - params = {"node": node, "device": device} - response = call_api(config, "post", "/storage/ceph/osddb", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -# -# OSD functions -# -def ceph_osd_info(config, osd): - """ - Get information about Ceph OSD - - API endpoint: GET /api/v1/storage/ceph/osd/{osd} - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/storage/ceph/osd/{osd}".format(osd=osd)) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "OSD not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_osd_list(config, limit): - """ - Get list information about Ceph OSDs (limited by {limit}) - - API endpoint: GET /api/v1/storage/ceph/osd - API arguments: limit={limit} - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - params = dict() - if limit: - params["limit"] = limit - - response = call_api(config, "get", "/storage/ceph/osd", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_osd_add(config, node, device, weight, ext_db_flag, ext_db_ratio): - """ - Add new Ceph OSD - - API endpoint: POST /api/v1/storage/ceph/osd - API arguments: node={node}, device={device}, weight={weight}, ext_db={ext_db_flag}, ext_db_ratio={ext_db_ratio} - API schema: {"message":"{data}"} - """ - params = { - "node": node, - "device": device, - "weight": weight, - "ext_db": ext_db_flag, - "ext_db_ratio": ext_db_ratio, - } - response = call_api(config, "post", "/storage/ceph/osd", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_osd_replace(config, osdid, device, weight): - """ - Replace an existing Ceph OSD with a new device - - API endpoint: POST /api/v1/storage/ceph/osd/{osdid} - API arguments: device={device}, weight={weight} - API schema: {"message":"{data}"} - """ - params = {"device": device, "weight": weight, "yes-i-really-mean-it": "yes"} - response = call_api(config, "post", f"/storage/ceph/osd/{osdid}", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_osd_refresh(config, osdid, device): - """ - Refresh (reimport) an existing Ceph OSD with device {device} - - API endpoint: PUT /api/v1/storage/ceph/osd/{osdid} - API arguments: device={device} - API schema: {"message":"{data}"} - """ - params = { - "device": device, - } - response = call_api(config, "put", f"/storage/ceph/osd/{osdid}", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def 
ceph_osd_remove(config, osdid, force_flag): - """ - Remove Ceph OSD - - API endpoint: DELETE /api/v1/storage/ceph/osd/{osdid} - API arguments: - API schema: {"message":"{data}"} - """ - params = {"force": force_flag, "yes-i-really-mean-it": "yes"} - response = call_api( - config, "delete", "/storage/ceph/osd/{osdid}".format(osdid=osdid), params=params - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_osd_state(config, osdid, state): - """ - Set state of Ceph OSD - - API endpoint: POST /api/v1/storage/ceph/osd/{osdid}/state - API arguments: state={state} - API schema: {"message":"{data}"} - """ - params = {"state": state} - response = call_api( - config, - "post", - "/storage/ceph/osd/{osdid}/state".format(osdid=osdid), - params=params, - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_osd_option(config, option, action): - """ - Set cluster option of Ceph OSDs - - API endpoint: POST /api/v1/storage/ceph/option - API arguments: option={option}, action={action} - API schema: {"message":"{data}"} - """ - params = {"option": option, "action": action} - response = call_api(config, "post", "/storage/ceph/option", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def getOutputColoursOSD(osd_information): - # Set the UP status - if osd_information["stats"]["up"] == 1: - osd_up_flag = "Yes" - osd_up_colour = ansiprint.green() - else: - osd_up_flag = "No" - osd_up_colour = ansiprint.red() - - # Set the IN status - if osd_information["stats"]["in"] == 1: - osd_in_flag = "Yes" - osd_in_colour = ansiprint.green() - else: - osd_in_flag = "No" - osd_in_colour = ansiprint.red() - - return osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour - - -def 
format_list_osd(osd_list): - # Handle empty list - if not osd_list: - osd_list = list() - - osd_list_output = [] - - osd_id_length = 3 - osd_node_length = 5 - osd_device_length = 6 - osd_db_device_length = 9 - osd_up_length = 4 - osd_in_length = 4 - osd_size_length = 5 - osd_weight_length = 3 - osd_reweight_length = 5 - osd_pgs_length = 4 - osd_used_length = 5 - osd_free_length = 6 - osd_util_length = 6 - osd_var_length = 5 - osd_wrops_length = 4 - osd_wrdata_length = 5 - osd_rdops_length = 4 - osd_rddata_length = 5 - - for osd_information in osd_list: - try: - # If this happens, the node hasn't checked in fully yet, so use some dummy data - if osd_information["stats"]["node"] == "|": - for key in osd_information["stats"].keys(): - if ( - osd_information["stats"][key] == "|" - or osd_information["stats"][key] is None - ): - osd_information["stats"][key] = "N/A" - for key in osd_information.keys(): - if osd_information[key] is None: - osd_information[key] = "N/A" - else: - for key in osd_information["stats"].keys(): - if key in ["utilization", "var"] and isinstance( - osd_information["stats"][key], float - ): - osd_information["stats"][key] = round( - osd_information["stats"][key], 2 - ) - except KeyError: - print( - f"Details for OSD {osd_information['id']} missing required keys, skipping." 
- ) - continue - - # Deal with the size to human readable - osd_information["stats"]["size"] = osd_information["stats"]["kb"] * 1024 - for datatype in "size", "wr_data", "rd_data": - databytes = osd_information["stats"][datatype] - if isinstance(databytes, int): - databytes_formatted = format_bytes_tohuman(databytes) - else: - databytes_formatted = databytes - osd_information["stats"][datatype] = databytes_formatted - for datatype in "wr_ops", "rd_ops": - dataops = osd_information["stats"][datatype] - if isinstance(dataops, int): - dataops_formatted = format_ops_tohuman(dataops) - else: - dataops_formatted = dataops - osd_information["stats"][datatype] = dataops_formatted - - # Set the OSD ID length - _osd_id_length = len(osd_information["id"]) + 1 - if _osd_id_length > osd_id_length: - osd_id_length = _osd_id_length - - # Set the OSD node length - _osd_node_length = len(osd_information["node"]) + 1 - if _osd_node_length > osd_node_length: - osd_node_length = _osd_node_length - - # Set the OSD device length - _osd_device_length = len(osd_information["device"]) + 1 - if _osd_device_length > osd_device_length: - osd_device_length = _osd_device_length - - # Set the OSD db_device length - _osd_db_device_length = len(osd_information["db_device"]) + 1 - if _osd_db_device_length > osd_db_device_length: - osd_db_device_length = _osd_db_device_length - - # Set the size and length - _osd_size_length = len(str(osd_information["stats"]["size"])) + 1 - if _osd_size_length > osd_size_length: - osd_size_length = _osd_size_length - - # Set the weight and length - _osd_weight_length = len(str(osd_information["stats"]["weight"])) + 1 - if _osd_weight_length > osd_weight_length: - osd_weight_length = _osd_weight_length - - # Set the reweight and length - _osd_reweight_length = len(str(osd_information["stats"]["reweight"])) + 1 - if _osd_reweight_length > osd_reweight_length: - osd_reweight_length = _osd_reweight_length - - # Set the pgs and length - _osd_pgs_length = 
len(str(osd_information["stats"]["pgs"])) + 1 - if _osd_pgs_length > osd_pgs_length: - osd_pgs_length = _osd_pgs_length - - # Set the used/available/utlization%/variance and lengths - _osd_used_length = len(osd_information["stats"]["used"]) + 1 - if _osd_used_length > osd_used_length: - osd_used_length = _osd_used_length - - _osd_free_length = len(osd_information["stats"]["avail"]) + 1 - if _osd_free_length > osd_free_length: - osd_free_length = _osd_free_length - - _osd_util_length = len(str(osd_information["stats"]["utilization"])) + 1 - if _osd_util_length > osd_util_length: - osd_util_length = _osd_util_length - - _osd_var_length = len(str(osd_information["stats"]["var"])) + 1 - if _osd_var_length > osd_var_length: - osd_var_length = _osd_var_length - - # Set the read/write IOPS/data and length - _osd_wrops_length = len(osd_information["stats"]["wr_ops"]) + 1 - if _osd_wrops_length > osd_wrops_length: - osd_wrops_length = _osd_wrops_length - - _osd_wrdata_length = len(osd_information["stats"]["wr_data"]) + 1 - if _osd_wrdata_length > osd_wrdata_length: - osd_wrdata_length = _osd_wrdata_length - - _osd_rdops_length = len(osd_information["stats"]["rd_ops"]) + 1 - if _osd_rdops_length > osd_rdops_length: - osd_rdops_length = _osd_rdops_length - - _osd_rddata_length = len(osd_information["stats"]["rd_data"]) + 1 - if _osd_rddata_length > osd_rddata_length: - osd_rddata_length = _osd_rddata_length - - # Format the output header - osd_list_output.append( - "{bold}{osd_header: <{osd_header_length}} {state_header: <{state_header_length}} {details_header: <{details_header_length}} {read_header: <{read_header_length}} {write_header: <{write_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - osd_header_length=osd_id_length - + osd_node_length - + osd_device_length - + osd_db_device_length - + 3, - state_header_length=osd_up_length + osd_in_length + 1, - details_header_length=osd_size_length - + osd_pgs_length - + osd_weight_length - 
+ osd_reweight_length - + osd_used_length - + osd_free_length - + osd_util_length - + osd_var_length - + 7, - read_header_length=osd_rdops_length + osd_rddata_length + 1, - write_header_length=osd_wrops_length + osd_wrdata_length + 1, - osd_header="OSDs " - + "".join( - [ - "-" - for _ in range( - 5, - osd_id_length - + osd_node_length - + osd_device_length - + osd_db_device_length - + 2, - ) - ] - ), - state_header="State " - + "".join(["-" for _ in range(6, osd_up_length + osd_in_length)]), - details_header="Details " - + "".join( - [ - "-" - for _ in range( - 8, - osd_size_length - + osd_pgs_length - + osd_weight_length - + osd_reweight_length - + osd_used_length - + osd_free_length - + osd_util_length - + osd_var_length - + 6, - ) - ] - ), - read_header="Read " - + "".join(["-" for _ in range(5, osd_rdops_length + osd_rddata_length)]), - write_header="Write " - + "".join(["-" for _ in range(6, osd_wrops_length + osd_wrdata_length)]), - ) - ) - - osd_list_output.append( - "{bold}\ -{osd_id: <{osd_id_length}} \ -{osd_node: <{osd_node_length}} \ -{osd_device: <{osd_device_length}} \ -{osd_db_device: <{osd_db_device_length}} \ -{osd_up: <{osd_up_length}} \ -{osd_in: <{osd_in_length}} \ -{osd_size: <{osd_size_length}} \ -{osd_pgs: <{osd_pgs_length}} \ -{osd_weight: <{osd_weight_length}} \ -{osd_reweight: <{osd_reweight_length}} \ -{osd_used: <{osd_used_length}} \ -{osd_free: <{osd_free_length}} \ -{osd_util: <{osd_util_length}} \ -{osd_var: <{osd_var_length}} \ -{osd_rdops: <{osd_rdops_length}} \ -{osd_rddata: <{osd_rddata_length}} \ -{osd_wrops: <{osd_wrops_length}} \ -{osd_wrdata: <{osd_wrdata_length}} \ -{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - osd_id_length=osd_id_length, - osd_node_length=osd_node_length, - osd_device_length=osd_device_length, - osd_db_device_length=osd_db_device_length, - osd_up_length=osd_up_length, - osd_in_length=osd_in_length, - osd_size_length=osd_size_length, - osd_pgs_length=osd_pgs_length, - 
osd_weight_length=osd_weight_length, - osd_reweight_length=osd_reweight_length, - osd_used_length=osd_used_length, - osd_free_length=osd_free_length, - osd_util_length=osd_util_length, - osd_var_length=osd_var_length, - osd_wrops_length=osd_wrops_length, - osd_wrdata_length=osd_wrdata_length, - osd_rdops_length=osd_rdops_length, - osd_rddata_length=osd_rddata_length, - osd_id="ID", - osd_node="Node", - osd_device="Block", - osd_db_device="DB Block", - osd_up="Up", - osd_in="In", - osd_size="Size", - osd_pgs="PGs", - osd_weight="Wt", - osd_reweight="ReWt", - osd_used="Used", - osd_free="Free", - osd_util="Util%", - osd_var="Var", - osd_wrops="OPS", - osd_wrdata="Data", - osd_rdops="OPS", - osd_rddata="Data", - ) - ) - - for osd_information in sorted(osd_list, key=lambda x: int(x["id"])): - osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour = getOutputColoursOSD( - osd_information - ) - - osd_db_device = osd_information["db_device"] - if not osd_db_device: - osd_db_device = "N/A" - - # Format the output header - osd_list_output.append( - "{bold}\ -{osd_id: <{osd_id_length}} \ -{osd_node: <{osd_node_length}} \ -{osd_device: <{osd_device_length}} \ -{osd_db_device: <{osd_db_device_length}} \ -{osd_up_colour}{osd_up: <{osd_up_length}}{end_colour} \ -{osd_in_colour}{osd_in: <{osd_in_length}}{end_colour} \ -{osd_size: <{osd_size_length}} \ -{osd_pgs: <{osd_pgs_length}} \ -{osd_weight: <{osd_weight_length}} \ -{osd_reweight: <{osd_reweight_length}} \ -{osd_used: <{osd_used_length}} \ -{osd_free: <{osd_free_length}} \ -{osd_util: <{osd_util_length}} \ -{osd_var: <{osd_var_length}} \ -{osd_rdops: <{osd_rdops_length}} \ -{osd_rddata: <{osd_rddata_length}} \ -{osd_wrops: <{osd_wrops_length}} \ -{osd_wrdata: <{osd_wrdata_length}} \ -{end_bold}".format( - bold="", - end_bold="", - end_colour=ansiprint.end(), - osd_id_length=osd_id_length, - osd_node_length=osd_node_length, - osd_device_length=osd_device_length, - osd_db_device_length=osd_db_device_length, - 
osd_up_length=osd_up_length, - osd_in_length=osd_in_length, - osd_size_length=osd_size_length, - osd_pgs_length=osd_pgs_length, - osd_weight_length=osd_weight_length, - osd_reweight_length=osd_reweight_length, - osd_used_length=osd_used_length, - osd_free_length=osd_free_length, - osd_util_length=osd_util_length, - osd_var_length=osd_var_length, - osd_wrops_length=osd_wrops_length, - osd_wrdata_length=osd_wrdata_length, - osd_rdops_length=osd_rdops_length, - osd_rddata_length=osd_rddata_length, - osd_id=osd_information["id"], - osd_node=osd_information["node"], - osd_device=osd_information["device"], - osd_db_device=osd_db_device, - osd_up_colour=osd_up_colour, - osd_up=osd_up_flag, - osd_in_colour=osd_in_colour, - osd_in=osd_in_flag, - osd_size=osd_information["stats"]["size"], - osd_pgs=osd_information["stats"]["pgs"], - osd_weight=osd_information["stats"]["weight"], - osd_reweight=osd_information["stats"]["reweight"], - osd_used=osd_information["stats"]["used"], - osd_free=osd_information["stats"]["avail"], - osd_util=osd_information["stats"]["utilization"], - osd_var=osd_information["stats"]["var"], - osd_wrops=osd_information["stats"]["wr_ops"], - osd_wrdata=osd_information["stats"]["wr_data"], - osd_rdops=osd_information["stats"]["rd_ops"], - osd_rddata=osd_information["stats"]["rd_data"], - ) - ) - - return "\n".join(osd_list_output) - - -# -# Pool functions -# -def ceph_pool_info(config, pool): - """ - Get information about Ceph OSD - - API endpoint: GET /api/v1/storage/ceph/pool/{pool} - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/storage/ceph/pool/{pool}".format(pool=pool)) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "Pool not found." 
- else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_pool_list(config, limit): - """ - Get list information about Ceph pools (limited by {limit}) - - API endpoint: GET /api/v1/storage/ceph/pool - API arguments: limit={limit} - API schema: [{json_data_object},{json_data_object},etc.] - """ - params = dict() - if limit: - params["limit"] = limit - - response = call_api(config, "get", "/storage/ceph/pool", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_pool_add(config, pool, pgs, replcfg, tier): - """ - Add new Ceph pool - - API endpoint: POST /api/v1/storage/ceph/pool - API arguments: pool={pool}, pgs={pgs}, replcfg={replcfg}, tier={tier} - API schema: {"message":"{data}"} - """ - params = {"pool": pool, "pgs": pgs, "replcfg": replcfg, "tier": tier} - response = call_api(config, "post", "/storage/ceph/pool", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_pool_remove(config, pool): - """ - Remove Ceph pool - - API endpoint: DELETE /api/v1/storage/ceph/pool/{pool} - API arguments: - API schema: {"message":"{data}"} - """ - params = {"yes-i-really-mean-it": "yes"} - response = call_api( - config, "delete", "/storage/ceph/pool/{pool}".format(pool=pool), params=params - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_pool_set_pgs(config, pool, pgs): - """ - Set the PGs of a Ceph pool - - API endpoint: PUT /api/v1/storage/ceph/pool/{pool} - API arguments: {"pgs": "{pgs}"} - API schema: 
{"message":"{data}"} - """ - params = {"pgs": pgs} - response = call_api( - config, "put", "/storage/ceph/pool/{pool}".format(pool=pool), params=params - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def format_list_pool(pool_list): - # Handle empty list - if not pool_list: - pool_list = list() - - pool_list_output = [] - - pool_name_length = 5 - pool_id_length = 3 - pool_tier_length = 5 - pool_pgs_length = 4 - pool_used_length = 5 - pool_usedpct_length = 6 - pool_free_length = 5 - pool_num_objects_length = 6 - pool_num_clones_length = 7 - pool_num_copies_length = 7 - pool_num_degraded_length = 9 - pool_read_ops_length = 4 - pool_read_data_length = 5 - pool_write_ops_length = 4 - pool_write_data_length = 5 - - for pool_information in pool_list: - # Deal with the size to human readable - for datatype in ["free_bytes", "used_bytes", "write_bytes", "read_bytes"]: - databytes = pool_information["stats"][datatype] - databytes_formatted = format_bytes_tohuman(int(databytes)) - pool_information["stats"][datatype] = databytes_formatted - for datatype in ["write_ops", "read_ops"]: - dataops = pool_information["stats"][datatype] - dataops_formatted = format_ops_tohuman(int(dataops)) - pool_information["stats"][datatype] = dataops_formatted - for datatype in ["used_percent"]: - datapct = pool_information["stats"][datatype] - datapct_formatted = format_pct_tohuman(float(datapct)) - pool_information["stats"][datatype] = datapct_formatted - - # Set the Pool name length - _pool_name_length = len(pool_information["name"]) + 1 - if _pool_name_length > pool_name_length: - pool_name_length = _pool_name_length - - # Set the id and length - _pool_id_length = len(str(pool_information["stats"]["id"])) + 1 - if _pool_id_length > pool_id_length: - pool_id_length = _pool_id_length - - # Set the tier and length - _pool_tier_length = len(str(pool_information["tier"])) + 1 - if 
_pool_tier_length > pool_tier_length: - pool_tier_length = _pool_tier_length - - # Set the pgs and length - _pool_pgs_length = len(str(pool_information["pgs"])) + 1 - if _pool_pgs_length > pool_pgs_length: - pool_pgs_length = _pool_pgs_length - - # Set the used and length - _pool_used_length = len(str(pool_information["stats"]["used_bytes"])) + 1 - if _pool_used_length > pool_used_length: - pool_used_length = _pool_used_length - - # Set the usedpct and length - _pool_usedpct_length = len(str(pool_information["stats"]["used_percent"])) + 1 - if _pool_usedpct_length > pool_usedpct_length: - pool_usedpct_length = _pool_usedpct_length - - # Set the free and length - _pool_free_length = len(str(pool_information["stats"]["free_bytes"])) + 1 - if _pool_free_length > pool_free_length: - pool_free_length = _pool_free_length - - # Set the num_objects and length - _pool_num_objects_length = ( - len(str(pool_information["stats"]["num_objects"])) + 1 - ) - if _pool_num_objects_length > pool_num_objects_length: - pool_num_objects_length = _pool_num_objects_length - - # Set the num_clones and length - _pool_num_clones_length = ( - len(str(pool_information["stats"]["num_object_clones"])) + 1 - ) - if _pool_num_clones_length > pool_num_clones_length: - pool_num_clones_length = _pool_num_clones_length - - # Set the num_copies and length - _pool_num_copies_length = ( - len(str(pool_information["stats"]["num_object_copies"])) + 1 - ) - if _pool_num_copies_length > pool_num_copies_length: - pool_num_copies_length = _pool_num_copies_length - - # Set the num_degraded and length - _pool_num_degraded_length = ( - len(str(pool_information["stats"]["num_objects_degraded"])) + 1 - ) - if _pool_num_degraded_length > pool_num_degraded_length: - pool_num_degraded_length = _pool_num_degraded_length - - # Set the read/write IOPS/data and length - _pool_write_ops_length = len(str(pool_information["stats"]["write_ops"])) + 1 - if _pool_write_ops_length > pool_write_ops_length: - 
pool_write_ops_length = _pool_write_ops_length - - _pool_write_data_length = len(pool_information["stats"]["write_bytes"]) + 1 - if _pool_write_data_length > pool_write_data_length: - pool_write_data_length = _pool_write_data_length - - _pool_read_ops_length = len(str(pool_information["stats"]["read_ops"])) + 1 - if _pool_read_ops_length > pool_read_ops_length: - pool_read_ops_length = _pool_read_ops_length - - _pool_read_data_length = len(pool_information["stats"]["read_bytes"]) + 1 - if _pool_read_data_length > pool_read_data_length: - pool_read_data_length = _pool_read_data_length - - # Format the output header - pool_list_output.append( - "{bold}{pool_header: <{pool_header_length}} {objects_header: <{objects_header_length}} {read_header: <{read_header_length}} {write_header: <{write_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - pool_header_length=pool_id_length - + pool_name_length - + pool_tier_length - + pool_pgs_length - + pool_used_length - + pool_usedpct_length - + pool_free_length - + 6, - objects_header_length=pool_num_objects_length - + pool_num_clones_length - + pool_num_copies_length - + pool_num_degraded_length - + 3, - read_header_length=pool_read_ops_length + pool_read_data_length + 1, - write_header_length=pool_write_ops_length + pool_write_data_length + 1, - pool_header="Pools " - + "".join( - [ - "-" - for _ in range( - 6, - pool_id_length - + pool_name_length - + pool_tier_length - + pool_pgs_length - + pool_used_length - + pool_usedpct_length - + pool_free_length - + 5, - ) - ] - ), - objects_header="Objects " - + "".join( - [ - "-" - for _ in range( - 8, - pool_num_objects_length - + pool_num_clones_length - + pool_num_copies_length - + pool_num_degraded_length - + 2, - ) - ] - ), - read_header="Read " - + "".join( - ["-" for _ in range(5, pool_read_ops_length + pool_read_data_length)] - ), - write_header="Write " - + "".join( - ["-" for _ in range(6, pool_write_ops_length + pool_write_data_length)] 
- ), - ) - ) - - pool_list_output.append( - "{bold}\ -{pool_id: <{pool_id_length}} \ -{pool_name: <{pool_name_length}} \ -{pool_tier: <{pool_tier_length}} \ -{pool_pgs: <{pool_pgs_length}} \ -{pool_used: <{pool_used_length}} \ -{pool_usedpct: <{pool_usedpct_length}} \ -{pool_free: <{pool_free_length}} \ -{pool_objects: <{pool_objects_length}} \ -{pool_clones: <{pool_clones_length}} \ -{pool_copies: <{pool_copies_length}} \ -{pool_degraded: <{pool_degraded_length}} \ -{pool_read_ops: <{pool_read_ops_length}} \ -{pool_read_data: <{pool_read_data_length}} \ -{pool_write_ops: <{pool_write_ops_length}} \ -{pool_write_data: <{pool_write_data_length}} \ -{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - pool_id_length=pool_id_length, - pool_name_length=pool_name_length, - pool_tier_length=pool_tier_length, - pool_pgs_length=pool_pgs_length, - pool_used_length=pool_used_length, - pool_usedpct_length=pool_usedpct_length, - pool_free_length=pool_free_length, - pool_objects_length=pool_num_objects_length, - pool_clones_length=pool_num_clones_length, - pool_copies_length=pool_num_copies_length, - pool_degraded_length=pool_num_degraded_length, - pool_write_ops_length=pool_write_ops_length, - pool_write_data_length=pool_write_data_length, - pool_read_ops_length=pool_read_ops_length, - pool_read_data_length=pool_read_data_length, - pool_id="ID", - pool_name="Name", - pool_tier="Tier", - pool_pgs="PGs", - pool_used="Used", - pool_usedpct="Used%", - pool_free="Free", - pool_objects="Count", - pool_clones="Clones", - pool_copies="Copies", - pool_degraded="Degraded", - pool_write_ops="OPS", - pool_write_data="Data", - pool_read_ops="OPS", - pool_read_data="Data", - ) - ) - - for pool_information in sorted(pool_list, key=lambda x: int(x["stats"]["id"])): - # Format the output header - pool_list_output.append( - "{bold}\ -{pool_id: <{pool_id_length}} \ -{pool_name: <{pool_name_length}} \ -{pool_tier: <{pool_tier_length}} \ -{pool_pgs: <{pool_pgs_length}} \ 
-{pool_used: <{pool_used_length}} \ -{pool_usedpct: <{pool_usedpct_length}} \ -{pool_free: <{pool_free_length}} \ -{pool_objects: <{pool_objects_length}} \ -{pool_clones: <{pool_clones_length}} \ -{pool_copies: <{pool_copies_length}} \ -{pool_degraded: <{pool_degraded_length}} \ -{pool_read_ops: <{pool_read_ops_length}} \ -{pool_read_data: <{pool_read_data_length}} \ -{pool_write_ops: <{pool_write_ops_length}} \ -{pool_write_data: <{pool_write_data_length}} \ -{end_bold}".format( - bold="", - end_bold="", - pool_id_length=pool_id_length, - pool_name_length=pool_name_length, - pool_tier_length=pool_tier_length, - pool_pgs_length=pool_pgs_length, - pool_used_length=pool_used_length, - pool_usedpct_length=pool_usedpct_length, - pool_free_length=pool_free_length, - pool_objects_length=pool_num_objects_length, - pool_clones_length=pool_num_clones_length, - pool_copies_length=pool_num_copies_length, - pool_degraded_length=pool_num_degraded_length, - pool_write_ops_length=pool_write_ops_length, - pool_write_data_length=pool_write_data_length, - pool_read_ops_length=pool_read_ops_length, - pool_read_data_length=pool_read_data_length, - pool_id=pool_information["stats"]["id"], - pool_name=pool_information["name"], - pool_tier=pool_information["tier"], - pool_pgs=pool_information["pgs"], - pool_used=pool_information["stats"]["used_bytes"], - pool_usedpct=pool_information["stats"]["used_percent"], - pool_free=pool_information["stats"]["free_bytes"], - pool_objects=pool_information["stats"]["num_objects"], - pool_clones=pool_information["stats"]["num_object_clones"], - pool_copies=pool_information["stats"]["num_object_copies"], - pool_degraded=pool_information["stats"]["num_objects_degraded"], - pool_write_ops=pool_information["stats"]["write_ops"], - pool_write_data=pool_information["stats"]["write_bytes"], - pool_read_ops=pool_information["stats"]["read_ops"], - pool_read_data=pool_information["stats"]["read_bytes"], - ) - ) - - return "\n".join(pool_list_output) - - -# -# 
Volume functions -# -def ceph_volume_info(config, pool, volume): - """ - Get information about Ceph volume - - API endpoint: GET /api/v1/storage/ceph/volume/{pool}/{volume} - API arguments: - API schema: {json_data_object} - """ - response = call_api( - config, - "get", - "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool), - ) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "Volume not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_volume_list(config, limit, pool): - """ - Get list information about Ceph volumes (limited by {limit} and by {pool}) - - API endpoint: GET /api/v1/storage/ceph/volume - API arguments: limit={limit}, pool={pool} - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - params = dict() - if limit: - params["limit"] = limit - if pool: - params["pool"] = pool - - response = call_api(config, "get", "/storage/ceph/volume", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_volume_add(config, pool, volume, size): - """ - Add new Ceph volume - - API endpoint: POST /api/v1/storage/ceph/volume - API arguments: volume={volume}, pool={pool}, size={size} - API schema: {"message":"{data}"} - """ - params = {"volume": volume, "pool": pool, "size": size} - response = call_api(config, "post", "/storage/ceph/volume", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_volume_upload(config, pool, volume, image_format, image_file): - """ - Upload a disk image to a Ceph volume - - API endpoint: POST /api/v1/storage/ceph/volume/{pool}/{volume}/upload - API arguments: image_format={image_format} - API schema: {"message":"{data}"} - """ - import click - - bar = UploadProgressBar( - image_file, end_message="Parsing file on remote side...", end_nl=False - ) - upload_data = MultipartEncoder( - fields={ - "file": ("filename", open(image_file, "rb"), "application/octet-stream") - } - ) - upload_monitor = MultipartEncoderMonitor(upload_data, bar.update) - - headers = {"Content-Type": upload_monitor.content_type} - params = {"image_format": image_format} - - response = call_api( - config, - "post", - "/storage/ceph/volume/{}/{}/upload".format(pool, volume), - headers=headers, - params=params, - data=upload_monitor, - ) - - click.echo("done.") - click.echo() - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_volume_remove(config, pool, volume): - """ - Remove Ceph volume - - API endpoint: DELETE 
/api/v1/storage/ceph/volume/{pool}/{volume} - API arguments: - API schema: {"message":"{data}"} - """ - response = call_api( - config, - "delete", - "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool), - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_volume_modify(config, pool, volume, new_name=None, new_size=None): - """ - Modify Ceph volume - - API endpoint: PUT /api/v1/storage/ceph/volume/{pool}/{volume} - API arguments: - API schema: {"message":"{data}"} - """ - - params = dict() - if new_name: - params["new_name"] = new_name - if new_size: - params["new_size"] = new_size - - response = call_api( - config, - "put", - "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool), - params=params, - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_volume_clone(config, pool, volume, new_volume): - """ - Clone Ceph volume - - API endpoint: POST /api/v1/storage/ceph/volume/{pool}/{volume} - API arguments: new_volume={new_volume - API schema: {"message":"{data}"} - """ - params = {"new_volume": new_volume} - response = call_api( - config, - "post", - "/storage/ceph/volume/{pool}/{volume}/clone".format(volume=volume, pool=pool), - params=params, - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def format_list_volume(volume_list): - # Handle empty list - if not volume_list: - volume_list = list() - - volume_list_output = [] - - volume_name_length = 5 - volume_pool_length = 5 - volume_size_length = 5 - volume_objects_length = 8 - volume_order_length = 6 - volume_format_length = 7 - volume_features_length = 10 - - for volume_information in volume_list: - # Set the Volume name length - _volume_name_length = 
len(volume_information["name"]) + 1 - if _volume_name_length > volume_name_length: - volume_name_length = _volume_name_length - - # Set the Volume pool length - _volume_pool_length = len(volume_information["pool"]) + 1 - if _volume_pool_length > volume_pool_length: - volume_pool_length = _volume_pool_length - - # Set the size and length - _volume_size_length = len(str(volume_information["stats"]["size"])) + 1 - if _volume_size_length > volume_size_length: - volume_size_length = _volume_size_length - - # Set the num_objects and length - _volume_objects_length = len(str(volume_information["stats"]["objects"])) + 1 - if _volume_objects_length > volume_objects_length: - volume_objects_length = _volume_objects_length - - # Set the order and length - _volume_order_length = len(str(volume_information["stats"]["order"])) + 1 - if _volume_order_length > volume_order_length: - volume_order_length = _volume_order_length - - # Set the format and length - _volume_format_length = len(str(volume_information["stats"]["format"])) + 1 - if _volume_format_length > volume_format_length: - volume_format_length = _volume_format_length - - # Set the features and length - _volume_features_length = ( - len(str(",".join(volume_information["stats"]["features"]))) + 1 - ) - if _volume_features_length > volume_features_length: - volume_features_length = _volume_features_length - - # Format the output header - volume_list_output.append( - "{bold}{volume_header: <{volume_header_length}} {details_header: <{details_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - volume_header_length=volume_name_length + volume_pool_length + 1, - details_header_length=volume_size_length - + volume_objects_length - + volume_order_length - + volume_format_length - + volume_features_length - + 4, - volume_header="Volumes " - + "".join(["-" for _ in range(8, volume_name_length + volume_pool_length)]), - details_header="Details " - + "".join( - [ - "-" - for _ in range( - 8, - 
volume_size_length - + volume_objects_length - + volume_order_length - + volume_format_length - + volume_features_length - + 3, - ) - ] - ), - ) - ) - - volume_list_output.append( - "{bold}\ -{volume_name: <{volume_name_length}} \ -{volume_pool: <{volume_pool_length}} \ -{volume_size: <{volume_size_length}} \ -{volume_objects: <{volume_objects_length}} \ -{volume_order: <{volume_order_length}} \ -{volume_format: <{volume_format_length}} \ -{volume_features: <{volume_features_length}} \ -{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - volume_name_length=volume_name_length, - volume_pool_length=volume_pool_length, - volume_size_length=volume_size_length, - volume_objects_length=volume_objects_length, - volume_order_length=volume_order_length, - volume_format_length=volume_format_length, - volume_features_length=volume_features_length, - volume_name="Name", - volume_pool="Pool", - volume_size="Size", - volume_objects="Objects", - volume_order="Order", - volume_format="Format", - volume_features="Features", - ) - ) - - for volume_information in sorted(volume_list, key=lambda v: v["pool"] + v["name"]): - volume_list_output.append( - "{bold}\ -{volume_name: <{volume_name_length}} \ -{volume_pool: <{volume_pool_length}} \ -{volume_size: <{volume_size_length}} \ -{volume_objects: <{volume_objects_length}} \ -{volume_order: <{volume_order_length}} \ -{volume_format: <{volume_format_length}} \ -{volume_features: <{volume_features_length}} \ -{end_bold}".format( - bold="", - end_bold="", - volume_name_length=volume_name_length, - volume_pool_length=volume_pool_length, - volume_size_length=volume_size_length, - volume_objects_length=volume_objects_length, - volume_order_length=volume_order_length, - volume_format_length=volume_format_length, - volume_features_length=volume_features_length, - volume_name=volume_information["name"], - volume_pool=volume_information["pool"], - volume_size=volume_information["stats"]["size"], - 
volume_objects=volume_information["stats"]["objects"], - volume_order=volume_information["stats"]["order"], - volume_format=volume_information["stats"]["format"], - volume_features=",".join(volume_information["stats"]["features"]), - ) - ) - - return "\n".join(volume_list_output) - - -# -# Snapshot functions -# -def ceph_snapshot_info(config, pool, volume, snapshot): - """ - Get information about Ceph snapshot - - API endpoint: GET /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot} - API arguments: - API schema: {json_data_object} - """ - response = call_api( - config, - "get", - "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format( - snapshot=snapshot, volume=volume, pool=pool - ), - ) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "Snapshot not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_snapshot_list(config, limit, volume, pool): - """ - Get list information about Ceph snapshots (limited by {limit}, by {pool}, or by {volume}) - - API endpoint: GET /api/v1/storage/ceph/snapshot - API arguments: limit={limit}, volume={volume}, pool={pool} - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - params = dict() - if limit: - params["limit"] = limit - if volume: - params["volume"] = volume - if pool: - params["pool"] = pool - - response = call_api(config, "get", "/storage/ceph/snapshot", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ceph_snapshot_add(config, pool, volume, snapshot): - """ - Add new Ceph snapshot - - API endpoint: POST /api/v1/storage/ceph/snapshot - API arguments: snapshot={snapshot}, volume={volume}, pool={pool} - API schema: {"message":"{data}"} - """ - params = {"snapshot": snapshot, "volume": volume, "pool": pool} - response = call_api(config, "post", "/storage/ceph/snapshot", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_snapshot_remove(config, pool, volume, snapshot): - """ - Remove Ceph snapshot - - API endpoint: DELETE /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot} - API arguments: - API schema: {"message":"{data}"} - """ - response = call_api( - config, - "delete", - "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format( - snapshot=snapshot, volume=volume, pool=pool - ), - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None): - """ - Modify Ceph snapshot - - API endpoint: PUT /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot} - API arguments: - API schema: {"message":"{data}"} - """ - - params = dict() - if new_name: - params["new_name"] = new_name - - response = call_api( - config, - "put", - "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format( - snapshot=snapshot, volume=volume, pool=pool - ), - params=params, - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return 
retstatus, response.json().get("message", "") - - -def format_list_snapshot(snapshot_list): - # Handle empty list - if not snapshot_list: - snapshot_list = list() - - snapshot_list_output = [] - - snapshot_name_length = 5 - snapshot_volume_length = 7 - snapshot_pool_length = 5 - - for snapshot_information in snapshot_list: - snapshot_name = snapshot_information["snapshot"] - snapshot_volume = snapshot_information["volume"] - snapshot_pool = snapshot_information["pool"] - - # Set the Snapshot name length - _snapshot_name_length = len(snapshot_name) + 1 - if _snapshot_name_length > snapshot_name_length: - snapshot_name_length = _snapshot_name_length - - # Set the Snapshot volume length - _snapshot_volume_length = len(snapshot_volume) + 1 - if _snapshot_volume_length > snapshot_volume_length: - snapshot_volume_length = _snapshot_volume_length - - # Set the Snapshot pool length - _snapshot_pool_length = len(snapshot_pool) + 1 - if _snapshot_pool_length > snapshot_pool_length: - snapshot_pool_length = _snapshot_pool_length - - # Format the output header - snapshot_list_output.append( - "{bold}{snapshot_header: <{snapshot_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - snapshot_header_length=snapshot_name_length - + snapshot_volume_length - + snapshot_pool_length - + 2, - snapshot_header="Snapshots " - + "".join( - [ - "-" - for _ in range( - 10, - snapshot_name_length - + snapshot_volume_length - + snapshot_pool_length - + 1, - ) - ] - ), - ) - ) - - snapshot_list_output.append( - "{bold}\ -{snapshot_name: <{snapshot_name_length}} \ -{snapshot_volume: <{snapshot_volume_length}} \ -{snapshot_pool: <{snapshot_pool_length}} \ -{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - snapshot_name_length=snapshot_name_length, - snapshot_volume_length=snapshot_volume_length, - snapshot_pool_length=snapshot_pool_length, - snapshot_name="Name", - snapshot_volume="Volume", - snapshot_pool="Pool", - ) - ) - - for 
snapshot_information in sorted( - snapshot_list, key=lambda s: s["pool"] + s["volume"] + s["snapshot"] - ): - snapshot_name = snapshot_information["snapshot"] - snapshot_volume = snapshot_information["volume"] - snapshot_pool = snapshot_information["pool"] - snapshot_list_output.append( - "{bold}\ -{snapshot_name: <{snapshot_name_length}} \ -{snapshot_volume: <{snapshot_volume_length}} \ -{snapshot_pool: <{snapshot_pool_length}} \ -{end_bold}".format( - bold="", - end_bold="", - snapshot_name_length=snapshot_name_length, - snapshot_volume_length=snapshot_volume_length, - snapshot_pool_length=snapshot_pool_length, - snapshot_name=snapshot_name, - snapshot_volume=snapshot_volume, - snapshot_pool=snapshot_pool, - ) - ) - - return "\n".join(snapshot_list_output) - - -# -# Benchmark functions -# -def ceph_benchmark_run(config, pool): - """ - Run a storage benchmark against {pool} - - API endpoint: POST /api/v1/storage/ceph/benchmark - API arguments: pool={pool} - API schema: {message} - """ - params = {"pool": pool} - response = call_api(config, "post", "/storage/ceph/benchmark", params=params) - - if response.status_code == 202: - retvalue = True - retdata = "Task ID: {}".format(response.json()["task_id"]) - else: - retvalue = False - retdata = response.json().get("message", "") - - return retvalue, retdata - - -def ceph_benchmark_list(config, job): - """ - View results of one or more previous benchmark runs - - API endpoint: GET /api/v1/storage/ceph/benchmark - API arguments: job={job} - API schema: {results} - """ - if job is not None: - params = {"job": job} - else: - params = {} - - response = call_api(config, "get", "/storage/ceph/benchmark", params=params) - - if response.status_code == 200: - retvalue = True - retdata = response.json() - else: - retvalue = False - retdata = response.json().get("message", "") - - return retvalue, retdata - - -def get_benchmark_list_results_legacy(benchmark_data): - if isinstance(benchmark_data, str): - benchmark_data = 
loads(benchmark_data) - benchmark_bandwidth = dict() - benchmark_iops = dict() - for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]: - benchmark_bandwidth[test] = format_bytes_tohuman( - int(benchmark_data[test]["overall"]["bandwidth"]) * 1024 - ) - benchmark_iops[test] = format_ops_tohuman( - int(benchmark_data[test]["overall"]["iops"]) - ) - - return benchmark_bandwidth, benchmark_iops - - -def get_benchmark_list_results_json(benchmark_data): - benchmark_bandwidth = dict() - benchmark_iops = dict() - for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]: - benchmark_test_data = benchmark_data[test] - active_class = None - for io_class in ["read", "write"]: - if benchmark_test_data["jobs"][0][io_class]["io_bytes"] > 0: - active_class = io_class - if active_class is not None: - benchmark_bandwidth[test] = format_bytes_tohuman( - int(benchmark_test_data["jobs"][0][active_class]["bw_bytes"]) - ) - benchmark_iops[test] = format_ops_tohuman( - int(benchmark_test_data["jobs"][0][active_class]["iops"]) - ) - - return benchmark_bandwidth, benchmark_iops - - -def get_benchmark_list_results(benchmark_format, benchmark_data): - if benchmark_format == 0: - benchmark_bandwidth, benchmark_iops = get_benchmark_list_results_legacy( - benchmark_data - ) - elif benchmark_format == 1: - benchmark_bandwidth, benchmark_iops = get_benchmark_list_results_json( - benchmark_data - ) - - seq_benchmark_bandwidth = "{} / {}".format( - benchmark_bandwidth["seq_read"], benchmark_bandwidth["seq_write"] - ) - seq_benchmark_iops = "{} / {}".format( - benchmark_iops["seq_read"], benchmark_iops["seq_write"] - ) - rand_benchmark_bandwidth = "{} / {}".format( - benchmark_bandwidth["rand_read_4K"], benchmark_bandwidth["rand_write_4K"] - ) - rand_benchmark_iops = "{} / {}".format( - benchmark_iops["rand_read_4K"], benchmark_iops["rand_write_4K"] - ) - - return ( - seq_benchmark_bandwidth, - seq_benchmark_iops, - rand_benchmark_bandwidth, - rand_benchmark_iops, - ) - 
- -def format_list_benchmark(config, benchmark_information): - benchmark_list_output = [] - - benchmark_job_length = 20 - benchmark_format_length = 6 - benchmark_bandwidth_length = dict() - benchmark_iops_length = dict() - - # For this output, we're only showing the Sequential (seq_read and seq_write) and 4k Random (rand_read_4K and rand_write_4K) results since we're showing them for each test result. - for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]: - benchmark_bandwidth_length[test] = 7 - benchmark_iops_length[test] = 6 - - benchmark_seq_bw_length = 15 - benchmark_seq_iops_length = 10 - benchmark_rand_bw_length = 15 - benchmark_rand_iops_length = 10 - - for benchmark in benchmark_information: - benchmark_job = benchmark["job"] - benchmark_format = benchmark.get("test_format", 0) # noqa: F841 - - _benchmark_job_length = len(benchmark_job) - if _benchmark_job_length > benchmark_job_length: - benchmark_job_length = _benchmark_job_length - - if benchmark["benchmark_result"] == "Running": - continue - - benchmark_data = benchmark["benchmark_result"] - ( - seq_benchmark_bandwidth, - seq_benchmark_iops, - rand_benchmark_bandwidth, - rand_benchmark_iops, - ) = get_benchmark_list_results(benchmark_format, benchmark_data) - - _benchmark_seq_bw_length = len(seq_benchmark_bandwidth) + 1 - if _benchmark_seq_bw_length > benchmark_seq_bw_length: - benchmark_seq_bw_length = _benchmark_seq_bw_length - - _benchmark_seq_iops_length = len(seq_benchmark_iops) + 1 - if _benchmark_seq_iops_length > benchmark_seq_iops_length: - benchmark_seq_iops_length = _benchmark_seq_iops_length - - _benchmark_rand_bw_length = len(rand_benchmark_bandwidth) + 1 - if _benchmark_rand_bw_length > benchmark_rand_bw_length: - benchmark_rand_bw_length = _benchmark_rand_bw_length - - _benchmark_rand_iops_length = len(rand_benchmark_iops) + 1 - if _benchmark_rand_iops_length > benchmark_rand_iops_length: - benchmark_rand_iops_length = _benchmark_rand_iops_length - - # Format the output 
header line 1 - benchmark_list_output.append( - "{bold}\ -{benchmark_job: <{benchmark_job_length}} \ -{seq_header: <{seq_header_length}} \ -{rand_header: <{rand_header_length}}\ -{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - benchmark_job_length=benchmark_job_length + benchmark_format_length + 1, - seq_header_length=benchmark_seq_bw_length + benchmark_seq_iops_length + 1, - rand_header_length=benchmark_rand_bw_length - + benchmark_rand_iops_length - + 1, - benchmark_job="Benchmarks " - + "".join( - [ - "-" - for _ in range( - 11, benchmark_job_length + benchmark_format_length + 2 - ) - ] - ), - seq_header="Sequential (4M blocks) " - + "".join( - [ - "-" - for _ in range( - 23, benchmark_seq_bw_length + benchmark_seq_iops_length - ) - ] - ), - rand_header="Random (4K blocks) " - + "".join( - [ - "-" - for _ in range( - 19, benchmark_rand_bw_length + benchmark_rand_iops_length - ) - ] - ), - ) - ) - - benchmark_list_output.append( - "{bold}\ -{benchmark_job: <{benchmark_job_length}} \ -{benchmark_format: <{benchmark_format_length}} \ -{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \ -{seq_benchmark_iops: <{seq_benchmark_iops_length}} \ -{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \ -{rand_benchmark_iops: <{rand_benchmark_iops_length}}\ -{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - benchmark_job_length=benchmark_job_length, - benchmark_format_length=benchmark_format_length, - seq_benchmark_bandwidth_length=benchmark_seq_bw_length, - seq_benchmark_iops_length=benchmark_seq_iops_length, - rand_benchmark_bandwidth_length=benchmark_rand_bw_length, - rand_benchmark_iops_length=benchmark_rand_iops_length, - benchmark_job="Job", - benchmark_format="Format", - seq_benchmark_bandwidth="R/W Bandwith/s", - seq_benchmark_iops="R/W IOPS", - rand_benchmark_bandwidth="R/W Bandwith/s", - rand_benchmark_iops="R/W IOPS", - ) - ) - - for benchmark in benchmark_information: - benchmark_job = 
benchmark["job"] - benchmark_format = benchmark.get("test_format", 0) # noqa: F841 - - if benchmark["benchmark_result"] == "Running": - seq_benchmark_bandwidth = "Running" - seq_benchmark_iops = "Running" - rand_benchmark_bandwidth = "Running" - rand_benchmark_iops = "Running" - else: - benchmark_data = benchmark["benchmark_result"] - ( - seq_benchmark_bandwidth, - seq_benchmark_iops, - rand_benchmark_bandwidth, - rand_benchmark_iops, - ) = get_benchmark_list_results(benchmark_format, benchmark_data) - - benchmark_list_output.append( - "{bold}\ -{benchmark_job: <{benchmark_job_length}} \ -{benchmark_format: <{benchmark_format_length}} \ -{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \ -{seq_benchmark_iops: <{seq_benchmark_iops_length}} \ -{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \ -{rand_benchmark_iops: <{rand_benchmark_iops_length}}\ -{end_bold}".format( - bold="", - end_bold="", - benchmark_job_length=benchmark_job_length, - benchmark_format_length=benchmark_format_length, - seq_benchmark_bandwidth_length=benchmark_seq_bw_length, - seq_benchmark_iops_length=benchmark_seq_iops_length, - rand_benchmark_bandwidth_length=benchmark_rand_bw_length, - rand_benchmark_iops_length=benchmark_rand_iops_length, - benchmark_job=benchmark_job, - benchmark_format=benchmark_format, - seq_benchmark_bandwidth=seq_benchmark_bandwidth, - seq_benchmark_iops=seq_benchmark_iops, - rand_benchmark_bandwidth=rand_benchmark_bandwidth, - rand_benchmark_iops=rand_benchmark_iops, - ) - ) - - return "\n".join(benchmark_list_output) - - -def format_info_benchmark(config, oformat, benchmark_information): - # This matrix is a list of the possible format functions for a benchmark result - # It is extensable in the future should newer formats be required. 
- benchmark_matrix = { - 0: format_info_benchmark_legacy, - 1: format_info_benchmark_json, - } - - benchmark_version = benchmark_information[0]["test_format"] - - if oformat == "json-pretty": - return dumps(benchmark_information, indent=4) - elif oformat == "json": - return dumps(benchmark_information) - else: - return benchmark_matrix[benchmark_version](config, benchmark_information[0]) - - -def format_info_benchmark_legacy(config, benchmark_information): - if benchmark_information["benchmark_result"] == "Running": - return "Benchmark test is still running." - - benchmark_details = benchmark_information["benchmark_result"] - - # Format a nice output; do this line-by-line then concat the elements at the end - ainformation = [] - ainformation.append( - "{}Storage Benchmark details:{}".format(ansiprint.bold(), ansiprint.end()) - ) - - nice_test_name_map = { - "seq_read": "Sequential Read (4M blocks)", - "seq_write": "Sequential Write (4M blocks)", - "rand_read_4M": "Random Read (4M blocks)", - "rand_write_4M": "Random Write (4M blocks)", - "rand_read_4K": "Random Read (4K blocks)", - "rand_write_4K": "Random Write (4K blocks)", - "rand_read_4K_lowdepth": "Random Read (4K blocks, single-queue)", - "rand_write_4K_lowdepth": "Random Write (4K blocks, single-queue)", - } - - test_name_length = 30 - overall_label_length = 12 - overall_column_length = 8 - bandwidth_label_length = 9 - bandwidth_column_length = 10 - iops_column_length = 6 - latency_column_length = 8 - cpuutil_label_length = 11 - cpuutil_column_length = 9 - - # Work around old results that did not have these tests - if "rand_read_4K_lowdepth" not in benchmark_details: - del nice_test_name_map["rand_read_4K_lowdepth"] - del nice_test_name_map["rand_write_4K_lowdepth"] - - for test in benchmark_details: - # Work around old results that had these obsolete tests - if test == "rand_read_256K" or test == "rand_write_256K": - continue - - _test_name_length = len(nice_test_name_map[test]) - if _test_name_length > 
test_name_length: - test_name_length = _test_name_length - - for element in benchmark_details[test]["overall"]: - _element_length = len(benchmark_details[test]["overall"][element]) - if _element_length > overall_column_length: - overall_column_length = _element_length - - for element in benchmark_details[test]["bandwidth"]: - try: - _element_length = len( - format_bytes_tohuman( - int(float(benchmark_details[test]["bandwidth"][element])) - ) - ) - except Exception: - _element_length = len(benchmark_details[test]["bandwidth"][element]) - if _element_length > bandwidth_column_length: - bandwidth_column_length = _element_length - - for element in benchmark_details[test]["iops"]: - try: - _element_length = len( - format_ops_tohuman( - int(float(benchmark_details[test]["iops"][element])) - ) - ) - except Exception: - _element_length = len(benchmark_details[test]["iops"][element]) - if _element_length > iops_column_length: - iops_column_length = _element_length - - for element in benchmark_details[test]["latency"]: - _element_length = len(benchmark_details[test]["latency"][element]) - if _element_length > latency_column_length: - latency_column_length = _element_length - - for element in benchmark_details[test]["cpu"]: - _element_length = len(benchmark_details[test]["cpu"][element]) - if _element_length > cpuutil_column_length: - cpuutil_column_length = _element_length - - for test in benchmark_details: - # Work around old results that had these obsolete tests - if test == "rand_read_256K" or test == "rand_write_256K": - continue - - ainformation.append("") - - test_details = benchmark_details[test] - - # Top row (Headers) - ainformation.append( - "{bold}\ -{test_name: <{test_name_length}} \ -{overall_label: <{overall_label_length}} \ -{overall: <{overall_length}} \ -{bandwidth_label: <{bandwidth_label_length}} \ -{bandwidth: <{bandwidth_length}} \ -{iops: <{iops_length}} \ -{latency: <{latency_length}} \ -{cpuutil_label: <{cpuutil_label_length}} \ -{cpuutil: 
<{cpuutil_length}} \ -{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - test_name="Test:", - test_name_length=test_name_length, - overall_label="", - overall_label_length=overall_label_length, - overall="General", - overall_length=overall_column_length, - bandwidth_label="", - bandwidth_label_length=bandwidth_label_length, - bandwidth="Bandwidth", - bandwidth_length=bandwidth_column_length, - iops="IOPS", - iops_length=iops_column_length, - latency="Latency (μs)", - latency_length=latency_column_length, - cpuutil_label="", - cpuutil_label_length=cpuutil_label_length, - cpuutil="CPU Util", - cpuutil_length=cpuutil_column_length, - ) - ) - # Second row (Test, Size, Min, User)) - ainformation.append( - "{bold}\ -{test_name: <{test_name_length}} \ -{overall_label: >{overall_label_length}} \ -{overall: <{overall_length}} \ -{bandwidth_label: >{bandwidth_label_length}} \ -{bandwidth: <{bandwidth_length}} \ -{iops: <{iops_length}} \ -{latency: <{latency_length}} \ -{cpuutil_label: >{cpuutil_label_length}} \ -{cpuutil: <{cpuutil_length}} \ -{end_bold}".format( - bold="", - end_bold="", - test_name=nice_test_name_map[test], - test_name_length=test_name_length, - overall_label="Test Size:", - overall_label_length=overall_label_length, - overall=format_bytes_tohuman( - int(test_details["overall"]["iosize"]) * 1024 - ), - overall_length=overall_column_length, - bandwidth_label="Min:", - bandwidth_label_length=bandwidth_label_length, - bandwidth=format_bytes_tohuman( - int(test_details["bandwidth"]["min"]) * 1024 - ), - bandwidth_length=bandwidth_column_length, - iops=format_ops_tohuman(int(test_details["iops"]["min"])), - iops_length=iops_column_length, - latency=test_details["latency"]["min"], - latency_length=latency_column_length, - cpuutil_label="User:", - cpuutil_label_length=cpuutil_label_length, - cpuutil=test_details["cpu"]["user"], - cpuutil_length=cpuutil_column_length, - ) - ) - # Third row (blank, BW/s, Max, System)) - ainformation.append( 
- "{bold}\ -{test_name: <{test_name_length}} \ -{overall_label: >{overall_label_length}} \ -{overall: <{overall_length}} \ -{bandwidth_label: >{bandwidth_label_length}} \ -{bandwidth: <{bandwidth_length}} \ -{iops: <{iops_length}} \ -{latency: <{latency_length}} \ -{cpuutil_label: >{cpuutil_label_length}} \ -{cpuutil: <{cpuutil_length}} \ -{end_bold}".format( - bold="", - end_bold="", - test_name="", - test_name_length=test_name_length, - overall_label="Bandwidth/s:", - overall_label_length=overall_label_length, - overall=format_bytes_tohuman( - int(test_details["overall"]["bandwidth"]) * 1024 - ), - overall_length=overall_column_length, - bandwidth_label="Max:", - bandwidth_label_length=bandwidth_label_length, - bandwidth=format_bytes_tohuman( - int(test_details["bandwidth"]["max"]) * 1024 - ), - bandwidth_length=bandwidth_column_length, - iops=format_ops_tohuman(int(test_details["iops"]["max"])), - iops_length=iops_column_length, - latency=test_details["latency"]["max"], - latency_length=latency_column_length, - cpuutil_label="System:", - cpuutil_label_length=cpuutil_label_length, - cpuutil=test_details["cpu"]["system"], - cpuutil_length=cpuutil_column_length, - ) - ) - # Fourth row (blank, IOPS, Mean, CtxSq)) - ainformation.append( - "{bold}\ -{test_name: <{test_name_length}} \ -{overall_label: >{overall_label_length}} \ -{overall: <{overall_length}} \ -{bandwidth_label: >{bandwidth_label_length}} \ -{bandwidth: <{bandwidth_length}} \ -{iops: <{iops_length}} \ -{latency: <{latency_length}} \ -{cpuutil_label: >{cpuutil_label_length}} \ -{cpuutil: <{cpuutil_length}} \ -{end_bold}".format( - bold="", - end_bold="", - test_name="", - test_name_length=test_name_length, - overall_label="IOPS:", - overall_label_length=overall_label_length, - overall=format_ops_tohuman(int(test_details["overall"]["iops"])), - overall_length=overall_column_length, - bandwidth_label="Mean:", - bandwidth_label_length=bandwidth_label_length, - bandwidth=format_bytes_tohuman( - 
int(float(test_details["bandwidth"]["mean"])) * 1024 - ), - bandwidth_length=bandwidth_column_length, - iops=format_ops_tohuman(int(float(test_details["iops"]["mean"]))), - iops_length=iops_column_length, - latency=test_details["latency"]["mean"], - latency_length=latency_column_length, - cpuutil_label="CtxSw:", - cpuutil_label_length=cpuutil_label_length, - cpuutil=test_details["cpu"]["ctxsw"], - cpuutil_length=cpuutil_column_length, - ) - ) - # Fifth row (blank, Runtime, StdDev, MajFault)) - ainformation.append( - "{bold}\ -{test_name: <{test_name_length}} \ -{overall_label: >{overall_label_length}} \ -{overall: <{overall_length}} \ -{bandwidth_label: >{bandwidth_label_length}} \ -{bandwidth: <{bandwidth_length}} \ -{iops: <{iops_length}} \ -{latency: <{latency_length}} \ -{cpuutil_label: >{cpuutil_label_length}} \ -{cpuutil: <{cpuutil_length}} \ -{end_bold}".format( - bold="", - end_bold="", - test_name="", - test_name_length=test_name_length, - overall_label="Runtime (s):", - overall_label_length=overall_label_length, - overall=int(test_details["overall"]["runtime"]) / 1000.0, - overall_length=overall_column_length, - bandwidth_label="StdDev:", - bandwidth_label_length=bandwidth_label_length, - bandwidth=format_bytes_tohuman( - int(float(test_details["bandwidth"]["stdev"])) * 1024 - ), - bandwidth_length=bandwidth_column_length, - iops=format_ops_tohuman(int(float(test_details["iops"]["stdev"]))), - iops_length=iops_column_length, - latency=test_details["latency"]["stdev"], - latency_length=latency_column_length, - cpuutil_label="MajFault:", - cpuutil_label_length=cpuutil_label_length, - cpuutil=test_details["cpu"]["majfault"], - cpuutil_length=cpuutil_column_length, - ) - ) - # Sixth row (blank, blank, Samples, MinFault)) - ainformation.append( - "{bold}\ -{test_name: <{test_name_length}} \ -{overall_label: >{overall_label_length}} \ -{overall: <{overall_length}} \ -{bandwidth_label: >{bandwidth_label_length}} \ -{bandwidth: <{bandwidth_length}} \ -{iops: 
<{iops_length}} \ -{latency: <{latency_length}} \ -{cpuutil_label: >{cpuutil_label_length}} \ -{cpuutil: <{cpuutil_length}} \ -{end_bold}".format( - bold="", - end_bold="", - test_name="", - test_name_length=test_name_length, - overall_label="", - overall_label_length=overall_label_length, - overall="", - overall_length=overall_column_length, - bandwidth_label="Samples:", - bandwidth_label_length=bandwidth_label_length, - bandwidth=test_details["bandwidth"]["numsamples"], - bandwidth_length=bandwidth_column_length, - iops=test_details["iops"]["numsamples"], - iops_length=iops_column_length, - latency="", - latency_length=latency_column_length, - cpuutil_label="MinFault:", - cpuutil_label_length=cpuutil_label_length, - cpuutil=test_details["cpu"]["minfault"], - cpuutil_length=cpuutil_column_length, - ) - ) - - ainformation.append("") - - return "\n".join(ainformation) - - -def format_info_benchmark_json(config, benchmark_information): - if benchmark_information["benchmark_result"] == "Running": - return "Benchmark test is still running." 
- - benchmark_details = benchmark_information["benchmark_result"] - - # Format a nice output; do this line-by-line then concat the elements at the end - ainformation = [] - ainformation.append( - "{}Storage Benchmark details:{}".format(ansiprint.bold(), ansiprint.end()) - ) - - nice_test_name_map = { - "seq_read": "Sequential Read (4M blocks, queue depth 64)", - "seq_write": "Sequential Write (4M blocks, queue depth 64)", - "rand_read_4M": "Random Read (4M blocks, queue depth 64)", - "rand_write_4M": "Random Write (4M blocks queue depth 64)", - "rand_read_4K": "Random Read (4K blocks, queue depth 64)", - "rand_write_4K": "Random Write (4K blocks, queue depth 64)", - "rand_read_4K_lowdepth": "Random Read (4K blocks, queue depth 1)", - "rand_write_4K_lowdepth": "Random Write (4K blocks, queue depth 1)", - } - - for test in benchmark_details: - ainformation.append("") - - io_class = None - for _io_class in ["read", "write"]: - if benchmark_details[test]["jobs"][0][_io_class]["io_bytes"] > 0: - io_class = _io_class - if io_class is None: - continue - - job_details = benchmark_details[test]["jobs"][0] - - # Calculate the unified latency categories (in us) - latency_tree = list() - for field in job_details["latency_ns"]: - bucket = str(int(field) / 1000) - latency_tree.append((bucket, job_details["latency_ns"][field])) - for field in job_details["latency_us"]: - bucket = field - latency_tree.append((bucket, job_details["latency_us"][field])) - for field in job_details["latency_ms"]: - # That one annoying one - if field == ">=2000": - bucket = ">=2000000" - else: - bucket = str(int(field) * 1000) - latency_tree.append((bucket, job_details["latency_ms"][field])) - - # Find the minimum entry without a zero - useful_latency_tree = list() - for element in latency_tree: - if element[1] != 0: - useful_latency_tree.append(element) - - max_rows = 9 - if len(useful_latency_tree) > 9: - max_rows = len(useful_latency_tree) - elif len(useful_latency_tree) < 9: - while 
len(useful_latency_tree) < 9: - useful_latency_tree.append(("", "")) - - # Format the static data - overall_label = [ - "Overall BW/s:", - "Overall IOPS:", - "Total I/O:", - "Runtime (s):", - "User CPU %:", - "System CPU %:", - "Ctx Switches:", - "Major Faults:", - "Minor Faults:", - ] - while len(overall_label) < max_rows: - overall_label.append("") - - overall_data = [ - format_bytes_tohuman(int(job_details[io_class]["bw_bytes"])), - format_ops_tohuman(int(job_details[io_class]["iops"])), - format_bytes_tohuman(int(job_details[io_class]["io_bytes"])), - job_details["job_runtime"] / 1000, - job_details["usr_cpu"], - job_details["sys_cpu"], - job_details["ctx"], - job_details["majf"], - job_details["minf"], - ] - while len(overall_data) < max_rows: - overall_data.append("") - - bandwidth_label = [ - "Min:", - "Max:", - "Mean:", - "StdDev:", - "Samples:", - "", - "", - "", - "", - ] - while len(bandwidth_label) < max_rows: - bandwidth_label.append("") - - bandwidth_data = [ - format_bytes_tohuman(int(job_details[io_class]["bw_min"]) * 1024), - format_bytes_tohuman(int(job_details[io_class]["bw_max"]) * 1024), - format_bytes_tohuman(int(job_details[io_class]["bw_mean"]) * 1024), - format_bytes_tohuman(int(job_details[io_class]["bw_dev"]) * 1024), - job_details[io_class]["bw_samples"], - "", - "", - "", - "", - ] - while len(bandwidth_data) < max_rows: - bandwidth_data.append("") - - iops_data = [ - format_ops_tohuman(int(job_details[io_class]["iops_min"])), - format_ops_tohuman(int(job_details[io_class]["iops_max"])), - format_ops_tohuman(int(job_details[io_class]["iops_mean"])), - format_ops_tohuman(int(job_details[io_class]["iops_stddev"])), - job_details[io_class]["iops_samples"], - "", - "", - "", - "", - ] - while len(iops_data) < max_rows: - iops_data.append("") - - lat_data = [ - int(job_details[io_class]["lat_ns"]["min"]) / 1000, - int(job_details[io_class]["lat_ns"]["max"]) / 1000, - int(job_details[io_class]["lat_ns"]["mean"]) / 1000, - 
int(job_details[io_class]["lat_ns"]["stddev"]) / 1000, - "", - "", - "", - "", - "", - ] - while len(lat_data) < max_rows: - lat_data.append("") - - # Format the dynamic buckets - lat_bucket_label = list() - lat_bucket_data = list() - for element in useful_latency_tree: - lat_bucket_label.append(element[0]) - lat_bucket_data.append(element[1]) - - # Column default widths - overall_label_length = 0 - overall_column_length = 0 - bandwidth_label_length = 0 - bandwidth_column_length = 11 - iops_column_length = 4 - latency_column_length = 12 - latency_bucket_label_length = 0 - - # Column layout: - # General Bandwidth IOPS Latency Percentiles - # --------- ---------- -------- -------- --------------- - # Size Min Min Min A - # BW Max Max Max B - # IOPS Mean Mean Mean ... - # Runtime StdDev StdDev StdDev Z - # UsrCPU Samples Samples - # SysCPU - # CtxSw - # MajFault - # MinFault - - # Set column widths - for item in overall_label: - _item_length = len(str(item)) - if _item_length > overall_label_length: - overall_label_length = _item_length - - for item in overall_data: - _item_length = len(str(item)) - if _item_length > overall_column_length: - overall_column_length = _item_length - - test_name_length = len(nice_test_name_map[test]) - if test_name_length > overall_label_length + overall_column_length: - _diff = test_name_length - (overall_label_length + overall_column_length) - overall_column_length += _diff - - for item in bandwidth_label: - _item_length = len(str(item)) - if _item_length > bandwidth_label_length: - bandwidth_label_length = _item_length - - for item in bandwidth_data: - _item_length = len(str(item)) - if _item_length > bandwidth_column_length: - bandwidth_column_length = _item_length - - for item in iops_data: - _item_length = len(str(item)) - if _item_length > iops_column_length: - iops_column_length = _item_length - - for item in lat_data: - _item_length = len(str(item)) - if _item_length > latency_column_length: - latency_column_length = _item_length 
- - for item in lat_bucket_label: - _item_length = len(str(item)) - if _item_length > latency_bucket_label_length: - latency_bucket_label_length = _item_length - - # Top row (Headers) - ainformation.append( - "{bold}\ -{overall_label: <{overall_label_length}} \ -{bandwidth_label: <{bandwidth_label_length}} \ -{bandwidth: <{bandwidth_length}} \ -{iops: <{iops_length}} \ -{latency: <{latency_length}} \ -{latency_bucket_label: <{latency_bucket_label_length}} \ -{latency_bucket} \ -{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - overall_label=nice_test_name_map[test], - overall_label_length=overall_label_length, - bandwidth_label="", - bandwidth_label_length=bandwidth_label_length, - bandwidth="Bandwidth/s", - bandwidth_length=bandwidth_column_length, - iops="IOPS", - iops_length=iops_column_length, - latency="Latency (μs)", - latency_length=latency_column_length, - latency_bucket_label="Latency Buckets (μs/%)", - latency_bucket_label_length=latency_bucket_label_length, - latency_bucket="", - ) - ) - - for idx in range(0, max_rows): - # Top row (Headers) - ainformation.append( - "{bold}\ -{overall_label: >{overall_label_length}} \ -{overall: <{overall_length}} \ -{bandwidth_label: >{bandwidth_label_length}} \ -{bandwidth: <{bandwidth_length}} \ -{iops: <{iops_length}} \ -{latency: <{latency_length}} \ -{latency_bucket_label: >{latency_bucket_label_length}} \ -{latency_bucket} \ -{end_bold}".format( - bold="", - end_bold="", - overall_label=overall_label[idx], - overall_label_length=overall_label_length, - overall=overall_data[idx], - overall_length=overall_column_length, - bandwidth_label=bandwidth_label[idx], - bandwidth_label_length=bandwidth_label_length, - bandwidth=bandwidth_data[idx], - bandwidth_length=bandwidth_column_length, - iops=iops_data[idx], - iops_length=iops_column_length, - latency=lat_data[idx], - latency_length=latency_column_length, - latency_bucket_label=lat_bucket_label[idx], - 
latency_bucket_label_length=latency_bucket_label_length, - latency_bucket=lat_bucket_data[idx], - ) - ) - - return "\n".join(ainformation) diff --git a/client-cli-old/pvc/lib/cluster.py b/client-cli-old/pvc/lib/cluster.py deleted file mode 100644 index c93fdb2d..00000000 --- a/client-cli-old/pvc/lib/cluster.py +++ /dev/null @@ -1,313 +0,0 @@ -#!/usr/bin/env python3 - -# cluster.py - PVC CLI client function library, cluster management -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -############################################################################### - -import json - -import pvc.lib.ansiprint as ansiprint -from pvc.lib.common import call_api - - -def initialize(config, overwrite=False): - """ - Initialize the PVC cluster - - API endpoint: GET /api/v1/initialize - API arguments: overwrite, yes-i-really-mean-it - API schema: {json_data_object} - """ - params = {"yes-i-really-mean-it": "yes", "overwrite": overwrite} - response = call_api(config, "post", "/initialize", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def backup(config): - """ - Get a JSON backup of the cluster - - API endpoint: GET /api/v1/backup - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/backup") - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def restore(config, cluster_data): - """ - Restore a JSON backup to the cluster - - API endpoint: POST /api/v1/restore - API arguments: yes-i-really-mean-it - API schema: {json_data_object} - """ - cluster_data_json = json.dumps(cluster_data) - - params = {"yes-i-really-mean-it": "yes"} - data = {"cluster_data": cluster_data_json} - response = call_api(config, "post", "/restore", params=params, data=data) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def maintenance_mode(config, state): - """ - Enable or disable PVC cluster maintenance mode - - API endpoint: POST /api/v1/status - API arguments: {state}={state} - API schema: {json_data_object} - """ - params = {"state": state} - response = call_api(config, "post", "/status", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def 
get_info(config): - """ - Get status of the PVC cluster - - API endpoint: GET /api/v1/status - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/status") - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def format_info(cluster_information, oformat): - if oformat == "json": - return json.dumps(cluster_information) - - if oformat == "json-pretty": - return json.dumps(cluster_information, indent=4) - - # Plain formatting, i.e. human-readable - if ( - cluster_information.get("maintenance") == "true" - or cluster_information.get("cluster_health", {}).get("health", "N/A") == "N/A" - ): - health_colour = ansiprint.blue() - elif cluster_information.get("cluster_health", {}).get("health", 100) > 90: - health_colour = ansiprint.green() - elif cluster_information.get("cluster_health", {}).get("health", 100) > 50: - health_colour = ansiprint.yellow() - else: - health_colour = ansiprint.red() - - ainformation = [] - - ainformation.append( - "{}PVC cluster status:{}".format(ansiprint.bold(), ansiprint.end()) - ) - ainformation.append("") - - health_text = ( - f"{cluster_information.get('cluster_health', {}).get('health', 'N/A')}" - ) - if health_text != "N/A": - health_text += "%" - if cluster_information.get("maintenance") == "true": - health_text += " (maintenance on)" - - ainformation.append( - "{}Cluster health:{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - health_colour, - health_text, - ansiprint.end(), - ) - ) - if cluster_information.get("cluster_health", {}).get("messages"): - health_messages = "\n > ".join( - sorted(cluster_information["cluster_health"]["messages"]) - ) - ainformation.append( - "{}Health messages:{} > {}".format( - ansiprint.purple(), - ansiprint.end(), - health_messages, - ) - ) - else: - ainformation.append( - "{}Health messages:{} N/A".format( - ansiprint.purple(), - ansiprint.end(), - ) - ) - - if oformat == 
"short": - return "\n".join(ainformation) - - ainformation.append("") - ainformation.append( - "{}Primary node:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["primary_node"] - ) - ) - ainformation.append( - "{}PVC version:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - cluster_information.get("pvc_version", "N/A"), - ) - ) - ainformation.append( - "{}Cluster upstream IP:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["upstream_ip"] - ) - ) - ainformation.append("") - ainformation.append( - "{}Total nodes:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["nodes"]["total"] - ) - ) - ainformation.append( - "{}Total VMs:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["vms"]["total"] - ) - ) - ainformation.append( - "{}Total networks:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["networks"] - ) - ) - ainformation.append( - "{}Total OSDs:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["osds"]["total"] - ) - ) - ainformation.append( - "{}Total pools:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["pools"] - ) - ) - ainformation.append( - "{}Total volumes:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["volumes"] - ) - ) - ainformation.append( - "{}Total snapshots:{} {}".format( - ansiprint.purple(), ansiprint.end(), cluster_information["snapshots"] - ) - ) - - nodes_string = "{}Nodes:{} {}/{} {}ready,run{}".format( - ansiprint.purple(), - ansiprint.end(), - cluster_information["nodes"].get("run,ready", 0), - cluster_information["nodes"].get("total", 0), - ansiprint.green(), - ansiprint.end(), - ) - for state, count in cluster_information["nodes"].items(): - if state == "total" or state == "run,ready": - continue - - nodes_string += " {}/{} {}{}{}".format( - count, - cluster_information["nodes"]["total"], - ansiprint.yellow(), - state, - ansiprint.end(), - ) 
- - ainformation.append("") - ainformation.append(nodes_string) - - vms_string = "{}VMs:{} {}/{} {}start{}".format( - ansiprint.purple(), - ansiprint.end(), - cluster_information["vms"].get("start", 0), - cluster_information["vms"].get("total", 0), - ansiprint.green(), - ansiprint.end(), - ) - for state, count in cluster_information["vms"].items(): - if state == "total" or state == "start": - continue - - if state in ["disable", "migrate", "unmigrate", "provision"]: - colour = ansiprint.blue() - else: - colour = ansiprint.yellow() - - vms_string += " {}/{} {}{}{}".format( - count, cluster_information["vms"]["total"], colour, state, ansiprint.end() - ) - - ainformation.append("") - ainformation.append(vms_string) - - if cluster_information["osds"]["total"] > 0: - osds_string = "{}Ceph OSDs:{} {}/{} {}up,in{}".format( - ansiprint.purple(), - ansiprint.end(), - cluster_information["osds"].get("up,in", 0), - cluster_information["osds"].get("total", 0), - ansiprint.green(), - ansiprint.end(), - ) - for state, count in cluster_information["osds"].items(): - if state == "total" or state == "up,in": - continue - - osds_string += " {}/{} {}{}{}".format( - count, - cluster_information["osds"]["total"], - ansiprint.yellow(), - state, - ansiprint.end(), - ) - - ainformation.append("") - ainformation.append(osds_string) - - ainformation.append("") - return "\n".join(ainformation) diff --git a/client-cli-old/pvc/lib/common.py b/client-cli-old/pvc/lib/common.py deleted file mode 100644 index 8071884c..00000000 --- a/client-cli-old/pvc/lib/common.py +++ /dev/null @@ -1,201 +0,0 @@ -#!/usr/bin/env python3 - -# common.py - PVC CLI client function library, Common functions -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -import os -import math -import time -import requests -import click -from urllib3 import disable_warnings - - -def format_bytes(size_bytes): - byte_unit_matrix = { - "B": 1, - "K": 1024, - "M": 1024 * 1024, - "G": 1024 * 1024 * 1024, - "T": 1024 * 1024 * 1024 * 1024, - "P": 1024 * 1024 * 1024 * 1024 * 1024, - } - human_bytes = "0B" - for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get): - formatted_bytes = int(math.ceil(size_bytes / byte_unit_matrix[unit])) - if formatted_bytes < 10000: - human_bytes = "{}{}".format(formatted_bytes, unit) - break - return human_bytes - - -def format_metric(integer): - integer_unit_matrix = { - "": 1, - "K": 1000, - "M": 1000 * 1000, - "B": 1000 * 1000 * 1000, - "T": 1000 * 1000 * 1000 * 1000, - "Q": 1000 * 1000 * 1000 * 1000 * 1000, - } - human_integer = "0" - for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get): - formatted_integer = int(math.ceil(integer / integer_unit_matrix[unit])) - if formatted_integer < 10000: - human_integer = "{}{}".format(formatted_integer, unit) - break - return human_integer - - -class UploadProgressBar(object): - def __init__(self, filename, end_message="", end_nl=True): - file_size = os.path.getsize(filename) - file_size_human = format_bytes(file_size) - click.echo("Uploading file (total size {})...".format(file_size_human)) - - self.length = file_size - self.time_last = int(round(time.time() * 1000)) - 1000 - self.bytes_last = 0 - self.bytes_diff = 0 - self.is_end = False - - self.end_message = end_message - self.end_nl = end_nl - if 
not self.end_nl: - self.end_suffix = " " - else: - self.end_suffix = "" - - self.bar = click.progressbar(length=self.length, show_eta=True) - - def update(self, monitor): - bytes_cur = monitor.bytes_read - self.bytes_diff += bytes_cur - self.bytes_last - if self.bytes_last == bytes_cur: - self.is_end = True - self.bytes_last = bytes_cur - - time_cur = int(round(time.time() * 1000)) - if (time_cur - 1000) > self.time_last: - self.time_last = time_cur - self.bar.update(self.bytes_diff) - self.bytes_diff = 0 - - if self.is_end: - self.bar.update(self.bytes_diff) - self.bytes_diff = 0 - click.echo() - click.echo() - if self.end_message: - click.echo(self.end_message + self.end_suffix, nl=self.end_nl) - - -class ErrorResponse(requests.Response): - def __init__(self, json_data, status_code): - self.json_data = json_data - self.status_code = status_code - - def json(self): - return self.json_data - - -def call_api( - config, - operation, - request_uri, - headers={}, - params=None, - data=None, - files=None, -): - # Set the connect timeout to 2 seconds but extremely long (48 hour) data timeout - timeout = (2.05, 172800) - - # Craft the URI - uri = "{}://{}{}{}".format( - config["api_scheme"], config["api_host"], config["api_prefix"], request_uri - ) - - # Craft the authentication header if required - if config["api_key"]: - headers["X-Api-Key"] = config["api_key"] - - # Determine the request type and hit the API - disable_warnings() - try: - if operation == "get": - response = requests.get( - uri, - timeout=timeout, - headers=headers, - params=params, - data=data, - verify=config["verify_ssl"], - ) - if operation == "post": - response = requests.post( - uri, - timeout=timeout, - headers=headers, - params=params, - data=data, - files=files, - verify=config["verify_ssl"], - ) - if operation == "put": - response = requests.put( - uri, - timeout=timeout, - headers=headers, - params=params, - data=data, - files=files, - verify=config["verify_ssl"], - ) - if operation == 
"patch": - response = requests.patch( - uri, - timeout=timeout, - headers=headers, - params=params, - data=data, - verify=config["verify_ssl"], - ) - if operation == "delete": - response = requests.delete( - uri, - timeout=timeout, - headers=headers, - params=params, - data=data, - verify=config["verify_ssl"], - ) - except Exception as e: - message = "Failed to connect to the API: {}".format(e) - response = ErrorResponse({"message": message}, 500) - - # Display debug output - if config["debug"]: - click.echo("API endpoint: {}".format(uri), err=True) - click.echo("Response code: {}".format(response.status_code), err=True) - click.echo("Response headers: {}".format(response.headers), err=True) - click.echo(err=True) - - # Return the response object - return response diff --git a/client-cli-old/pvc/lib/network.py b/client-cli-old/pvc/lib/network.py deleted file mode 100644 index 8b07960f..00000000 --- a/client-cli-old/pvc/lib/network.py +++ /dev/null @@ -1,1487 +0,0 @@ -#!/usr/bin/env python3 - -# network.py - PVC CLI client function library, Network functions -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -############################################################################### - -import re -import pvc.lib.ansiprint as ansiprint -from pvc.lib.common import call_api - - -def isValidMAC(macaddr): - allowed = re.compile( - r""" - ( - ^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$ - ) - """, - re.VERBOSE | re.IGNORECASE, - ) - - if allowed.match(macaddr): - return True - else: - return False - - -def isValidIP(ipaddr): - ip4_blocks = str(ipaddr).split(".") - if len(ip4_blocks) == 4: - for block in ip4_blocks: - # Check if number is digit, if not checked before calling this function - if not block.isdigit(): - return False - tmp = int(block) - if 0 > tmp > 255: - return False - return True - return False - - -# -# Primary functions -# -def net_info(config, net): - """ - Get information about network - - API endpoint: GET /api/v1/network/{net} - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/network/{net}".format(net=net)) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "Network not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def net_list(config, limit): - """ - Get list information about networks (limited by {limit}) - - API endpoint: GET /api/v1/network - API arguments: limit={limit} - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - params = dict() - if limit: - params["limit"] = limit - - response = call_api(config, "get", "/network", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def net_add( - config, - vni, - description, - nettype, - mtu, - domain, - name_servers, - ip4_network, - ip4_gateway, - ip6_network, - ip6_gateway, - dhcp4_flag, - dhcp4_start, - dhcp4_end, -): - """ - Add new network - - API endpoint: POST /api/v1/network - API arguments: lots - API schema: {"message":"{data}"} - """ - params = { - "vni": vni, - "description": description, - "nettype": nettype, - "mtu": mtu, - "domain": domain, - "name_servers": name_servers, - "ip4_network": ip4_network, - "ip4_gateway": ip4_gateway, - "ip6_network": ip6_network, - "ip6_gateway": ip6_gateway, - "dhcp4": dhcp4_flag, - "dhcp4_start": dhcp4_start, - "dhcp4_end": dhcp4_end, - } - response = call_api(config, "post", "/network", params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def net_modify( - config, - net, - description, - mtu, - domain, - name_servers, - ip4_network, - ip4_gateway, - ip6_network, - ip6_gateway, - dhcp4_flag, - dhcp4_start, - dhcp4_end, -): - """ - Modify a network - - API endpoint: POST /api/v1/network/{net} - API arguments: lots - API schema: {"message":"{data}"} - """ - params = dict() - if description is not None: - params["description"] = description - if mtu is not None: - params["mtu"] = mtu - if domain is not None: - params["domain"] = domain - if name_servers is not None: - params["name_servers"] = name_servers - if ip4_network is not None: - params["ip4_network"] = ip4_network - if ip4_gateway is not None: - params["ip4_gateway"] = ip4_gateway - if ip6_network is not None: - params["ip6_network"] = ip6_network - if ip6_gateway is not None: - params["ip6_gateway"] = ip6_gateway - if dhcp4_flag 
is not None: - params["dhcp4"] = dhcp4_flag - if dhcp4_start is not None: - params["dhcp4_start"] = dhcp4_start - if dhcp4_end is not None: - params["dhcp4_end"] = dhcp4_end - - response = call_api(config, "put", "/network/{net}".format(net=net), params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def net_remove(config, net): - """ - Remove a network - - API endpoint: DELETE /api/v1/network/{net} - API arguments: - API schema: {"message":"{data}"} - """ - response = call_api(config, "delete", "/network/{net}".format(net=net)) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -# -# DHCP lease functions -# -def net_dhcp_info(config, net, mac): - """A - Get information about network DHCP lease - - API endpoint: GET /api/v1/network/{net}/lease/{mac} - API arguments: - API schema: {json_data_object} - """ - response = call_api( - config, "get", "/network/{net}/lease/{mac}".format(net=net, mac=mac) - ) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "Lease not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def net_dhcp_list(config, net, limit, only_static=False): - """ - Get list information about leases (limited by {limit}) - - API endpoint: GET /api/v1/network/{net}/lease - API arguments: limit={limit}, static={only_static} - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - params = dict() - if limit: - params["limit"] = limit - - if only_static: - params["static"] = True - else: - params["static"] = False - - response = call_api( - config, "get", "/network/{net}/lease".format(net=net), params=params - ) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def net_dhcp_add(config, net, ipaddr, macaddr, hostname): - """ - Add new network DHCP lease - - API endpoint: POST /api/v1/network/{net}/lease - API arguments: macaddress=macaddr, ipaddress=ipaddr, hostname=hostname - API schema: {"message":"{data}"} - """ - params = {"macaddress": macaddr, "ipaddress": ipaddr, "hostname": hostname} - response = call_api( - config, "post", "/network/{net}/lease".format(net=net), params=params - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def net_dhcp_remove(config, net, mac): - """ - Remove a network DHCP lease - - API endpoint: DELETE /api/v1/network/{vni}/lease/{mac} - API arguments: - API schema: {"message":"{data}"} - """ - response = call_api( - config, "delete", "/network/{net}/lease/{mac}".format(net=net, mac=mac) - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -# -# ACL functions -# -def net_acl_info(config, net, description): - """ - Get information about network ACL - - API endpoint: GET /api/v1/network/{net}/acl/{description} - API arguments: - API schema: {json_data_object} - """ - response = call_api( - config, - "get", - "/network/{net}/acl/{description}".format(net=net, description=description), - ) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "ACL not found." 
- else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def net_acl_list(config, net, limit, direction): - """ - Get list information about ACLs (limited by {limit}) - - API endpoint: GET /api/v1/network/{net}/acl - API arguments: limit={limit}, direction={direction} - API schema: [{json_data_object},{json_data_object},etc.] - """ - params = dict() - if limit: - params["limit"] = limit - if direction is not None: - params["direction"] = direction - - response = call_api( - config, "get", "/network/{net}/acl".format(net=net), params=params - ) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def net_acl_add(config, net, direction, description, rule, order): - """ - Add new network acl - - API endpoint: POST /api/v1/network/{net}/acl - API arguments: description=description, direction=direction, order=order, rule=rule - API schema: {"message":"{data}"} - """ - params = dict() - params["description"] = description - params["direction"] = direction - params["rule"] = rule - if order is not None: - params["order"] = order - - response = call_api( - config, "post", "/network/{net}/acl".format(net=net), params=params - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def net_acl_remove(config, net, description): - """ - Remove a network ACL - - API endpoint: DELETE /api/v1/network/{vni}/acl/{description} - API arguments: - API schema: {"message":"{data}"} - """ - response = call_api( - config, - "delete", - "/network/{net}/acl/{description}".format(net=net, description=description), - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = 
False - - return retstatus, response.json().get("message", "") - - -# -# SR-IOV functions -# -def net_sriov_pf_list(config, node): - """ - List all PFs on NODE - - API endpoint: GET /api/v1/sriov/pf/ - API arguments: node={node} - API schema: [{json_data_object},{json_data_object},etc.] - """ - response = call_api(config, "get", "/sriov/pf/{}".format(node)) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def net_sriov_vf_set( - config, - node, - vf, - vlan_id, - vlan_qos, - tx_rate_min, - tx_rate_max, - link_state, - spoof_check, - trust, - query_rss, -): - """ - Mdoify configuration of a SR-IOV VF - - API endpoint: PUT /api/v1/sriov/vf// - API arguments: vlan_id={vlan_id}, vlan_qos={vlan_qos}, tx_rate_min={tx_rate_min}, tx_rate_max={tx_rate_max}, - link_state={link_state}, spoof_check={spoof_check}, trust={trust}, query_rss={query_rss} - API schema: {"message": "{data}"} - """ - params = dict() - - # Update any params that we've sent - if vlan_id is not None: - params["vlan_id"] = vlan_id - - if vlan_qos is not None: - params["vlan_qos"] = vlan_qos - - if tx_rate_min is not None: - params["tx_rate_min"] = tx_rate_min - - if tx_rate_max is not None: - params["tx_rate_max"] = tx_rate_max - - if link_state is not None: - params["link_state"] = link_state - - if spoof_check is not None: - params["spoof_check"] = spoof_check - - if trust is not None: - params["trust"] = trust - - if query_rss is not None: - params["query_rss"] = query_rss - - # Write the new configuration to the API - response = call_api( - config, "put", "/sriov/vf/{node}/{vf}".format(node=node, vf=vf), params=params - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def net_sriov_vf_list(config, node, pf=None): - """ - List all VFs on NODE, optionally limited by PF - - API endpoint: GET /api/v1/sriov/vf/ - API arguments: 
node={node}, pf={pf} - API schema: [{json_data_object},{json_data_object},etc.] - """ - params = dict() - params["pf"] = pf - - response = call_api(config, "get", "/sriov/vf/{}".format(node), params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def net_sriov_vf_info(config, node, vf): - """ - Get info about VF on NODE - - API endpoint: GET /api/v1/sriov/vf// - API arguments: - API schema: [{json_data_object}] - """ - response = call_api(config, "get", "/sriov/vf/{}/{}".format(node, vf)) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "VF not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -# -# Output display functions -# -def getColour(value): - if value in ["False", "None"]: - return ansiprint.blue() - else: - return ansiprint.green() - - -def getOutputColours(network_information): - v6_flag_colour = getColour(network_information["ip6"]["network"]) - v4_flag_colour = getColour(network_information["ip4"]["network"]) - dhcp6_flag_colour = getColour(network_information["ip6"]["dhcp_flag"]) - dhcp4_flag_colour = getColour(network_information["ip4"]["dhcp_flag"]) - - return v6_flag_colour, v4_flag_colour, dhcp6_flag_colour, dhcp4_flag_colour - - -def format_info(config, network_information, long_output): - if not network_information: - return "No network found" - - ( - v6_flag_colour, - v4_flag_colour, - dhcp6_flag_colour, - dhcp4_flag_colour, - ) = getOutputColours(network_information) - - # Format a nice output: do this line-by-line then concat the elements at the end - ainformation = [] - ainformation.append( - 
"{}Virtual network information:{}".format(ansiprint.bold(), ansiprint.end()) - ) - ainformation.append("") - # Basic information - ainformation.append( - "{}VNI:{} {}".format( - ansiprint.purple(), ansiprint.end(), network_information["vni"] - ) - ) - ainformation.append( - "{}Type:{} {}".format( - ansiprint.purple(), ansiprint.end(), network_information["type"] - ) - ) - ainformation.append( - "{}MTU:{} {}".format( - ansiprint.purple(), ansiprint.end(), network_information["mtu"] - ) - ) - ainformation.append( - "{}Description:{} {}".format( - ansiprint.purple(), ansiprint.end(), network_information["description"] - ) - ) - if network_information["type"] == "managed": - ainformation.append( - "{}Domain:{} {}".format( - ansiprint.purple(), ansiprint.end(), network_information["domain"] - ) - ) - ainformation.append( - "{}DNS Servers:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - ", ".join(network_information["name_servers"]), - ) - ) - if network_information["ip6"]["network"] != "None": - ainformation.append("") - ainformation.append( - "{}IPv6 network:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - network_information["ip6"]["network"], - ) - ) - ainformation.append( - "{}IPv6 gateway:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - network_information["ip6"]["gateway"], - ) - ) - ainformation.append( - "{}DHCPv6 enabled:{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - dhcp6_flag_colour, - network_information["ip6"]["dhcp_flag"], - ansiprint.end(), - ) - ) - if network_information["ip4"]["network"] != "None": - ainformation.append("") - ainformation.append( - "{}IPv4 network:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - network_information["ip4"]["network"], - ) - ) - ainformation.append( - "{}IPv4 gateway:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - network_information["ip4"]["gateway"], - ) - ) - ainformation.append( - "{}DHCPv4 enabled:{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), 
- dhcp4_flag_colour, - network_information["ip4"]["dhcp_flag"], - ansiprint.end(), - ) - ) - if network_information["ip4"]["dhcp_flag"] == "True": - ainformation.append( - "{}DHCPv4 range:{} {} - {}".format( - ansiprint.purple(), - ansiprint.end(), - network_information["ip4"]["dhcp_start"], - network_information["ip4"]["dhcp_end"], - ) - ) - - if long_output: - retcode, dhcp4_reservations_list = net_dhcp_list( - config, network_information["vni"], None - ) - if dhcp4_reservations_list: - ainformation.append("") - ainformation.append( - "{}Client DHCPv4 reservations:{}".format( - ansiprint.bold(), ansiprint.end() - ) - ) - ainformation.append("") - if retcode: - dhcp4_reservations_string = format_list_dhcp( - dhcp4_reservations_list - ) - for line in dhcp4_reservations_string.split("\n"): - ainformation.append(line) - else: - ainformation.append("No leases found") - - retcode, firewall_rules_list = net_acl_list( - config, network_information["vni"], None, None - ) - if firewall_rules_list: - ainformation.append("") - ainformation.append( - "{}Network firewall rules:{}".format( - ansiprint.bold(), ansiprint.end() - ) - ) - ainformation.append("") - if retcode: - firewall_rules_string = format_list_acl(firewall_rules_list) - for line in firewall_rules_string.split("\n"): - ainformation.append(line) - else: - ainformation.append("No ACLs found") - - # Join it all together - return "\n".join(ainformation) - - -def format_list(config, network_list): - if not network_list: - return "No network found" - - network_list_output = [] - - # Determine optimal column widths - net_vni_length = 5 - net_description_length = 12 - net_nettype_length = 8 - net_mtu_length = 4 - net_domain_length = 6 - net_v6_flag_length = 6 - net_dhcp6_flag_length = 7 - net_v4_flag_length = 6 - net_dhcp4_flag_length = 7 - for network_information in network_list: - # vni column - _net_vni_length = len(str(network_information["vni"])) + 1 - if _net_vni_length > net_vni_length: - net_vni_length = 
def format_list(config, network_list):
    """
    Render a tabular list view of networks.

    Variable columns (VNI, description, MTU, domain) are sized to the widest
    value present; flag columns are fixed-width and colourized.
    """
    if not network_list:
        return "No network found"

    network_list_output = []

    # Determine optimal column widths (floors sized for the header labels)
    net_vni_length = max([5] + [len(str(n["vni"])) + 1 for n in network_list])
    net_description_length = max(
        [12] + [len(n["description"]) + 1 for n in network_list]
    )
    net_mtu_length = max([4] + [len(str(n["mtu"])) + 1 for n in network_list])
    net_domain_length = max([6] + [len(n["domain"]) + 1 for n in network_list])
    # Fixed-width columns: flag values ("True"/"False") never exceed these
    net_nettype_length = 8
    net_v6_flag_length = 6
    net_dhcp6_flag_length = 7
    net_v4_flag_length = 6
    net_dhcp4_flag_length = 7

    # Total width of the "Config" column group
    config_length = (
        net_nettype_length
        + net_mtu_length
        + net_domain_length
        + net_v6_flag_length
        + net_dhcp6_flag_length
        + net_v4_flag_length
        + net_dhcp4_flag_length
    )

    # Format the string (group header with dashed rules)
    network_list_output.append(
        "{bold}{networks_header: <{networks_header_length}} {config_header: <{config_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            networks_header_length=net_vni_length + net_description_length + 1,
            config_header_length=config_length + 7,
            networks_header="Networks "
            + "-" * (net_vni_length + net_description_length - 9),
            config_header="Config " + "-" * (config_length - 1),
        )
    )
    # Column header row
    network_list_output.append(
        "{bold}\
{net_vni: <{net_vni_length}} \
{net_description: <{net_description_length}} \
{net_nettype: <{net_nettype_length}} \
{net_mtu: <{net_mtu_length}} \
{net_domain: <{net_domain_length}} \
{net_v6_flag: <{net_v6_flag_length}} \
{net_dhcp6_flag: <{net_dhcp6_flag_length}} \
{net_v4_flag: <{net_v4_flag_length}} \
{net_dhcp4_flag: <{net_dhcp4_flag_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            net_vni_length=net_vni_length,
            net_description_length=net_description_length,
            net_nettype_length=net_nettype_length,
            net_mtu_length=net_mtu_length,
            net_domain_length=net_domain_length,
            net_v6_flag_length=net_v6_flag_length,
            net_dhcp6_flag_length=net_dhcp6_flag_length,
            net_v4_flag_length=net_v4_flag_length,
            net_dhcp4_flag_length=net_dhcp4_flag_length,
            net_vni="VNI",
            net_description="Description",
            net_nettype="Type",
            net_mtu="MTU",
            net_domain="Domain",
            net_v6_flag="IPv6",
            net_dhcp6_flag="DHCPv6",
            net_v4_flag="IPv4",
            net_dhcp4_flag="DHCPv4",
        )
    )

    # Data rows, sorted numerically by VNI
    for network_information in sorted(network_list, key=lambda n: int(n["vni"])):
        (
            v6_flag_colour,
            v4_flag_colour,
            dhcp6_flag_colour,
            dhcp4_flag_colour,
        ) = getOutputColours(network_information)
        v4_flag = (
            "True" if network_information["ip4"]["network"] != "None" else "False"
        )
        v6_flag = (
            "True" if network_information["ip6"]["network"] != "None" else "False"
        )

        network_list_output.append(
            "{bold}\
{net_vni: <{net_vni_length}} \
{net_description: <{net_description_length}} \
{net_nettype: <{net_nettype_length}} \
{net_mtu: <{net_mtu_length}} \
{net_domain: <{net_domain_length}} \
{v6_flag_colour}{net_v6_flag: <{net_v6_flag_length}}{colour_off} \
{dhcp6_flag_colour}{net_dhcp6_flag: <{net_dhcp6_flag_length}}{colour_off} \
{v4_flag_colour}{net_v4_flag: <{net_v4_flag_length}}{colour_off} \
{dhcp4_flag_colour}{net_dhcp4_flag: <{net_dhcp4_flag_length}}{colour_off} \
{end_bold}".format(
                bold="",
                end_bold="",
                net_vni_length=net_vni_length,
                net_description_length=net_description_length,
                net_nettype_length=net_nettype_length,
                net_mtu_length=net_mtu_length,
                net_domain_length=net_domain_length,
                net_v6_flag_length=net_v6_flag_length,
                net_dhcp6_flag_length=net_dhcp6_flag_length,
                net_v4_flag_length=net_v4_flag_length,
                net_dhcp4_flag_length=net_dhcp4_flag_length,
                net_vni=network_information["vni"],
                net_description=network_information["description"],
                net_nettype=network_information["type"],
                net_mtu=network_information["mtu"],
                net_domain=network_information["domain"],
                net_v6_flag=v6_flag,
                v6_flag_colour=v6_flag_colour,
                net_dhcp6_flag=network_information["ip6"]["dhcp_flag"],
                dhcp6_flag_colour=dhcp6_flag_colour,
                net_v4_flag=v4_flag,
                v4_flag_colour=v4_flag_colour,
                net_dhcp4_flag=network_information["ip4"]["dhcp_flag"],
                dhcp4_flag_colour=dhcp4_flag_colour,
                colour_off=ansiprint.end(),
            )
        )

    return "\n".join(network_list_output)


def format_list_dhcp(dhcp_lease_list):
    """
    Render a tabular list of DHCP leases, sorted by hostname.

    BUGFIX: data rows previously hard-coded the timestamp column width to 12
    while the header used the computed width, misaligning the table whenever
    a timestamp exceeded 11 characters; both now use the computed width.
    """
    dhcp_lease_list_output = []

    # Determine optimal column widths (floors sized for the header labels)
    lease_hostname_length = max(
        [9] + [len(str(lease["hostname"])) + 1 for lease in dhcp_lease_list]
    )
    lease_ip4_address_length = max(
        [11] + [len(str(lease["ip4_address"])) + 1 for lease in dhcp_lease_list]
    )
    lease_mac_address_length = max(
        [13] + [len(str(lease["mac_address"])) + 1 for lease in dhcp_lease_list]
    )
    lease_timestamp_length = max(
        [10] + [len(str(lease["timestamp"])) + 1 for lease in dhcp_lease_list]
    )

    total_width = (
        lease_hostname_length
        + lease_ip4_address_length
        + lease_mac_address_length
        + lease_timestamp_length
    )

    # Format the string (group header)
    dhcp_lease_list_output.append(
        "{bold}{lease_header: <{lease_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            lease_header_length=total_width + 3,
            lease_header="Leases " + "-" * (total_width - 5),
        )
    )

    # Column header row
    dhcp_lease_list_output.append(
        "{bold}\
{lease_hostname: <{lease_hostname_length}} \
{lease_ip4_address: <{lease_ip4_address_length}} \
{lease_mac_address: <{lease_mac_address_length}} \
{lease_timestamp: <{lease_timestamp_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            lease_hostname_length=lease_hostname_length,
            lease_ip4_address_length=lease_ip4_address_length,
            lease_mac_address_length=lease_mac_address_length,
            lease_timestamp_length=lease_timestamp_length,
            lease_hostname="Hostname",
            lease_ip4_address="IP Address",
            lease_mac_address="MAC Address",
            lease_timestamp="Timestamp",
        )
    )

    for dhcp_lease_information in sorted(
        dhcp_lease_list, key=lambda lease: lease["hostname"]
    ):
        dhcp_lease_list_output.append(
            "{bold}\
{lease_hostname: <{lease_hostname_length}} \
{lease_ip4_address: <{lease_ip4_address_length}} \
{lease_mac_address: <{lease_mac_address_length}} \
{lease_timestamp: <{lease_timestamp_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                lease_hostname_length=lease_hostname_length,
                lease_ip4_address_length=lease_ip4_address_length,
                lease_mac_address_length=lease_mac_address_length,
                lease_timestamp_length=lease_timestamp_length,
                lease_hostname=str(dhcp_lease_information["hostname"]),
                lease_ip4_address=str(dhcp_lease_information["ip4_address"]),
                lease_mac_address=str(dhcp_lease_information["mac_address"]),
                lease_timestamp=str(dhcp_lease_information["timestamp"]),
            )
        )

    return "\n".join(dhcp_lease_list_output)
def format_list_acl(acl_list):
    """
    Render a tabular list of firewall ACLs, sorted by direction then order.
    """
    # Handle when we get an empty entry
    if not acl_list:
        acl_list = list()

    acl_list_output = []

    # Determine optimal column widths (floors sized for the header labels)
    acl_direction_length = 10
    acl_order_length = max([6] + [len(str(a["order"])) + 1 for a in acl_list])
    acl_description_length = max(
        [12] + [len(a["description"]) + 1 for a in acl_list]
    )
    acl_rule_length = max([5] + [len(a["rule"]) + 1 for a in acl_list])

    total_width = (
        acl_direction_length
        + acl_order_length
        + acl_description_length
        + acl_rule_length
    )

    # Format the string (group header)
    acl_list_output.append(
        "{bold}{acl_header: <{acl_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            acl_header_length=total_width + 3,
            acl_header="ACLs " + "-" * (total_width - 3),
        )
    )

    # Column header row
    acl_list_output.append(
        "{bold}\
{acl_direction: <{acl_direction_length}} \
{acl_order: <{acl_order_length}} \
{acl_description: <{acl_description_length}} \
{acl_rule: <{acl_rule_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            acl_direction_length=acl_direction_length,
            acl_order_length=acl_order_length,
            acl_description_length=acl_description_length,
            acl_rule_length=acl_rule_length,
            acl_direction="Direction",
            acl_order="Order",
            acl_description="Description",
            acl_rule="Rule",
        )
    )

    for acl_information in sorted(
        acl_list, key=lambda acl: acl["direction"] + str(acl["order"])
    ):
        acl_list_output.append(
            "{bold}\
{acl_direction: <{acl_direction_length}} \
{acl_order: <{acl_order_length}} \
{acl_description: <{acl_description_length}} \
{acl_rule: <{acl_rule_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                acl_direction_length=acl_direction_length,
                acl_order_length=acl_order_length,
                acl_description_length=acl_description_length,
                acl_rule_length=acl_rule_length,
                acl_direction=acl_information["direction"],
                acl_order=acl_information["order"],
                acl_description=acl_information["description"],
                acl_rule=acl_information["rule"],
            )
        )

    return "\n".join(acl_list_output)


def format_list_sriov_pf(pf_list):
    """
    Render a tabular list of SR-IOV PFs; long VF lists wrap onto
    continuation rows capped at max_vfs_length characters.

    BUGFIX: the wrap counter previously reset to 0 when starting a new
    continuation line, failing to count the VF that begins the line and
    letting lines overrun the cap; it now starts at that VF's width.
    """
    # The maximum column width of the VFs column
    max_vfs_length = 70

    # Handle when we get an empty entry
    if not pf_list:
        pf_list = list()

    pf_list_output = []

    # Determine optimal column widths (floors sized for the header labels)
    pf_phy_length = max([6] + [len(str(p["phy"])) + 1 for p in pf_list])
    pf_mtu_length = max([4] + [len(str(p["mtu"])) + 1 for p in pf_list])
    pf_vfs_length = max(
        [4] + [len(str(", ".join(p["vfs"]))) + 1 for p in pf_list]
    )

    # We handle columnizing very long lists later
    if pf_vfs_length > max_vfs_length:
        pf_vfs_length = max_vfs_length

    total_width = pf_phy_length + pf_mtu_length + pf_vfs_length

    # Format the string (group header)
    pf_list_output.append(
        "{bold}{pf_header: <{pf_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            pf_header_length=total_width + 2,
            pf_header="PFs " + "-" * (total_width - 3),
        )
    )

    # Column header row
    pf_list_output.append(
        "{bold}\
{pf_phy: <{pf_phy_length}} \
{pf_mtu: <{pf_mtu_length}} \
{pf_vfs: <{pf_vfs_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            pf_phy_length=pf_phy_length,
            pf_mtu_length=pf_mtu_length,
            pf_vfs_length=pf_vfs_length,
            pf_phy="Device",
            pf_mtu="MTU",
            pf_vfs="VFs",
        )
    )

    for pf_information in sorted(pf_list, key=lambda p: p["phy"]):
        # Figure out how to nicely columnize our list
        nice_vfs_list = [list()]
        vfs_lines = 0
        cur_vfs_length = 0
        for vfs in pf_information["vfs"]:
            vfs_len = len(vfs)
            cur_vfs_length += vfs_len + 2  # for the comma and space
            if cur_vfs_length > max_vfs_length:
                # Start a new line; count the VF that opens it
                cur_vfs_length = vfs_len + 2
                vfs_lines += 1
                nice_vfs_list.append(list())
            nice_vfs_list[vfs_lines].append(vfs)

        # Append the first (or only) line
        pf_list_output.append(
            "{bold}\
{pf_phy: <{pf_phy_length}} \
{pf_mtu: <{pf_mtu_length}} \
{pf_vfs: <{pf_vfs_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                pf_phy_length=pf_phy_length,
                pf_mtu_length=pf_mtu_length,
                pf_vfs_length=pf_vfs_length,
                pf_phy=pf_information["phy"],
                pf_mtu=pf_information["mtu"],
                pf_vfs=", ".join(nice_vfs_list[0]),
            )
        )

        # Continuation lines carry only the wrapped VF names
        for continuation in nice_vfs_list[1:]:
            pf_list_output.append(
                "{bold}\
{pf_phy: <{pf_phy_length}} \
{pf_mtu: <{pf_mtu_length}} \
{pf_vfs: <{pf_vfs_length}} \
{end_bold}".format(
                    bold="",
                    end_bold="",
                    pf_phy_length=pf_phy_length,
                    pf_mtu_length=pf_mtu_length,
                    pf_vfs_length=pf_vfs_length,
                    pf_phy="",
                    pf_mtu="",
                    pf_vfs=", ".join(continuation),
                )
            )

    return "\n".join(pf_list_output)
def format_list_sriov_vf(vf_list):
    """
    Render a tabular list of SR-IOV VFs, sorted by device name.

    BUGFIX: data rows previously passed bold=ansiprint.bold(), rendering
    every row bold — unlike every other list formatter in this module; data
    rows now use empty bold markers like their siblings.
    """
    # Handle when we get an empty entry
    if not vf_list:
        vf_list = list()

    vf_list_output = []

    # Determine optimal column widths (floors sized for the header labels)
    vf_phy_length = max([4] + [len(str(v["phy"])) + 1 for v in vf_list])
    vf_pf_length = max([3] + [len(str(v["pf"])) + 1 for v in vf_list])
    vf_mtu_length = max([4] + [len(str(v["mtu"])) + 1 for v in vf_list])
    vf_mac_length = max([11] + [len(str(v["mac"])) + 1 for v in vf_list])
    vf_used_length = max(
        [5] + [len(str(v["usage"]["used"])) + 1 for v in vf_list]
    )
    vf_domain_length = max(
        [5] + [len(str(v["usage"]["domain"])) + 1 for v in vf_list]
    )

    total_width = (
        vf_phy_length
        + vf_pf_length
        + vf_mtu_length
        + vf_mac_length
        + vf_used_length
        + vf_domain_length
    )

    # Format the string (group header)
    vf_list_output.append(
        "{bold}{vf_header: <{vf_header_length}}{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            vf_header_length=total_width + 5,
            vf_header="VFs " + "-" * total_width,
        )
    )

    # Column header row
    vf_list_output.append(
        "{bold}\
{vf_phy: <{vf_phy_length}} \
{vf_pf: <{vf_pf_length}} \
{vf_mtu: <{vf_mtu_length}} \
{vf_mac: <{vf_mac_length}} \
{vf_used: <{vf_used_length}} \
{vf_domain: <{vf_domain_length}} \
{end_bold}".format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            vf_phy_length=vf_phy_length,
            vf_pf_length=vf_pf_length,
            vf_mtu_length=vf_mtu_length,
            vf_mac_length=vf_mac_length,
            vf_used_length=vf_used_length,
            vf_domain_length=vf_domain_length,
            vf_phy="Device",
            vf_pf="PF",
            vf_mtu="MTU",
            vf_mac="MAC Address",
            vf_used="Used",
            vf_domain="Domain",
        )
    )

    for vf_information in sorted(vf_list, key=lambda v: v["phy"]):
        # An unused VF has no owning domain
        vf_domain = vf_information["usage"]["domain"]
        if not vf_domain:
            vf_domain = "N/A"

        vf_list_output.append(
            "{bold}\
{vf_phy: <{vf_phy_length}} \
{vf_pf: <{vf_pf_length}} \
{vf_mtu: <{vf_mtu_length}} \
{vf_mac: <{vf_mac_length}} \
{vf_used: <{vf_used_length}} \
{vf_domain: <{vf_domain_length}} \
{end_bold}".format(
                bold="",
                end_bold="",
                vf_phy_length=vf_phy_length,
                vf_pf_length=vf_pf_length,
                vf_mtu_length=vf_mtu_length,
                vf_mac_length=vf_mac_length,
                vf_used_length=vf_used_length,
                vf_domain_length=vf_domain_length,
                vf_phy=vf_information["phy"],
                vf_pf=vf_information["pf"],
                vf_mtu=vf_information["mtu"],
                vf_mac=vf_information["mac"],
                vf_used=vf_information["usage"]["used"],
                vf_domain=vf_domain,
            )
        )

    return "\n".join(vf_list_output)
def format_info_sriov_vf(config, vf_information, node):
    """
    Render a human-readable details view of one SR-IOV VF, including the
    owning VM's name and state when the VF is in use.

    BUGFIX: vm_information was only assigned inside the "used and has a
    domain" branch but read unconditionally below, raising NameError when a
    VF was marked used with no domain recorded; it is now initialized first.
    """
    if not vf_information:
        return "No VF found"

    # Get information on the using VM if applicable
    vm_information = None
    if vf_information["usage"]["used"] == "True" and vf_information["usage"]["domain"]:
        vm_information = call_api(
            config, "get", "/vm/{vm}".format(vm=vf_information["usage"]["domain"])
        ).json()
        if isinstance(vm_information, list) and len(vm_information) > 0:
            vm_information = vm_information[0]
        else:
            vm_information = None

    # Hoist the constant ANSI sequences; ansiprint functions are pure
    purple = ansiprint.purple()
    end = ansiprint.end()

    # Format a nice output: do this line-by-line then concat the elements at the end
    ainformation = []
    ainformation.append(
        "{}SR-IOV VF information:{}".format(ansiprint.bold(), end)
    )
    ainformation.append("")
    # Basic information
    for label, value in (
        ("PHY", vf_information["phy"]),
        ("PF", "{} @ {}".format(vf_information["pf"], node)),
        ("MTU", vf_information["mtu"]),
        ("MAC Address", vf_information["mac"]),
    ):
        ainformation.append("{}{}:{} {}".format(purple, label, end, value))
    ainformation.append("")
    # Configuration information (plain fields)
    for label, value in (
        ("vLAN ID", vf_information["config"]["vlan_id"]),
        ("vLAN QOS priority", vf_information["config"]["vlan_qos"]),
        ("Minimum TX Rate", vf_information["config"]["tx_rate_min"]),
        ("Maximum TX Rate", vf_information["config"]["tx_rate_max"]),
        ("Link State", vf_information["config"]["link_state"]),
    ):
        ainformation.append("{}{}:{} {}".format(purple, label, end, value))
    # Configuration information (colourized flag fields)
    for label, value in (
        ("Spoof Checking", vf_information["config"]["spoof_check"]),
        ("VF User Trust", vf_information["config"]["trust"]),
        ("Query RSS Config", vf_information["config"]["query_rss"]),
    ):
        ainformation.append(
            "{}{}:{} {}{}{}".format(purple, label, end, getColour(value), value, end)
        )
    ainformation.append("")
    # PCIe bus information
    for pci_field in ("domain", "bus", "slot", "function"):
        ainformation.append(
            "{}PCIe {}:{} {}".format(
                purple, pci_field, end, vf_information["pci"][pci_field]
            )
        )
    ainformation.append("")
    # Usage information
    ainformation.append(
        "{}VF Used:{} {}{}{}".format(
            purple,
            end,
            getColour(vf_information["usage"]["used"]),
            vf_information["usage"]["used"],
            end,
        )
    )
    if vf_information["usage"]["used"] == "True" and vm_information is not None:
        ainformation.append(
            "{}Using Domain:{} {} ({}) ({}{}{})".format(
                purple,
                end,
                vf_information["usage"]["domain"],
                vm_information["name"],
                getColour(vm_information["state"]),
                vm_information["state"],
                end,
            )
        )
    else:
        ainformation.append(
            "{}Using Domain:{} N/A".format(purple, end)
        )

    # Join it all together
    return "\n".join(ainformation)
-# -############################################################################### - -import time - -import pvc.lib.ansiprint as ansiprint -from pvc.lib.common import call_api - - -# -# Primary functions -# -def node_coordinator_state(config, node, action): - """ - Set node coordinator state state (primary/secondary) - - API endpoint: POST /api/v1/node/{node}/coordinator-state - API arguments: action={action} - API schema: {"message": "{data}"} - """ - params = {"state": action} - response = call_api( - config, - "post", - "/node/{node}/coordinator-state".format(node=node), - params=params, - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def node_domain_state(config, node, action, wait): - """ - Set node domain state state (flush/ready) - - API endpoint: POST /api/v1/node/{node}/domain-state - API arguments: action={action}, wait={wait} - API schema: {"message": "{data}"} - """ - params = {"state": action, "wait": str(wait).lower()} - response = call_api( - config, "post", "/node/{node}/domain-state".format(node=node), params=params - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def view_node_log(config, node, lines=100): - """ - Return node log lines from the API (and display them in a pager in the main CLI) - - API endpoint: GET /node/{node}/log - API arguments: lines={lines} - API schema: {"name":"{node}","data":"{node_log}"} - """ - params = {"lines": lines} - response = call_api( - config, "get", "/node/{node}/log".format(node=node), params=params - ) - - if response.status_code != 200: - return False, response.json().get("message", "") - - node_log = response.json()["data"] - - # Shrink the log buffer to length lines - shrunk_log = node_log.split("\n")[-lines:] - loglines = "\n".join(shrunk_log) - - return True, loglines - - -def follow_node_log(config, node, 
lines=10): - """ - Return and follow node log lines from the API - - API endpoint: GET /node/{node}/log - API arguments: lines={lines} - API schema: {"name":"{nodename}","data":"{node_log}"} - """ - # We always grab 200 to match the follow call, but only _show_ `lines` number - params = {"lines": 200} - response = call_api( - config, "get", "/node/{node}/log".format(node=node), params=params - ) - - if response.status_code != 200: - return False, response.json().get("message", "") - - # Shrink the log buffer to length lines - node_log = response.json()["data"] - shrunk_log = node_log.split("\n")[-int(lines) :] - loglines = "\n".join(shrunk_log) - - # Print the initial data and begin following - print(loglines, end="") - print("\n", end="") - - while True: - # Grab the next line set (200 is a reasonable number of lines per half-second; any more are skipped) - try: - params = {"lines": 200} - response = call_api( - config, "get", "/node/{node}/log".format(node=node), params=params - ) - new_node_log = response.json()["data"] - except Exception: - break - # Split the new and old log strings into constitutent lines - old_node_loglines = node_log.split("\n") - new_node_loglines = new_node_log.split("\n") - - # Set the node log to the new log value for the next iteration - node_log = new_node_log - - # Get the difference between the two sets of lines - old_node_loglines_set = set(old_node_loglines) - diff_node_loglines = [ - x for x in new_node_loglines if x not in old_node_loglines_set - ] - - # If there's a difference, print it out - if len(diff_node_loglines) > 0: - print("\n".join(diff_node_loglines), end="") - print("\n", end="") - - # Wait half a second - time.sleep(0.5) - - return True, "" - - -def node_info(config, node): - """ - Get information about node - - API endpoint: GET /api/v1/node/{node} - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/node/{node}".format(node=node)) - - if response.status_code == 200: - if 
isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match, return not found - return False, "Node not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def node_list( - config, limit, target_daemon_state, target_coordinator_state, target_domain_state -): - """ - Get list information about nodes (limited by {limit}) - - API endpoint: GET /api/v1/node - API arguments: limit={limit} - API schema: [{json_data_object},{json_data_object},etc.] - """ - params = dict() - if limit: - params["limit"] = limit - if target_daemon_state: - params["daemon_state"] = target_daemon_state - if target_coordinator_state: - params["coordinator_state"] = target_coordinator_state - if target_domain_state: - params["domain_state"] = target_domain_state - - response = call_api(config, "get", "/node", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -# -# Output display functions -# -def getOutputColours(node_information): - node_health = node_information.get("health", "N/A") - if isinstance(node_health, int): - if node_health <= 50: - health_colour = ansiprint.red() - elif node_health <= 90: - health_colour = ansiprint.yellow() - elif node_health <= 100: - health_colour = ansiprint.green() - else: - health_colour = ansiprint.blue() - else: - health_colour = ansiprint.blue() - - if node_information["daemon_state"] == "run": - daemon_state_colour = ansiprint.green() - elif node_information["daemon_state"] == "stop": - daemon_state_colour = ansiprint.red() - elif node_information["daemon_state"] == "shutdown": - daemon_state_colour = ansiprint.yellow() - elif node_information["daemon_state"] == "init": - daemon_state_colour = 
ansiprint.yellow() - elif node_information["daemon_state"] == "dead": - daemon_state_colour = ansiprint.red() + ansiprint.bold() - else: - daemon_state_colour = ansiprint.blue() - - if node_information["coordinator_state"] == "primary": - coordinator_state_colour = ansiprint.green() - elif node_information["coordinator_state"] == "secondary": - coordinator_state_colour = ansiprint.blue() - else: - coordinator_state_colour = ansiprint.cyan() - - if node_information["domain_state"] == "ready": - domain_state_colour = ansiprint.green() - else: - domain_state_colour = ansiprint.blue() - - if node_information["memory"]["allocated"] > node_information["memory"]["total"]: - mem_allocated_colour = ansiprint.yellow() - else: - mem_allocated_colour = "" - - if node_information["memory"]["provisioned"] > node_information["memory"]["total"]: - mem_provisioned_colour = ansiprint.yellow() - else: - mem_provisioned_colour = "" - - return ( - health_colour, - daemon_state_colour, - coordinator_state_colour, - domain_state_colour, - mem_allocated_colour, - mem_provisioned_colour, - ) - - -def format_info(node_information, long_output): - ( - health_colour, - daemon_state_colour, - coordinator_state_colour, - domain_state_colour, - mem_allocated_colour, - mem_provisioned_colour, - ) = getOutputColours(node_information) - - # Format a nice output; do this line-by-line then concat the elements at the end - ainformation = [] - # Basic information - ainformation.append( - "{}Name:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - node_information["name"], - ) - ) - ainformation.append( - "{}PVC Version:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - node_information["pvc_version"], - ) - ) - - node_health = node_information.get("health", "N/A") - if isinstance(node_health, int): - node_health_text = f"{node_health}%" - else: - node_health_text = node_health - ainformation.append( - "{}Health:{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - health_colour, - 
node_health_text, - ansiprint.end(), - ) - ) - - node_health_details = node_information.get("health_details", []) - if long_output: - node_health_messages = "\n ".join( - [f"{plugin['name']}: {plugin['message']}" for plugin in node_health_details] - ) - else: - node_health_messages = "\n ".join( - [ - f"{plugin['name']}: {plugin['message']}" - for plugin in node_health_details - if int(plugin.get("health_delta", 0)) > 0 - ] - ) - - if len(node_health_messages) > 0: - ainformation.append( - "{}Health Plugin Details:{} {}".format( - ansiprint.purple(), ansiprint.end(), node_health_messages - ) - ) - ainformation.append("") - - ainformation.append( - "{}Daemon State:{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - daemon_state_colour, - node_information["daemon_state"], - ansiprint.end(), - ) - ) - ainformation.append( - "{}Coordinator State:{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - coordinator_state_colour, - node_information["coordinator_state"], - ansiprint.end(), - ) - ) - ainformation.append( - "{}Domain State:{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - domain_state_colour, - node_information["domain_state"], - ansiprint.end(), - ) - ) - if long_output: - ainformation.append("") - ainformation.append( - "{}Architecture:{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["arch"] - ) - ) - ainformation.append( - "{}Operating System:{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["os"] - ) - ) - ainformation.append( - "{}Kernel Version:{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["kernel"] - ) - ) - ainformation.append("") - ainformation.append( - "{}Active VM Count:{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["domains_count"] - ) - ) - ainformation.append( - "{}Host CPUs:{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["vcpu"]["total"] - ) - ) - ainformation.append( - "{}vCPUs:{} {}".format( - 
ansiprint.purple(), ansiprint.end(), node_information["vcpu"]["allocated"] - ) - ) - ainformation.append( - "{}Load:{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["load"] - ) - ) - ainformation.append( - "{}Total RAM (MiB):{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["memory"]["total"] - ) - ) - ainformation.append( - "{}Used RAM (MiB):{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["memory"]["used"] - ) - ) - ainformation.append( - "{}Free RAM (MiB):{} {}".format( - ansiprint.purple(), ansiprint.end(), node_information["memory"]["free"] - ) - ) - ainformation.append( - "{}Allocated RAM (MiB):{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - mem_allocated_colour, - node_information["memory"]["allocated"], - ansiprint.end(), - ) - ) - ainformation.append( - "{}Provisioned RAM (MiB):{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - mem_provisioned_colour, - node_information["memory"]["provisioned"], - ansiprint.end(), - ) - ) - - # Join it all together - ainformation.append("") - return "\n".join(ainformation) - - -def format_list(node_list, raw): - if raw: - ainformation = list() - for node in sorted(item["name"] for item in node_list): - ainformation.append(node) - return "\n".join(ainformation) - - node_list_output = [] - - # Determine optimal column widths - node_name_length = 5 - pvc_version_length = 8 - health_length = 7 - daemon_state_length = 7 - coordinator_state_length = 12 - domain_state_length = 7 - domains_count_length = 4 - cpu_count_length = 6 - load_length = 5 - mem_total_length = 6 - mem_used_length = 5 - mem_free_length = 5 - mem_alloc_length = 6 - mem_prov_length = 5 - for node_information in node_list: - # node_name column - _node_name_length = len(node_information["name"]) + 1 - if _node_name_length > node_name_length: - node_name_length = _node_name_length - # node_pvc_version column - _pvc_version_length = len(node_information.get("pvc_version", 
"N/A")) + 1 - if _pvc_version_length > pvc_version_length: - pvc_version_length = _pvc_version_length - # node_health column - node_health = node_information.get("health", "N/A") - if isinstance(node_health, int): - node_health_text = f"{node_health}%" - else: - node_health_text = node_health - _health_length = len(node_health_text) + 1 - if _health_length > health_length: - health_length = _health_length - # daemon_state column - _daemon_state_length = len(node_information["daemon_state"]) + 1 - if _daemon_state_length > daemon_state_length: - daemon_state_length = _daemon_state_length - # coordinator_state column - _coordinator_state_length = len(node_information["coordinator_state"]) + 1 - if _coordinator_state_length > coordinator_state_length: - coordinator_state_length = _coordinator_state_length - # domain_state column - _domain_state_length = len(node_information["domain_state"]) + 1 - if _domain_state_length > domain_state_length: - domain_state_length = _domain_state_length - # domains_count column - _domains_count_length = len(str(node_information["domains_count"])) + 1 - if _domains_count_length > domains_count_length: - domains_count_length = _domains_count_length - # cpu_count column - _cpu_count_length = len(str(node_information["cpu_count"])) + 1 - if _cpu_count_length > cpu_count_length: - cpu_count_length = _cpu_count_length - # load column - _load_length = len(str(node_information["load"])) + 1 - if _load_length > load_length: - load_length = _load_length - # mem_total column - _mem_total_length = len(str(node_information["memory"]["total"])) + 1 - if _mem_total_length > mem_total_length: - mem_total_length = _mem_total_length - # mem_used column - _mem_used_length = len(str(node_information["memory"]["used"])) + 1 - if _mem_used_length > mem_used_length: - mem_used_length = _mem_used_length - # mem_free column - _mem_free_length = len(str(node_information["memory"]["free"])) + 1 - if _mem_free_length > mem_free_length: - mem_free_length = 
_mem_free_length - # mem_alloc column - _mem_alloc_length = len(str(node_information["memory"]["allocated"])) + 1 - if _mem_alloc_length > mem_alloc_length: - mem_alloc_length = _mem_alloc_length - - # mem_prov column - _mem_prov_length = len(str(node_information["memory"]["provisioned"])) + 1 - if _mem_prov_length > mem_prov_length: - mem_prov_length = _mem_prov_length - - # Format the string (header) - node_list_output.append( - "{bold}{node_header: <{node_header_length}} {state_header: <{state_header_length}} {resource_header: <{resource_header_length}} {memory_header: <{memory_header_length}}{end_bold}".format( - node_header_length=node_name_length - + pvc_version_length - + health_length - + 2, - state_header_length=daemon_state_length - + coordinator_state_length - + domain_state_length - + 2, - resource_header_length=domains_count_length - + cpu_count_length - + load_length - + 2, - memory_header_length=mem_total_length - + mem_used_length - + mem_free_length - + mem_alloc_length - + mem_prov_length - + 4, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - node_header="Nodes " - + "".join( - [ - "-" - for _ in range( - 6, node_name_length + pvc_version_length + health_length + 1 - ) - ] - ), - state_header="States " - + "".join( - [ - "-" - for _ in range( - 7, - daemon_state_length - + coordinator_state_length - + domain_state_length - + 1, - ) - ] - ), - resource_header="Resources " - + "".join( - [ - "-" - for _ in range( - 10, domains_count_length + cpu_count_length + load_length + 1 - ) - ] - ), - memory_header="Memory (M) " - + "".join( - [ - "-" - for _ in range( - 11, - mem_total_length - + mem_used_length - + mem_free_length - + mem_alloc_length - + mem_prov_length - + 3, - ) - ] - ), - ) - ) - - node_list_output.append( - "{bold}{node_name: <{node_name_length}} {node_pvc_version: <{pvc_version_length}} {node_health: <{health_length}} \ -{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} 
{coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \ -{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \ -{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}".format( - node_name_length=node_name_length, - pvc_version_length=pvc_version_length, - health_length=health_length, - daemon_state_length=daemon_state_length, - coordinator_state_length=coordinator_state_length, - domain_state_length=domain_state_length, - domains_count_length=domains_count_length, - cpu_count_length=cpu_count_length, - load_length=load_length, - mem_total_length=mem_total_length, - mem_used_length=mem_used_length, - mem_free_length=mem_free_length, - mem_alloc_length=mem_alloc_length, - mem_prov_length=mem_prov_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - daemon_state_colour="", - coordinator_state_colour="", - domain_state_colour="", - end_colour="", - node_name="Name", - node_pvc_version="Version", - node_health="Health", - node_daemon_state="Daemon", - node_coordinator_state="Coordinator", - node_domain_state="Domain", - node_domains_count="VMs", - node_cpu_count="vCPUs", - node_load="Load", - node_mem_total="Total", - node_mem_used="Used", - node_mem_free="Free", - node_mem_allocated="Alloc", - node_mem_provisioned="Prov", - ) - ) - - # Format the string (elements) - for node_information in sorted(node_list, key=lambda n: n["name"]): - ( - health_colour, - daemon_state_colour, - coordinator_state_colour, - domain_state_colour, - mem_allocated_colour, - mem_provisioned_colour, - ) = getOutputColours(node_information) - - node_health = node_information.get("health", "N/A") - if isinstance(node_health, int): - node_health_text = 
f"{node_health}%" - else: - node_health_text = node_health - - node_list_output.append( - "{bold}{node_name: <{node_name_length}} {node_pvc_version: <{pvc_version_length}} {health_colour}{node_health: <{health_length}}{end_colour} \ -{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \ -{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \ -{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {mem_allocated_colour}{node_mem_allocated: <{mem_alloc_length}}{end_colour} {mem_provisioned_colour}{node_mem_provisioned: <{mem_prov_length}}{end_colour}{end_bold}".format( - node_name_length=node_name_length, - pvc_version_length=pvc_version_length, - health_length=health_length, - daemon_state_length=daemon_state_length, - coordinator_state_length=coordinator_state_length, - domain_state_length=domain_state_length, - domains_count_length=domains_count_length, - cpu_count_length=cpu_count_length, - load_length=load_length, - mem_total_length=mem_total_length, - mem_used_length=mem_used_length, - mem_free_length=mem_free_length, - mem_alloc_length=mem_alloc_length, - mem_prov_length=mem_prov_length, - bold="", - end_bold="", - health_colour=health_colour, - daemon_state_colour=daemon_state_colour, - coordinator_state_colour=coordinator_state_colour, - domain_state_colour=domain_state_colour, - mem_allocated_colour=mem_allocated_colour, - mem_provisioned_colour=mem_allocated_colour, - end_colour=ansiprint.end(), - node_name=node_information["name"], - node_pvc_version=node_information.get("pvc_version", "N/A"), - node_health=node_health_text, - node_daemon_state=node_information["daemon_state"], - node_coordinator_state=node_information["coordinator_state"], - 
node_domain_state=node_information["domain_state"], - node_domains_count=node_information["domains_count"], - node_cpu_count=node_information["vcpu"]["allocated"], - node_load=node_information["load"], - node_mem_total=node_information["memory"]["total"], - node_mem_used=node_information["memory"]["used"], - node_mem_free=node_information["memory"]["free"], - node_mem_allocated=node_information["memory"]["allocated"], - node_mem_provisioned=node_information["memory"]["provisioned"], - ) - ) - - return "\n".join(node_list_output) diff --git a/client-cli-old/pvc/lib/provisioner.py b/client-cli-old/pvc/lib/provisioner.py deleted file mode 100644 index f533621d..00000000 --- a/client-cli-old/pvc/lib/provisioner.py +++ /dev/null @@ -1,2006 +0,0 @@ -#!/usr/bin/env python3 - -# provisioner.py - PVC CLI client function library, Provisioner functions -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -############################################################################### - -from requests_toolbelt.multipart.encoder import ( - MultipartEncoder, - MultipartEncoderMonitor, -) - -import pvc.lib.ansiprint as ansiprint -from pvc.lib.common import UploadProgressBar, call_api -from ast import literal_eval - - -# -# Primary functions -# -def template_info(config, template, template_type): - """ - Get information about template - - API endpoint: GET /api/v1/provisioner/template/{template_type}/{template} - API arguments: - API schema: {json_template_object} - """ - response = call_api( - config, - "get", - "/provisioner/template/{template_type}/{template}".format( - template_type=template_type, template=template - ), - ) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "Template not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def template_list(config, limit, template_type=None): - """ - Get list information about templates (limited by {limit}) - - API endpoint: GET /api/v1/provisioner/template/{template_type} - API arguments: limit={limit} - API schema: [{json_template_object},{json_template_object},etc.] 
- """ - params = dict() - if limit: - params["limit"] = limit - - if template_type is not None: - response = call_api( - config, - "get", - "/provisioner/template/{template_type}".format(template_type=template_type), - params=params, - ) - else: - response = call_api(config, "get", "/provisioner/template", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def template_add(config, params, template_type=None): - """ - Add a new template of {template_type} with {params} - - API endpoint: POST /api/v1/provisioner/template/{template_type} - API_arguments: args - API schema: {message} - """ - response = call_api( - config, - "post", - "/provisioner/template/{template_type}".format(template_type=template_type), - params=params, - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def template_modify(config, params, name, template_type): - """ - Modify an existing template of {template_type} with {params} - - API endpoint: PUT /api/v1/provisioner/template/{template_type}/{name} - API_arguments: args - API schema: {message} - """ - response = call_api( - config, - "put", - "/provisioner/template/{template_type}/{name}".format( - template_type=template_type, name=name - ), - params=params, - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def template_remove(config, name, template_type): - """ - Remove template {name} of {template_type} - - API endpoint: DELETE /api/v1/provisioner/template/{template_type}/{name} - API_arguments: - API schema: {message} - """ - response = call_api( - config, - "delete", - "/provisioner/template/{template_type}/{name}".format( - template_type=template_type, name=name - ), - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - 
return retvalue, response.json().get("message", "") - - -def template_element_add( - config, name, element_id, params, element_type=None, template_type=None -): - """ - Add a new template element of {element_type} with {params} to template {name} of {template_type} - - API endpoint: POST /api/v1/provisioner/template/{template_type}/{name}/{element_type}/{element_id} - API_arguments: args - API schema: {message} - """ - response = call_api( - config, - "post", - "/provisioner/template/{template_type}/{name}/{element_type}/{element_id}".format( - template_type=template_type, - name=name, - element_type=element_type, - element_id=element_id, - ), - params=params, - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def template_element_remove( - config, name, element_id, element_type=None, template_type=None -): - """ - Remove template element {element_id} of {element_type} from template {name} of {template_type} - - API endpoint: DELETE /api/v1/provisioner/template/{template_type}/{name}/{element_type}/{element_id} - API_arguments: - API schema: {message} - """ - response = call_api( - config, - "delete", - "/provisioner/template/{template_type}/{name}/{element_type}/{element_id}".format( - template_type=template_type, - name=name, - element_type=element_type, - element_id=element_id, - ), - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def userdata_info(config, userdata): - """ - Get information about userdata - - API endpoint: GET /api/v1/provisioner/userdata/{userdata} - API arguments: - API schema: {json_data_object} - """ - response = call_api( - config, "get", "/provisioner/userdata/{userdata}".format(userdata=userdata) - ) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return 
False, "Userdata not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def userdata_list(config, limit): - """ - Get list information about userdatas (limited by {limit}) - - API endpoint: GET /api/v1/provisioner/userdata - API arguments: limit={limit} - API schema: [{json_data_object},{json_data_object},etc.] - """ - params = dict() - if limit: - params["limit"] = limit - - response = call_api(config, "get", "/provisioner/userdata", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def userdata_show(config, name): - """ - Get information about userdata name - - API endpoint: GET /api/v1/provisioner/userdata/{name} - API arguments: - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - response = call_api(config, "get", "/provisioner/userdata/{}".format(name)) - - if response.status_code == 200: - return True, response.json()[0]["userdata"] - else: - return False, response.json().get("message", "") - - -def userdata_add(config, params): - """ - Add a new userdata with {params} - - API endpoint: POST /api/v1/provisioner/userdata - API_arguments: args - API schema: {message} - """ - name = params.get("name") - userdata_data = params.get("data") - - params = {"name": name} - data = {"data": userdata_data} - response = call_api( - config, "post", "/provisioner/userdata", params=params, data=data - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def userdata_modify(config, name, params): - """ - Modify userdata {name} with {params} - - API endpoint: PUT /api/v1/provisioner/userdata/{name} - API_arguments: args - API schema: {message} - """ - userdata_data = params.get("data") - - params = {"name": name} - data = {"data": userdata_data} - response = call_api( - config, - "put", - "/provisioner/userdata/{name}".format(name=name), - params=params, - data=data, - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def userdata_remove(config, name): - """ - Remove userdata {name} - - API endpoint: DELETE /api/v1/provisioner/userdata/{name} - API_arguments: - API schema: {message} - """ - response = call_api( - config, "delete", "/provisioner/userdata/{name}".format(name=name) - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def script_info(config, script): - """ - Get information about script - - API endpoint: GET /api/v1/provisioner/script/{script} - API arguments: - API schema: {json_data_object} - """ - response = call_api( - config, "get", 
"/provisioner/script/{script}".format(script=script) - ) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "Script not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def script_list(config, limit): - """ - Get list information about scripts (limited by {limit}) - - API endpoint: GET /api/v1/provisioner/script - API arguments: limit={limit} - API schema: [{json_data_object},{json_data_object},etc.] - """ - params = dict() - if limit: - params["limit"] = limit - - response = call_api(config, "get", "/provisioner/script", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def script_show(config, name): - """ - Get information about script name - - API endpoint: GET /api/v1/provisioner/script/{name} - API arguments: - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - response = call_api(config, "get", "/provisioner/script/{}".format(name)) - - if response.status_code == 200: - return True, response.json()[0]["script"] - else: - return False, response.json().get("message", "") - - -def script_add(config, params): - """ - Add a new script with {params} - - API endpoint: POST /api/v1/provisioner/script - API_arguments: args - API schema: {message} - """ - name = params.get("name") - script_data = params.get("data") - - params = {"name": name} - data = {"data": script_data} - response = call_api(config, "post", "/provisioner/script", params=params, data=data) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def script_modify(config, name, params): - """ - Modify script {name} with {params} - - API endpoint: PUT /api/v1/provisioner/script/{name} - API_arguments: args - API schema: {message} - """ - script_data = params.get("data") - - params = {"name": name} - data = {"data": script_data} - response = call_api( - config, - "put", - "/provisioner/script/{name}".format(name=name), - params=params, - data=data, - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def script_remove(config, name): - """ - Remove script {name} - - API endpoint: DELETE /api/v1/provisioner/script/{name} - API_arguments: - API schema: {message} - """ - response = call_api( - config, "delete", "/provisioner/script/{name}".format(name=name) - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def ova_info(config, name): - """ - Get information about OVA image {name} - - API endpoint: GET /api/v1/provisioner/ova/{name} - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/provisioner/ova/{name}".format(name=name)) - - if 
response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "OVA not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ova_list(config, limit): - """ - Get list information about OVA images (limited by {limit}) - - API endpoint: GET /api/v1/provisioner/ova - API arguments: limit={limit} - API schema: [{json_data_object},{json_data_object},etc.] - """ - params = dict() - if limit: - params["limit"] = limit - - response = call_api(config, "get", "/provisioner/ova", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def ova_upload(config, name, ova_file, params): - """ - Upload an OVA image to the cluster - - API endpoint: POST /api/v1/provisioner/ova/{name} - API arguments: pool={pool}, ova_size={ova_size} - API schema: {"message":"{data}"} - """ - import click - - bar = UploadProgressBar( - ova_file, end_message="Parsing file on remote side...", end_nl=False - ) - upload_data = MultipartEncoder( - fields={"file": ("filename", open(ova_file, "rb"), "application/octet-stream")} - ) - upload_monitor = MultipartEncoderMonitor(upload_data, bar.update) - - headers = {"Content-Type": upload_monitor.content_type} - - response = call_api( - config, - "post", - "/provisioner/ova/{}".format(name), - headers=headers, - params=params, - data=upload_monitor, - ) - - click.echo("done.") - click.echo() - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def ova_remove(config, name): - """ - Remove OVA image {name} - - API endpoint: DELETE 
/api/v1/provisioner/ova/{name} - API_arguments: - API schema: {message} - """ - response = call_api(config, "delete", "/provisioner/ova/{name}".format(name=name)) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def profile_info(config, profile): - """ - Get information about profile - - API endpoint: GET /api/v1/provisioner/profile/{profile} - API arguments: - API schema: {json_data_object} - """ - response = call_api( - config, "get", "/provisioner/profile/{profile}".format(profile=profile) - ) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "Profile not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def profile_list(config, limit): - """ - Get list information about profiles (limited by {limit}) - - API endpoint: GET /api/v1/provisioner/profile/{profile_type} - API arguments: limit={limit} - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - params = dict() - if limit: - params["limit"] = limit - - response = call_api(config, "get", "/provisioner/profile", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def profile_add(config, params): - """ - Add a new profile with {params} - - API endpoint: POST /api/v1/provisioner/profile - API_arguments: args - API schema: {message} - """ - response = call_api(config, "post", "/provisioner/profile", params=params) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def profile_modify(config, name, params): - """ - Modify profile {name} with {params} - - API endpoint: PUT /api/v1/provisioner/profile/{name} - API_arguments: args - API schema: {message} - """ - response = call_api( - config, "put", "/provisioner/profile/{name}".format(name=name), params=params - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def profile_remove(config, name): - """ - Remove profile {name} - - API endpoint: DELETE /api/v1/provisioner/profile/{name} - API_arguments: - API schema: {message} - """ - response = call_api( - config, "delete", "/provisioner/profile/{name}".format(name=name) - ) - - if response.status_code == 200: - retvalue = True - else: - retvalue = False - - return retvalue, response.json().get("message", "") - - -def vm_create(config, name, profile, wait_flag, define_flag, start_flag, script_args): - """ - Create a new VM named {name} with profile {profile} - - API endpoint: POST /api/v1/provisioner/create - API_arguments: name={name}, profile={profile}, arg={script_args} - API schema: {message} - """ - params = { - "name": name, - "profile": profile, - "start_vm": start_flag, - "define_vm": define_flag, - "arg": script_args, - } - response = call_api(config, "post", 
"/provisioner/create", params=params) - - if response.status_code == 202: - retvalue = True - if not wait_flag: - retdata = "Task ID: {}".format(response.json()["task_id"]) - else: - # Just return the task_id raw, instead of formatting it - retdata = response.json()["task_id"] - else: - retvalue = False - retdata = response.json().get("message", "") - - return retvalue, retdata - - -def task_status(config, task_id=None, is_watching=False): - """ - Get information about provisioner job {task_id} or all tasks if None - - API endpoint: GET /api/v1/provisioner/status - API arguments: - API schema: {json_data_object} - """ - if task_id is not None: - response = call_api( - config, "get", "/provisioner/status/{task_id}".format(task_id=task_id) - ) - else: - response = call_api(config, "get", "/provisioner/status") - - if task_id is not None: - if response.status_code == 200: - retvalue = True - respjson = response.json() - - if is_watching: - # Just return the raw JSON to the watching process instead of formatting it - return respjson - - job_state = respjson["state"] - if job_state == "RUNNING": - retdata = "Job state: RUNNING\nStage: {}/{}\nStatus: {}".format( - respjson["current"], respjson["total"], respjson["status"] - ) - elif job_state == "FAILED": - retdata = "Job state: FAILED\nStatus: {}".format(respjson["status"]) - elif job_state == "COMPLETED": - retdata = "Job state: COMPLETED\nStatus: {}".format(respjson["status"]) - else: - retdata = "Job state: {}\nStatus: {}".format( - respjson["state"], respjson["status"] - ) - else: - retvalue = False - retdata = response.json().get("message", "") - else: - retvalue = True - task_data_raw = response.json() - # Format the Celery data into a more useful data structure - task_data = list() - for task_type in ["active", "reserved", "scheduled"]: - try: - type_data = task_data_raw[task_type] - except Exception: - type_data = None - - if not type_data: - type_data = dict() - for task_host in type_data: - for task_job in 
task_data_raw[task_type][task_host]: - task = dict() - if task_type == "reserved": - task["type"] = "pending" - else: - task["type"] = task_type - task["worker"] = task_host - task["id"] = task_job.get("id") - try: - task_args = literal_eval(task_job.get("args")) - except Exception: - task_args = task_job.get("args") - task["vm_name"] = task_args[0] - task["vm_profile"] = task_args[1] - try: - task_kwargs = literal_eval(task_job.get("kwargs")) - except Exception: - task_kwargs = task_job.get("kwargs") - task["vm_define"] = str(bool(task_kwargs["define_vm"])) - task["vm_start"] = str(bool(task_kwargs["start_vm"])) - task_data.append(task) - retdata = task_data - - return retvalue, retdata - - -# -# Format functions -# -def format_list_template(template_data, template_type=None): - """ - Format the returned template template - - template_type can be used to only display part of the full list, allowing function - reuse with more limited output options. - """ - template_types = ["system", "network", "storage"] - normalized_template_data = dict() - ainformation = list() - - if template_type in template_types: - template_types = [template_type] - template_data_type = "{}_templates".format(template_type) - normalized_template_data[template_data_type] = template_data - else: - normalized_template_data = template_data - - if "system" in template_types: - ainformation.append( - format_list_template_system(normalized_template_data["system_templates"]) - ) - if len(template_types) > 1: - ainformation.append("") - - if "network" in template_types: - ainformation.append( - format_list_template_network(normalized_template_data["network_templates"]) - ) - if len(template_types) > 1: - ainformation.append("") - - if "storage" in template_types: - ainformation.append( - format_list_template_storage(normalized_template_data["storage_templates"]) - ) - - return "\n".join(ainformation) - - -def format_list_template_system(template_data): - if isinstance(template_data, dict): - 
template_data = [template_data] - - template_list_output = [] - - # Determine optimal column widths - template_name_length = 15 - template_id_length = 5 - template_vcpu_length = 6 - template_vram_length = 9 - template_serial_length = 7 - template_vnc_length = 4 - template_vnc_bind_length = 9 - template_node_limit_length = 6 - template_node_selector_length = 9 - template_node_autostart_length = 10 - template_migration_method_length = 10 - - for template in template_data: - # template_name column - _template_name_length = len(str(template["name"])) + 1 - if _template_name_length > template_name_length: - template_name_length = _template_name_length - # template_id column - _template_id_length = len(str(template["id"])) + 1 - if _template_id_length > template_id_length: - template_id_length = _template_id_length - # template_vcpu column - _template_vcpu_length = len(str(template["vcpu_count"])) + 1 - if _template_vcpu_length > template_vcpu_length: - template_vcpu_length = _template_vcpu_length - # template_vram column - _template_vram_length = len(str(template["vram_mb"])) + 1 - if _template_vram_length > template_vram_length: - template_vram_length = _template_vram_length - # template_serial column - _template_serial_length = len(str(template["serial"])) + 1 - if _template_serial_length > template_serial_length: - template_serial_length = _template_serial_length - # template_vnc column - _template_vnc_length = len(str(template["vnc"])) + 1 - if _template_vnc_length > template_vnc_length: - template_vnc_length = _template_vnc_length - # template_vnc_bind column - _template_vnc_bind_length = len(str(template["vnc_bind"])) + 1 - if _template_vnc_bind_length > template_vnc_bind_length: - template_vnc_bind_length = _template_vnc_bind_length - # template_node_limit column - _template_node_limit_length = len(str(template["node_limit"])) + 1 - if _template_node_limit_length > template_node_limit_length: - template_node_limit_length = _template_node_limit_length - # 
template_node_selector column - _template_node_selector_length = len(str(template["node_selector"])) + 1 - if _template_node_selector_length > template_node_selector_length: - template_node_selector_length = _template_node_selector_length - # template_node_autostart column - _template_node_autostart_length = len(str(template["node_autostart"])) + 1 - if _template_node_autostart_length > template_node_autostart_length: - template_node_autostart_length = _template_node_autostart_length - # template_migration_method column - _template_migration_method_length = len(str(template["migration_method"])) + 1 - if _template_migration_method_length > template_migration_method_length: - template_migration_method_length = _template_migration_method_length - - # Format the string (header) - template_list_output.append( - "{bold}{template_header: <{template_header_length}} {resources_header: <{resources_header_length}} {consoles_header: <{consoles_header_length}} {metadata_header: <{metadata_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - template_header_length=template_name_length + template_id_length + 1, - resources_header_length=template_vcpu_length + template_vram_length + 1, - consoles_header_length=template_serial_length - + template_vnc_length - + template_vnc_bind_length - + 2, - metadata_header_length=template_node_limit_length - + template_node_selector_length - + template_node_autostart_length - + template_migration_method_length - + 3, - template_header="System Templates " - + "".join( - ["-" for _ in range(17, template_name_length + template_id_length)] - ), - resources_header="Resources " - + "".join( - ["-" for _ in range(10, template_vcpu_length + template_vram_length)] - ), - consoles_header="Consoles " - + "".join( - [ - "-" - for _ in range( - 9, - template_serial_length - + template_vnc_length - + template_vnc_bind_length - + 1, - ) - ] - ), - metadata_header="Metadata " - + "".join( - [ - "-" - for _ in range( - 9, - 
template_node_limit_length - + template_node_selector_length - + template_node_autostart_length - + template_migration_method_length - + 2, - ) - ] - ), - ) - ) - - template_list_output.append( - "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ -{template_vcpu: <{template_vcpu_length}} \ -{template_vram: <{template_vram_length}} \ -{template_serial: <{template_serial_length}} \ -{template_vnc: <{template_vnc_length}} \ -{template_vnc_bind: <{template_vnc_bind_length}} \ -{template_node_limit: <{template_node_limit_length}} \ -{template_node_selector: <{template_node_selector_length}} \ -{template_node_autostart: <{template_node_autostart_length}} \ -{template_migration_method: <{template_migration_method_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - template_name_length=template_name_length, - template_id_length=template_id_length, - template_vcpu_length=template_vcpu_length, - template_vram_length=template_vram_length, - template_serial_length=template_serial_length, - template_vnc_length=template_vnc_length, - template_vnc_bind_length=template_vnc_bind_length, - template_node_limit_length=template_node_limit_length, - template_node_selector_length=template_node_selector_length, - template_node_autostart_length=template_node_autostart_length, - template_migration_method_length=template_migration_method_length, - template_name="Name", - template_id="ID", - template_vcpu="vCPUs", - template_vram="vRAM [M]", - template_serial="Serial", - template_vnc="VNC", - template_vnc_bind="VNC bind", - template_node_limit="Limit", - template_node_selector="Selector", - template_node_autostart="Autostart", - template_migration_method="Migration", - ) - ) - - # Format the string (elements) - for template in sorted(template_data, key=lambda i: i.get("name", None)): - template_list_output.append( - "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ -{template_vcpu: 
<{template_vcpu_length}} \ -{template_vram: <{template_vram_length}} \ -{template_serial: <{template_serial_length}} \ -{template_vnc: <{template_vnc_length}} \ -{template_vnc_bind: <{template_vnc_bind_length}} \ -{template_node_limit: <{template_node_limit_length}} \ -{template_node_selector: <{template_node_selector_length}} \ -{template_node_autostart: <{template_node_autostart_length}} \ -{template_migration_method: <{template_migration_method_length}}{end_bold}".format( - template_name_length=template_name_length, - template_id_length=template_id_length, - template_vcpu_length=template_vcpu_length, - template_vram_length=template_vram_length, - template_serial_length=template_serial_length, - template_vnc_length=template_vnc_length, - template_vnc_bind_length=template_vnc_bind_length, - template_node_limit_length=template_node_limit_length, - template_node_selector_length=template_node_selector_length, - template_node_autostart_length=template_node_autostart_length, - template_migration_method_length=template_migration_method_length, - bold="", - end_bold="", - template_name=str(template["name"]), - template_id=str(template["id"]), - template_vcpu=str(template["vcpu_count"]), - template_vram=str(template["vram_mb"]), - template_serial=str(template["serial"]), - template_vnc=str(template["vnc"]), - template_vnc_bind=str(template["vnc_bind"]), - template_node_limit=str(template["node_limit"]), - template_node_selector=str(template["node_selector"]), - template_node_autostart=str(template["node_autostart"]), - template_migration_method=str(template["migration_method"]), - ) - ) - - return "\n".join(template_list_output) - - -def format_list_template_network(template_template): - if isinstance(template_template, dict): - template_template = [template_template] - - template_list_output = [] - - # Determine optimal column widths - template_name_length = 18 - template_id_length = 5 - template_mac_template_length = 13 - template_networks_length = 10 - - for template 
in template_template: - # Join the networks elements into a single list of VNIs - network_list = list() - for network in template["networks"]: - network_list.append(str(network["vni"])) - template["networks_csv"] = ",".join(network_list) - - for template in template_template: - # template_name column - _template_name_length = len(str(template["name"])) + 1 - if _template_name_length > template_name_length: - template_name_length = _template_name_length - # template_id column - _template_id_length = len(str(template["id"])) + 1 - if _template_id_length > template_id_length: - template_id_length = _template_id_length - # template_mac_template column - _template_mac_template_length = len(str(template["mac_template"])) + 1 - if _template_mac_template_length > template_mac_template_length: - template_mac_template_length = _template_mac_template_length - # template_networks column - _template_networks_length = len(str(template["networks_csv"])) + 1 - if _template_networks_length > template_networks_length: - template_networks_length = _template_networks_length - - # Format the string (header) - template_list_output.append( - "{bold}{template_header: <{template_header_length}} {details_header: <{details_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - template_header_length=template_name_length + template_id_length + 1, - details_header_length=template_mac_template_length - + template_networks_length - + 1, - template_header="Network Templates " - + "".join( - ["-" for _ in range(18, template_name_length + template_id_length)] - ), - details_header="Details " - + "".join( - [ - "-" - for _ in range( - 8, template_mac_template_length + template_networks_length - ) - ] - ), - ) - ) - - template_list_output.append( - "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ -{template_mac_template: <{template_mac_template_length}} \ -{template_networks: <{template_networks_length}}{end_bold}".format( - 
template_name_length=template_name_length, - template_id_length=template_id_length, - template_mac_template_length=template_mac_template_length, - template_networks_length=template_networks_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - template_name="Name", - template_id="ID", - template_mac_template="MAC template", - template_networks="Network VNIs", - ) - ) - - # Format the string (elements) - for template in sorted(template_template, key=lambda i: i.get("name", None)): - template_list_output.append( - "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ -{template_mac_template: <{template_mac_template_length}} \ -{template_networks: <{template_networks_length}}{end_bold}".format( - template_name_length=template_name_length, - template_id_length=template_id_length, - template_mac_template_length=template_mac_template_length, - template_networks_length=template_networks_length, - bold="", - end_bold="", - template_name=str(template["name"]), - template_id=str(template["id"]), - template_mac_template=str(template["mac_template"]), - template_networks=str(template["networks_csv"]), - ) - ) - - return "\n".join(template_list_output) - - -def format_list_template_storage(template_template): - if isinstance(template_template, dict): - template_template = [template_template] - - template_list_output = [] - - # Determine optimal column widths - template_name_length = 18 - template_id_length = 5 - template_disk_id_length = 8 - template_disk_pool_length = 5 - template_disk_source_length = 14 - template_disk_size_length = 9 - template_disk_filesystem_length = 11 - template_disk_fsargs_length = 10 - template_disk_mountpoint_length = 10 - - for template in template_template: - # template_name column - _template_name_length = len(str(template["name"])) + 1 - if _template_name_length > template_name_length: - template_name_length = _template_name_length - # template_id column - _template_id_length = len(str(template["id"])) + 1 
- if _template_id_length > template_id_length: - template_id_length = _template_id_length - - for disk in template["disks"]: - # template_disk_id column - _template_disk_id_length = len(str(disk["disk_id"])) + 1 - if _template_disk_id_length > template_disk_id_length: - template_disk_id_length = _template_disk_id_length - # template_disk_pool column - _template_disk_pool_length = len(str(disk["pool"])) + 1 - if _template_disk_pool_length > template_disk_pool_length: - template_disk_pool_length = _template_disk_pool_length - # template_disk_source column - _template_disk_source_length = len(str(disk["source_volume"])) + 1 - if _template_disk_source_length > template_disk_source_length: - template_disk_source_length = _template_disk_source_length - # template_disk_size column - _template_disk_size_length = len(str(disk["disk_size_gb"])) + 1 - if _template_disk_size_length > template_disk_size_length: - template_disk_size_length = _template_disk_size_length - # template_disk_filesystem column - _template_disk_filesystem_length = len(str(disk["filesystem"])) + 1 - if _template_disk_filesystem_length > template_disk_filesystem_length: - template_disk_filesystem_length = _template_disk_filesystem_length - # template_disk_fsargs column - _template_disk_fsargs_length = len(str(disk["filesystem_args"])) + 1 - if _template_disk_fsargs_length > template_disk_fsargs_length: - template_disk_fsargs_length = _template_disk_fsargs_length - # template_disk_mountpoint column - _template_disk_mountpoint_length = len(str(disk["mountpoint"])) + 1 - if _template_disk_mountpoint_length > template_disk_mountpoint_length: - template_disk_mountpoint_length = _template_disk_mountpoint_length - - # Format the string (header) - template_list_output.append( - "{bold}{template_header: <{template_header_length}} {details_header: <{details_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - template_header_length=template_name_length + template_id_length + 1, 
- details_header_length=template_disk_id_length - + template_disk_pool_length - + template_disk_source_length - + template_disk_size_length - + template_disk_filesystem_length - + template_disk_fsargs_length - + template_disk_mountpoint_length - + 7, - template_header="Storage Templates " - + "".join( - ["-" for _ in range(18, template_name_length + template_id_length)] - ), - details_header="Details " - + "".join( - [ - "-" - for _ in range( - 8, - template_disk_id_length - + template_disk_pool_length - + template_disk_source_length - + template_disk_size_length - + template_disk_filesystem_length - + template_disk_fsargs_length - + template_disk_mountpoint_length - + 6, - ) - ] - ), - ) - ) - - template_list_output.append( - "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ -{template_disk_id: <{template_disk_id_length}} \ -{template_disk_pool: <{template_disk_pool_length}} \ -{template_disk_source: <{template_disk_source_length}} \ -{template_disk_size: <{template_disk_size_length}} \ -{template_disk_filesystem: <{template_disk_filesystem_length}} \ -{template_disk_fsargs: <{template_disk_fsargs_length}} \ -{template_disk_mountpoint: <{template_disk_mountpoint_length}}{end_bold}".format( - template_name_length=template_name_length, - template_id_length=template_id_length, - template_disk_id_length=template_disk_id_length, - template_disk_pool_length=template_disk_pool_length, - template_disk_source_length=template_disk_source_length, - template_disk_size_length=template_disk_size_length, - template_disk_filesystem_length=template_disk_filesystem_length, - template_disk_fsargs_length=template_disk_fsargs_length, - template_disk_mountpoint_length=template_disk_mountpoint_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - template_name="Name", - template_id="ID", - template_disk_id="Disk ID", - template_disk_pool="Pool", - template_disk_source="Source Volume", - template_disk_size="Size [G]", - 
template_disk_filesystem="Filesystem", - template_disk_fsargs="Arguments", - template_disk_mountpoint="Mountpoint", - ) - ) - - # Format the string (elements) - for template in sorted(template_template, key=lambda i: i.get("name", None)): - template_list_output.append( - "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}}{end_bold}".format( - template_name_length=template_name_length, - template_id_length=template_id_length, - bold="", - end_bold="", - template_name=str(template["name"]), - template_id=str(template["id"]), - ) - ) - for disk in sorted(template["disks"], key=lambda i: i.get("disk_id", None)): - template_list_output.append( - "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ -{template_disk_id: <{template_disk_id_length}} \ -{template_disk_pool: <{template_disk_pool_length}} \ -{template_disk_source: <{template_disk_source_length}} \ -{template_disk_size: <{template_disk_size_length}} \ -{template_disk_filesystem: <{template_disk_filesystem_length}} \ -{template_disk_fsargs: <{template_disk_fsargs_length}} \ -{template_disk_mountpoint: <{template_disk_mountpoint_length}}{end_bold}".format( - template_name_length=template_name_length, - template_id_length=template_id_length, - template_disk_id_length=template_disk_id_length, - template_disk_pool_length=template_disk_pool_length, - template_disk_source_length=template_disk_source_length, - template_disk_size_length=template_disk_size_length, - template_disk_filesystem_length=template_disk_filesystem_length, - template_disk_fsargs_length=template_disk_fsargs_length, - template_disk_mountpoint_length=template_disk_mountpoint_length, - bold="", - end_bold="", - template_name="", - template_id="", - template_disk_id=str(disk["disk_id"]), - template_disk_pool=str(disk["pool"]), - template_disk_source=str(disk["source_volume"]), - template_disk_size=str(disk["disk_size_gb"]), - template_disk_filesystem=str(disk["filesystem"]), - 
template_disk_fsargs=str(disk["filesystem_args"]), - template_disk_mountpoint=str(disk["mountpoint"]), - ) - ) - - return "\n".join(template_list_output) - - -def format_list_userdata(userdata_data, lines=None): - if isinstance(userdata_data, dict): - userdata_data = [userdata_data] - - userdata_list_output = [] - - # Determine optimal column widths - userdata_name_length = 12 - userdata_id_length = 5 - userdata_document_length = 92 - userdata_name_length - userdata_id_length - - for userdata in userdata_data: - # userdata_name column - _userdata_name_length = len(str(userdata["name"])) + 1 - if _userdata_name_length > userdata_name_length: - userdata_name_length = _userdata_name_length - # userdata_id column - _userdata_id_length = len(str(userdata["id"])) + 1 - if _userdata_id_length > userdata_id_length: - userdata_id_length = _userdata_id_length - - # Format the string (header) - userdata_list_output.append( - "{bold}{userdata_header: <{userdata_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - userdata_header_length=userdata_name_length - + userdata_id_length - + userdata_document_length - + 2, - userdata_header="Userdata " - + "".join( - [ - "-" - for _ in range( - 9, - userdata_name_length - + userdata_id_length - + userdata_document_length - + 1, - ) - ] - ), - ) - ) - - userdata_list_output.append( - "{bold}{userdata_name: <{userdata_name_length}} {userdata_id: <{userdata_id_length}} \ -{userdata_data}{end_bold}".format( - userdata_name_length=userdata_name_length, - userdata_id_length=userdata_id_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - userdata_name="Name", - userdata_id="ID", - userdata_data="Document", - ) - ) - - # Format the string (elements) - for data in sorted(userdata_data, key=lambda i: i.get("name", None)): - line_count = 0 - for line in data["userdata"].split("\n"): - if line_count < 1: - userdata_name = data["name"] - userdata_id = data["id"] - else: - userdata_name = "" - 
userdata_id = "" - line_count += 1 - - if lines and line_count > lines: - userdata_list_output.append( - "{bold}{userdata_name: <{userdata_name_length}} {userdata_id: <{userdata_id_length}} \ -{userdata_data}{end_bold}".format( - userdata_name_length=userdata_name_length, - userdata_id_length=userdata_id_length, - bold="", - end_bold="", - userdata_name=userdata_name, - userdata_id=userdata_id, - userdata_data="[...]", - ) - ) - break - - userdata_list_output.append( - "{bold}{userdata_name: <{userdata_name_length}} {userdata_id: <{userdata_id_length}} \ -{userdata_data}{end_bold}".format( - userdata_name_length=userdata_name_length, - userdata_id_length=userdata_id_length, - bold="", - end_bold="", - userdata_name=userdata_name, - userdata_id=userdata_id, - userdata_data=str(line), - ) - ) - - return "\n".join(userdata_list_output) - - -def format_list_script(script_data, lines=None): - if isinstance(script_data, dict): - script_data = [script_data] - - script_list_output = [] - - # Determine optimal column widths - script_name_length = 12 - script_id_length = 5 - script_data_length = 92 - script_name_length - script_id_length - - for script in script_data: - # script_name column - _script_name_length = len(str(script["name"])) + 1 - if _script_name_length > script_name_length: - script_name_length = _script_name_length - # script_id column - _script_id_length = len(str(script["id"])) + 1 - if _script_id_length > script_id_length: - script_id_length = _script_id_length - - # Format the string (header) - script_list_output.append( - "{bold}{script_header: <{script_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - script_header_length=script_name_length - + script_id_length - + script_data_length - + 2, - script_header="Script " - + "".join( - [ - "-" - for _ in range( - 7, - script_name_length + script_id_length + script_data_length + 1, - ) - ] - ), - ) - ) - - script_list_output.append( - "{bold}{script_name: 
<{script_name_length}} {script_id: <{script_id_length}} \ -{script_data}{end_bold}".format( - script_name_length=script_name_length, - script_id_length=script_id_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - script_name="Name", - script_id="ID", - script_data="Script", - ) - ) - - # Format the string (elements) - for script in sorted(script_data, key=lambda i: i.get("name", None)): - line_count = 0 - for line in script["script"].split("\n"): - if line_count < 1: - script_name = script["name"] - script_id = script["id"] - else: - script_name = "" - script_id = "" - line_count += 1 - - if lines and line_count > lines: - script_list_output.append( - "{bold}{script_name: <{script_name_length}} {script_id: <{script_id_length}} \ -{script_data}{end_bold}".format( - script_name_length=script_name_length, - script_id_length=script_id_length, - bold="", - end_bold="", - script_name=script_name, - script_id=script_id, - script_data="[...]", - ) - ) - break - - script_list_output.append( - "{bold}{script_name: <{script_name_length}} {script_id: <{script_id_length}} \ -{script_data}{end_bold}".format( - script_name_length=script_name_length, - script_id_length=script_id_length, - bold="", - end_bold="", - script_name=script_name, - script_id=script_id, - script_data=str(line), - ) - ) - - return "\n".join(script_list_output) - - -def format_list_ova(ova_data): - if isinstance(ova_data, dict): - ova_data = [ova_data] - - ova_list_output = [] - - # Determine optimal column widths - ova_name_length = 18 - ova_id_length = 5 - ova_disk_id_length = 8 - ova_disk_size_length = 10 - ova_disk_pool_length = 5 - ova_disk_volume_format_length = 7 - ova_disk_volume_name_length = 13 - - for ova in ova_data: - # ova_name column - _ova_name_length = len(str(ova["name"])) + 1 - if _ova_name_length > ova_name_length: - ova_name_length = _ova_name_length - # ova_id column - _ova_id_length = len(str(ova["id"])) + 1 - if _ova_id_length > ova_id_length: - ova_id_length = 
_ova_id_length - - for disk in ova["volumes"]: - # ova_disk_id column - _ova_disk_id_length = len(str(disk["disk_id"])) + 1 - if _ova_disk_id_length > ova_disk_id_length: - ova_disk_id_length = _ova_disk_id_length - # ova_disk_size column - _ova_disk_size_length = len(str(disk["disk_size_gb"])) + 1 - if _ova_disk_size_length > ova_disk_size_length: - ova_disk_size_length = _ova_disk_size_length - # ova_disk_pool column - _ova_disk_pool_length = len(str(disk["pool"])) + 1 - if _ova_disk_pool_length > ova_disk_pool_length: - ova_disk_pool_length = _ova_disk_pool_length - # ova_disk_volume_format column - _ova_disk_volume_format_length = len(str(disk["volume_format"])) + 1 - if _ova_disk_volume_format_length > ova_disk_volume_format_length: - ova_disk_volume_format_length = _ova_disk_volume_format_length - # ova_disk_volume_name column - _ova_disk_volume_name_length = len(str(disk["volume_name"])) + 1 - if _ova_disk_volume_name_length > ova_disk_volume_name_length: - ova_disk_volume_name_length = _ova_disk_volume_name_length - - # Format the string (header) - ova_list_output.append( - "{bold}{ova_header: <{ova_header_length}} {details_header: <{details_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - ova_header_length=ova_name_length + ova_id_length + 1, - details_header_length=ova_disk_id_length - + ova_disk_size_length - + ova_disk_pool_length - + ova_disk_volume_format_length - + ova_disk_volume_name_length - + 4, - ova_header="OVAs " - + "".join(["-" for _ in range(5, ova_name_length + ova_id_length)]), - details_header="Details " - + "".join( - [ - "-" - for _ in range( - 8, - ova_disk_id_length - + ova_disk_size_length - + ova_disk_pool_length - + ova_disk_volume_format_length - + ova_disk_volume_name_length - + 3, - ) - ] - ), - ) - ) - - ova_list_output.append( - "{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}} \ -{ova_disk_id: <{ova_disk_id_length}} \ -{ova_disk_size: <{ova_disk_size_length}} \ 
-{ova_disk_pool: <{ova_disk_pool_length}} \ -{ova_disk_volume_format: <{ova_disk_volume_format_length}} \ -{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}".format( - ova_name_length=ova_name_length, - ova_id_length=ova_id_length, - ova_disk_id_length=ova_disk_id_length, - ova_disk_pool_length=ova_disk_pool_length, - ova_disk_size_length=ova_disk_size_length, - ova_disk_volume_format_length=ova_disk_volume_format_length, - ova_disk_volume_name_length=ova_disk_volume_name_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - ova_name="Name", - ova_id="ID", - ova_disk_id="Disk ID", - ova_disk_size="Size [GB]", - ova_disk_pool="Pool", - ova_disk_volume_format="Format", - ova_disk_volume_name="Source Volume", - ) - ) - - # Format the string (elements) - for ova in sorted(ova_data, key=lambda i: i.get("name", None)): - ova_list_output.append( - "{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}}{end_bold}".format( - ova_name_length=ova_name_length, - ova_id_length=ova_id_length, - bold="", - end_bold="", - ova_name=str(ova["name"]), - ova_id=str(ova["id"]), - ) - ) - for disk in sorted(ova["volumes"], key=lambda i: i.get("disk_id", None)): - ova_list_output.append( - "{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}} \ -{ova_disk_id: <{ova_disk_id_length}} \ -{ova_disk_size: <{ova_disk_size_length}} \ -{ova_disk_pool: <{ova_disk_pool_length}} \ -{ova_disk_volume_format: <{ova_disk_volume_format_length}} \ -{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}".format( - ova_name_length=ova_name_length, - ova_id_length=ova_id_length, - ova_disk_id_length=ova_disk_id_length, - ova_disk_size_length=ova_disk_size_length, - ova_disk_pool_length=ova_disk_pool_length, - ova_disk_volume_format_length=ova_disk_volume_format_length, - ova_disk_volume_name_length=ova_disk_volume_name_length, - bold="", - end_bold="", - ova_name="", - ova_id="", - ova_disk_id=str(disk["disk_id"]), - 
ova_disk_size=str(disk["disk_size_gb"]), - ova_disk_pool=str(disk["pool"]), - ova_disk_volume_format=str(disk["volume_format"]), - ova_disk_volume_name=str(disk["volume_name"]), - ) - ) - - return "\n".join(ova_list_output) - - -def format_list_profile(profile_data): - if isinstance(profile_data, dict): - profile_data = [profile_data] - - # Format the profile "source" from the type and, if applicable, OVA profile name - for profile in profile_data: - profile_type = profile["type"] - if "ova" in profile_type: - # Set the source to the name of the OVA: - profile["source"] = "OVA {}".format(profile["ova"]) - else: - # Set the source to be the type - profile["source"] = profile_type - - profile_list_output = [] - - # Determine optimal column widths - profile_name_length = 18 - profile_id_length = 5 - profile_source_length = 7 - - profile_system_template_length = 7 - profile_network_template_length = 8 - profile_storage_template_length = 8 - profile_userdata_length = 9 - profile_script_length = 7 - profile_arguments_length = 18 - - for profile in profile_data: - # profile_name column - _profile_name_length = len(str(profile["name"])) + 1 - if _profile_name_length > profile_name_length: - profile_name_length = _profile_name_length - # profile_id column - _profile_id_length = len(str(profile["id"])) + 1 - if _profile_id_length > profile_id_length: - profile_id_length = _profile_id_length - # profile_source column - _profile_source_length = len(str(profile["source"])) + 1 - if _profile_source_length > profile_source_length: - profile_source_length = _profile_source_length - # profile_system_template column - _profile_system_template_length = len(str(profile["system_template"])) + 1 - if _profile_system_template_length > profile_system_template_length: - profile_system_template_length = _profile_system_template_length - # profile_network_template column - _profile_network_template_length = len(str(profile["network_template"])) + 1 - if _profile_network_template_length > 
profile_network_template_length: - profile_network_template_length = _profile_network_template_length - # profile_storage_template column - _profile_storage_template_length = len(str(profile["storage_template"])) + 1 - if _profile_storage_template_length > profile_storage_template_length: - profile_storage_template_length = _profile_storage_template_length - # profile_userdata column - _profile_userdata_length = len(str(profile["userdata"])) + 1 - if _profile_userdata_length > profile_userdata_length: - profile_userdata_length = _profile_userdata_length - # profile_script column - _profile_script_length = len(str(profile["script"])) + 1 - if _profile_script_length > profile_script_length: - profile_script_length = _profile_script_length - - # Format the string (header) - profile_list_output.append( - "{bold}{profile_header: <{profile_header_length}} {templates_header: <{templates_header_length}} {data_header: <{data_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - profile_header_length=profile_name_length - + profile_id_length - + profile_source_length - + 2, - templates_header_length=profile_system_template_length - + profile_network_template_length - + profile_storage_template_length - + 2, - data_header_length=profile_userdata_length - + profile_script_length - + profile_arguments_length - + 2, - profile_header="Profiles " - + "".join( - [ - "-" - for _ in range( - 9, - profile_name_length - + profile_id_length - + profile_source_length - + 1, - ) - ] - ), - templates_header="Templates " - + "".join( - [ - "-" - for _ in range( - 10, - profile_system_template_length - + profile_network_template_length - + profile_storage_template_length - + 1, - ) - ] - ), - data_header="Data " - + "".join( - [ - "-" - for _ in range( - 5, - profile_userdata_length - + profile_script_length - + profile_arguments_length - + 1, - ) - ] - ), - ) - ) - - profile_list_output.append( - "{bold}{profile_name: <{profile_name_length}} {profile_id: 
<{profile_id_length}} {profile_source: <{profile_source_length}} \ -{profile_system_template: <{profile_system_template_length}} \ -{profile_network_template: <{profile_network_template_length}} \ -{profile_storage_template: <{profile_storage_template_length}} \ -{profile_userdata: <{profile_userdata_length}} \ -{profile_script: <{profile_script_length}} \ -{profile_arguments}{end_bold}".format( - profile_name_length=profile_name_length, - profile_id_length=profile_id_length, - profile_source_length=profile_source_length, - profile_system_template_length=profile_system_template_length, - profile_network_template_length=profile_network_template_length, - profile_storage_template_length=profile_storage_template_length, - profile_userdata_length=profile_userdata_length, - profile_script_length=profile_script_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - profile_name="Name", - profile_id="ID", - profile_source="Source", - profile_system_template="System", - profile_network_template="Network", - profile_storage_template="Storage", - profile_userdata="Userdata", - profile_script="Script", - profile_arguments="Script Arguments", - ) - ) - - # Format the string (elements) - for profile in sorted(profile_data, key=lambda i: i.get("name", None)): - arguments_list = ", ".join(profile["arguments"]) - if not arguments_list: - arguments_list = "N/A" - profile_list_output.append( - "{bold}{profile_name: <{profile_name_length}} {profile_id: <{profile_id_length}} {profile_source: <{profile_source_length}} \ -{profile_system_template: <{profile_system_template_length}} \ -{profile_network_template: <{profile_network_template_length}} \ -{profile_storage_template: <{profile_storage_template_length}} \ -{profile_userdata: <{profile_userdata_length}} \ -{profile_script: <{profile_script_length}} \ -{profile_arguments}{end_bold}".format( - profile_name_length=profile_name_length, - profile_id_length=profile_id_length, - profile_source_length=profile_source_length, - 
profile_system_template_length=profile_system_template_length, - profile_network_template_length=profile_network_template_length, - profile_storage_template_length=profile_storage_template_length, - profile_userdata_length=profile_userdata_length, - profile_script_length=profile_script_length, - bold="", - end_bold="", - profile_name=profile["name"], - profile_id=profile["id"], - profile_source=profile["source"], - profile_system_template=profile["system_template"], - profile_network_template=profile["network_template"], - profile_storage_template=profile["storage_template"], - profile_userdata=profile["userdata"], - profile_script=profile["script"], - profile_arguments=arguments_list, - ) - ) - - return "\n".join(profile_list_output) - - -def format_list_task(task_data): - task_list_output = [] - - # Determine optimal column widths - task_id_length = 7 - task_type_length = 7 - task_worker_length = 7 - task_vm_name_length = 5 - task_vm_profile_length = 8 - task_vm_define_length = 8 - task_vm_start_length = 7 - - for task in task_data: - # task_id column - _task_id_length = len(str(task["id"])) + 1 - if _task_id_length > task_id_length: - task_id_length = _task_id_length - # task_worker column - _task_worker_length = len(str(task["worker"])) + 1 - if _task_worker_length > task_worker_length: - task_worker_length = _task_worker_length - # task_type column - _task_type_length = len(str(task["type"])) + 1 - if _task_type_length > task_type_length: - task_type_length = _task_type_length - # task_vm_name column - _task_vm_name_length = len(str(task["vm_name"])) + 1 - if _task_vm_name_length > task_vm_name_length: - task_vm_name_length = _task_vm_name_length - # task_vm_profile column - _task_vm_profile_length = len(str(task["vm_profile"])) + 1 - if _task_vm_profile_length > task_vm_profile_length: - task_vm_profile_length = _task_vm_profile_length - # task_vm_define column - _task_vm_define_length = len(str(task["vm_define"])) + 1 - if _task_vm_define_length > 
task_vm_define_length: - task_vm_define_length = _task_vm_define_length - # task_vm_start column - _task_vm_start_length = len(str(task["vm_start"])) + 1 - if _task_vm_start_length > task_vm_start_length: - task_vm_start_length = _task_vm_start_length - - # Format the string (header) - task_list_output.append( - "{bold}{task_header: <{task_header_length}} {vms_header: <{vms_header_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - task_header_length=task_id_length - + task_type_length - + task_worker_length - + 2, - vms_header_length=task_vm_name_length - + task_vm_profile_length - + task_vm_define_length - + task_vm_start_length - + 3, - task_header="Tasks " - + "".join( - [ - "-" - for _ in range( - 6, task_id_length + task_type_length + task_worker_length + 1 - ) - ] - ), - vms_header="VM Details " - + "".join( - [ - "-" - for _ in range( - 11, - task_vm_name_length - + task_vm_profile_length - + task_vm_define_length - + task_vm_start_length - + 2, - ) - ] - ), - ) - ) - - task_list_output.append( - "{bold}{task_id: <{task_id_length}} {task_type: <{task_type_length}} \ -{task_worker: <{task_worker_length}} \ -{task_vm_name: <{task_vm_name_length}} \ -{task_vm_profile: <{task_vm_profile_length}} \ -{task_vm_define: <{task_vm_define_length}} \ -{task_vm_start: <{task_vm_start_length}}{end_bold}".format( - task_id_length=task_id_length, - task_type_length=task_type_length, - task_worker_length=task_worker_length, - task_vm_name_length=task_vm_name_length, - task_vm_profile_length=task_vm_profile_length, - task_vm_define_length=task_vm_define_length, - task_vm_start_length=task_vm_start_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - task_id="Job ID", - task_type="Status", - task_worker="Worker", - task_vm_name="Name", - task_vm_profile="Profile", - task_vm_define="Define?", - task_vm_start="Start?", - ) - ) - - # Format the string (elements) - for task in sorted(task_data, key=lambda i: i.get("type", None)): - 
task_list_output.append( - "{bold}{task_id: <{task_id_length}} {task_type: <{task_type_length}} \ -{task_worker: <{task_worker_length}} \ -{task_vm_name: <{task_vm_name_length}} \ -{task_vm_profile: <{task_vm_profile_length}} \ -{task_vm_define: <{task_vm_define_length}} \ -{task_vm_start: <{task_vm_start_length}}{end_bold}".format( - task_id_length=task_id_length, - task_type_length=task_type_length, - task_worker_length=task_worker_length, - task_vm_name_length=task_vm_name_length, - task_vm_profile_length=task_vm_profile_length, - task_vm_define_length=task_vm_define_length, - task_vm_start_length=task_vm_start_length, - bold="", - end_bold="", - task_id=task["id"], - task_type=task["type"], - task_worker=task["worker"], - task_vm_name=task["vm_name"], - task_vm_profile=task["vm_profile"], - task_vm_define=task["vm_define"], - task_vm_start=task["vm_start"], - ) - ) - - return "\n".join(task_list_output) diff --git a/client-cli-old/pvc/lib/vm.py b/client-cli-old/pvc/lib/vm.py deleted file mode 100644 index 65abc5c8..00000000 --- a/client-cli-old/pvc/lib/vm.py +++ /dev/null @@ -1,2085 +0,0 @@ -#!/usr/bin/env python3 - -# vm.py - PVC CLI client function library, VM functions -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -############################################################################### - -import time -import re - -import pvc.lib.ansiprint as ansiprint -from pvc.lib.common import call_api, format_bytes, format_metric - - -# -# Primary functions -# -def vm_info(config, vm): - """ - Get information about (single) VM - - API endpoint: GET /api/v1/vm/{vm} - API arguments: - API schema: {json_data_object} - """ - response = call_api(config, "get", "/vm/{vm}".format(vm=vm)) - - if response.status_code == 200: - if isinstance(response.json(), list) and len(response.json()) != 1: - # No exact match; return not found - return False, "VM not found." - else: - # Return a single instance if the response is a list - if isinstance(response.json(), list): - return True, response.json()[0] - # This shouldn't happen, but is here just in case - else: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def vm_list(config, limit, target_node, target_state, target_tag, negate): - """ - Get list information about VMs (limited by {limit}, {target_node}, or {target_state}) - - API endpoint: GET /api/v1/vm - API arguments: limit={limit}, node={target_node}, state={target_state}, tag={target_tag}, negate={negate} - API schema: [{json_data_object},{json_data_object},etc.] 
- """ - params = dict() - if limit: - params["limit"] = limit - if target_node: - params["node"] = target_node - if target_state: - params["state"] = target_state - if target_tag: - params["tag"] = target_tag - params["negate"] = negate - - response = call_api(config, "get", "/vm", params=params) - - if response.status_code == 200: - return True, response.json() - else: - return False, response.json().get("message", "") - - -def vm_define( - config, - xml, - node, - node_limit, - node_selector, - node_autostart, - migration_method, - user_tags, - protected_tags, -): - """ - Define a new VM on the cluster - - API endpoint: POST /vm - API arguments: xml={xml}, node={node}, limit={node_limit}, selector={node_selector}, autostart={node_autostart}, migration_method={migration_method}, user_tags={user_tags}, protected_tags={protected_tags} - API schema: {"message":"{data}"} - """ - params = { - "node": node, - "limit": node_limit, - "selector": node_selector, - "autostart": node_autostart, - "migration_method": migration_method, - "user_tags": user_tags, - "protected_tags": protected_tags, - } - data = {"xml": xml} - response = call_api(config, "post", "/vm", params=params, data=data) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_modify(config, vm, xml, restart): - """ - Modify the configuration of VM - - API endpoint: PUT /vm/{vm} - API arguments: xml={xml}, restart={restart} - API schema: {"message":"{data}"} - """ - params = {"restart": restart} - data = {"xml": xml} - response = call_api( - config, "put", "/vm/{vm}".format(vm=vm), params=params, data=data - ) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_device_attach(config, vm, xml): - """ - Attach a device to a VM - - API endpoint: POST /vm/{vm}/device - API arguments: xml={xml} - API schema: 
{"message":"{data}"} - """ - data = {"xml": xml} - response = call_api(config, "post", "/vm/{vm}/device".format(vm=vm), data=data) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_device_detach(config, vm, xml): - """ - Detach a device from a VM - - API endpoint: DELETE /vm/{vm}/device - API arguments: xml={xml} - API schema: {"message":"{data}"} - """ - data = {"xml": xml} - response = call_api(config, "delete", "/vm/{vm}/device".format(vm=vm), data=data) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_rename(config, vm, new_name): - """ - Rename VM to new name - - API endpoint: POST /vm/{vm}/rename - API arguments: new_name={new_name} - API schema: {"message":"{data}"} - """ - params = {"new_name": new_name} - response = call_api(config, "post", "/vm/{vm}/rename".format(vm=vm), params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_metadata( - config, - vm, - node_limit, - node_selector, - node_autostart, - migration_method, - provisioner_profile, -): - """ - Modify PVC metadata of a VM - - API endpoint: POST /vm/{vm}/meta - API arguments: limit={node_limit}, selector={node_selector}, autostart={node_autostart}, migration_method={migration_method} profile={provisioner_profile} - API schema: {"message":"{data}"} - """ - params = dict() - - # Update any params that we've sent - if node_limit is not None: - params["limit"] = node_limit - - if node_selector is not None: - params["selector"] = node_selector - - if node_autostart is not None: - params["autostart"] = node_autostart - - if migration_method is not None: - params["migration_method"] = migration_method - - if provisioner_profile is not None: - params["profile"] = provisioner_profile - - # Write 
the new metadata - response = call_api(config, "post", "/vm/{vm}/meta".format(vm=vm), params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_tags_get(config, vm): - """ - Get PVC tags of a VM - - API endpoint: GET /vm/{vm}/tags - API arguments: - API schema: {{"name": "{name}", "type": "{type}"},...} - """ - - response = call_api(config, "get", "/vm/{vm}/tags".format(vm=vm)) - - if response.status_code == 200: - retstatus = True - retdata = response.json() - else: - retstatus = False - retdata = response.json().get("message", "") - - return retstatus, retdata - - -def vm_tag_set(config, vm, action, tag, protected=False): - """ - Modify PVC tags of a VM - - API endpoint: POST /vm/{vm}/tags - API arguments: action={action}, tag={tag}, protected={protected} - API schema: {"message":"{data}"} - """ - - params = {"action": action, "tag": tag, "protected": protected} - - # Update the tags - response = call_api(config, "post", "/vm/{vm}/tags".format(vm=vm), params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def format_vm_tags(config, name, tags): - """ - Format the output of a tags dictionary in a nice table - """ - if len(tags) < 1: - return "No tags found." 
- - output_list = [] - - name_length = 5 - _name_length = len(name) + 1 - if _name_length > name_length: - name_length = _name_length - - tags_name_length = 4 - tags_type_length = 5 - tags_protected_length = 10 - for tag in tags: - _tags_name_length = len(tag["name"]) + 1 - if _tags_name_length > tags_name_length: - tags_name_length = _tags_name_length - - _tags_type_length = len(tag["type"]) + 1 - if _tags_type_length > tags_type_length: - tags_type_length = _tags_type_length - - _tags_protected_length = len(str(tag["protected"])) + 1 - if _tags_protected_length > tags_protected_length: - tags_protected_length = _tags_protected_length - - output_list.append( - "{bold}{tags_name: <{tags_name_length}} \ -{tags_type: <{tags_type_length}} \ -{tags_protected: <{tags_protected_length}}{end_bold}".format( - tags_name_length=tags_name_length, - tags_type_length=tags_type_length, - tags_protected_length=tags_protected_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - tags_name="Name", - tags_type="Type", - tags_protected="Protected", - ) - ) - - for tag in sorted(tags, key=lambda t: t["name"]): - output_list.append( - "{bold}{tags_name: <{tags_name_length}} \ -{tags_type: <{tags_type_length}} \ -{tags_protected: <{tags_protected_length}}{end_bold}".format( - tags_type_length=tags_type_length, - tags_name_length=tags_name_length, - tags_protected_length=tags_protected_length, - bold="", - end_bold="", - tags_name=tag["name"], - tags_type=tag["type"], - tags_protected=str(tag["protected"]), - ) - ) - - return "\n".join(output_list) - - -def vm_remove(config, vm, delete_disks=False): - """ - Remove a VM - - API endpoint: DELETE /vm/{vm} - API arguments: delete_disks={delete_disks} - API schema: {"message":"{data}"} - """ - params = {"delete_disks": delete_disks} - response = call_api(config, "delete", "/vm/{vm}".format(vm=vm), params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, 
response.json().get("message", "") - - -def vm_state(config, vm, target_state, force=False, wait=False): - """ - Modify the current state of VM - - API endpoint: POST /vm/{vm}/state - API arguments: state={state}, wait={wait} - API schema: {"message":"{data}"} - """ - params = { - "state": target_state, - "force": str(force).lower(), - "wait": str(wait).lower(), - } - response = call_api(config, "post", "/vm/{vm}/state".format(vm=vm), params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_node(config, vm, target_node, action, force=False, wait=False, force_live=False): - """ - Modify the current node of VM via {action} - - API endpoint: POST /vm/{vm}/node - API arguments: node={target_node}, action={action}, force={force}, wait={wait}, force_live={force_live} - API schema: {"message":"{data}"} - """ - params = { - "node": target_node, - "action": action, - "force": str(force).lower(), - "wait": str(wait).lower(), - "force_live": str(force_live).lower(), - } - response = call_api(config, "post", "/vm/{vm}/node".format(vm=vm), params=params) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_locks(config, vm): - """ - Flush RBD locks of (stopped) VM - - API endpoint: POST /vm/{vm}/locks - API arguments: - API schema: {"message":"{data}"} - """ - response = call_api(config, "post", "/vm/{vm}/locks".format(vm=vm)) - - if response.status_code == 200: - retstatus = True - else: - retstatus = False - - return retstatus, response.json().get("message", "") - - -def vm_vcpus_set(config, vm, vcpus, topology, restart): - """ - Set the vCPU count of the VM with topology - - Calls vm_info to get the VM XML. - - Calls vm_modify to set the VM XML. 
- """ - from lxml.objectify import fromstring - from lxml.etree import tostring - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." - - parsed_xml.vcpu._setText(str(vcpus)) - parsed_xml.cpu.topology.set("sockets", str(topology[0])) - parsed_xml.cpu.topology.set("cores", str(topology[1])) - parsed_xml.cpu.topology.set("threads", str(topology[2])) - - try: - new_xml = tostring(parsed_xml, pretty_print=True) - except Exception: - return False, "ERROR: Failed to dump XML data." - - return vm_modify(config, vm, new_xml, restart) - - -def vm_vcpus_get(config, vm): - """ - Get the vCPU count of the VM - - Calls vm_info to get VM XML. - - Returns a tuple of (vcpus, (sockets, cores, threads)) - """ - from lxml.objectify import fromstring - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." 
- - vm_vcpus = int(parsed_xml.vcpu.text) - vm_sockets = parsed_xml.cpu.topology.attrib.get("sockets") - vm_cores = parsed_xml.cpu.topology.attrib.get("cores") - vm_threads = parsed_xml.cpu.topology.attrib.get("threads") - - return True, (vm_vcpus, (vm_sockets, vm_cores, vm_threads)) - - -def format_vm_vcpus(config, name, vcpus): - """ - Format the output of a vCPU value in a nice table - """ - output_list = [] - - name_length = 5 - _name_length = len(name) + 1 - if _name_length > name_length: - name_length = _name_length - - vcpus_length = 6 - sockets_length = 8 - cores_length = 6 - threads_length = 8 - - output_list.append( - "{bold}{name: <{name_length}} \ -{vcpus: <{vcpus_length}} \ -{sockets: <{sockets_length}} \ -{cores: <{cores_length}} \ -{threads: <{threads_length}}{end_bold}".format( - name_length=name_length, - vcpus_length=vcpus_length, - sockets_length=sockets_length, - cores_length=cores_length, - threads_length=threads_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - name="Name", - vcpus="vCPUs", - sockets="Sockets", - cores="Cores", - threads="Threads", - ) - ) - output_list.append( - "{bold}{name: <{name_length}} \ -{vcpus: <{vcpus_length}} \ -{sockets: <{sockets_length}} \ -{cores: <{cores_length}} \ -{threads: <{threads_length}}{end_bold}".format( - name_length=name_length, - vcpus_length=vcpus_length, - sockets_length=sockets_length, - cores_length=cores_length, - threads_length=threads_length, - bold="", - end_bold="", - name=name, - vcpus=vcpus[0], - sockets=vcpus[1][0], - cores=vcpus[1][1], - threads=vcpus[1][2], - ) - ) - return "\n".join(output_list) - - -def vm_memory_set(config, vm, memory, restart): - """ - Set the provisioned memory of the VM with topology - - Calls vm_info to get the VM XML. - - Calls vm_modify to set the VM XML. 
- """ - from lxml.objectify import fromstring - from lxml.etree import tostring - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." - - parsed_xml.memory._setText(str(memory)) - - try: - new_xml = tostring(parsed_xml, pretty_print=True) - except Exception: - return False, "ERROR: Failed to dump XML data." - - return vm_modify(config, vm, new_xml, restart) - - -def vm_memory_get(config, vm): - """ - Get the provisioned memory of the VM - - Calls vm_info to get VM XML. - - Returns an integer memory value. - """ - from lxml.objectify import fromstring - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." 
- - vm_memory = int(parsed_xml.memory.text) - - return True, vm_memory - - -def format_vm_memory(config, name, memory): - """ - Format the output of a memory value in a nice table - """ - output_list = [] - - name_length = 5 - _name_length = len(name) + 1 - if _name_length > name_length: - name_length = _name_length - - memory_length = 6 - - output_list.append( - "{bold}{name: <{name_length}} \ -{memory: <{memory_length}}{end_bold}".format( - name_length=name_length, - memory_length=memory_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - name="Name", - memory="RAM (M)", - ) - ) - output_list.append( - "{bold}{name: <{name_length}} \ -{memory: <{memory_length}}{end_bold}".format( - name_length=name_length, - memory_length=memory_length, - bold="", - end_bold="", - name=name, - memory=memory, - ) - ) - return "\n".join(output_list) - - -def vm_networks_add( - config, vm, network, macaddr, model, sriov, sriov_mode, live, restart -): - """ - Add a new network to the VM - - Calls vm_info to get the VM XML. - - Calls vm_modify to set the VM XML. - - Calls vm_device_attach if live to hot-attach the device. - """ - from lxml.objectify import fromstring - from lxml.etree import tostring - from random import randint - import pvc.lib.network as pvc_network - - network_exists, _ = pvc_network.net_info(config, network) - if not network_exists: - return False, "Network {} not found on the cluster.".format(network) - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." 
- - if macaddr is None: - mac_prefix = "52:54:00" - random_octet_A = "{:x}".format(randint(16, 238)) - random_octet_B = "{:x}".format(randint(16, 238)) - random_octet_C = "{:x}".format(randint(16, 238)) - macaddr = "{prefix}:{octetA}:{octetB}:{octetC}".format( - prefix=mac_prefix, - octetA=random_octet_A, - octetB=random_octet_B, - octetC=random_octet_C, - ) - - # Add an SR-IOV network - if sriov: - valid, sriov_vf_information = pvc_network.net_sriov_vf_info( - config, domain_information["node"], network - ) - if not valid: - return ( - False, - 'Specified SR-IOV VF "{}" does not exist on VM node "{}".'.format( - network, domain_information["node"] - ), - ) - - # Add a hostdev (direct PCIe) SR-IOV network - if sriov_mode == "hostdev": - bus_address = 'domain="0x{pci_domain}" bus="0x{pci_bus}" slot="0x{pci_slot}" function="0x{pci_function}"'.format( - pci_domain=sriov_vf_information["pci"]["domain"], - pci_bus=sriov_vf_information["pci"]["bus"], - pci_slot=sriov_vf_information["pci"]["slot"], - pci_function=sriov_vf_information["pci"]["function"], - ) - device_string = '
{network}'.format( - macaddr=macaddr, bus_address=bus_address, network=network - ) - # Add a macvtap SR-IOV network - elif sriov_mode == "macvtap": - device_string = ''.format( - macaddr=macaddr, network=network, model=model - ) - else: - return False, "ERROR: Invalid SR-IOV mode specified." - # Add a normal bridged PVC network - else: - # Set the bridge prefix - if network in ["upstream", "cluster", "storage"]: - br_prefix = "br" - else: - br_prefix = "vmbr" - - device_string = ''.format( - macaddr=macaddr, bridge="{}{}".format(br_prefix, network), model=model - ) - - device_xml = fromstring(device_string) - - all_interfaces = parsed_xml.devices.find("interface") - if all_interfaces is None: - all_interfaces = [] - for interface in all_interfaces: - if sriov: - if sriov_mode == "hostdev": - if interface.attrib.get("type") == "hostdev": - interface_address = 'domain="{pci_domain}" bus="{pci_bus}" slot="{pci_slot}" function="{pci_function}"'.format( - pci_domain=interface.source.address.attrib.get("domain"), - pci_bus=interface.source.address.attrib.get("bus"), - pci_slot=interface.source.address.attrib.get("slot"), - pci_function=interface.source.address.attrib.get("function"), - ) - if interface_address == bus_address: - return ( - False, - 'SR-IOV device "{}" is already configured for VM "{}".'.format( - network, vm - ), - ) - elif sriov_mode == "macvtap": - if interface.attrib.get("type") == "direct": - interface_dev = interface.source.attrib.get("dev") - if interface_dev == network: - return ( - False, - 'SR-IOV device "{}" is already configured for VM "{}".'.format( - network, vm - ), - ) - - # Add the interface at the end of the list (or, right above emulator) - if len(all_interfaces) > 0: - for idx, interface in enumerate(parsed_xml.devices.find("interface")): - if idx == len(all_interfaces) - 1: - interface.addnext(device_xml) - else: - parsed_xml.devices.find("emulator").addprevious(device_xml) - - try: - new_xml = tostring(parsed_xml, pretty_print=True) - 
except Exception: - return False, "ERROR: Failed to dump XML data." - - modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart) - - if not modify_retcode: - return modify_retcode, modify_retmsg - - if live: - attach_retcode, attach_retmsg = vm_device_attach(config, vm, device_string) - - if not attach_retcode: - retcode = attach_retcode - retmsg = attach_retmsg - else: - retcode = attach_retcode - retmsg = "Network '{}' successfully added to VM config and hot attached to running VM.".format( - network - ) - else: - retcode = modify_retcode - retmsg = modify_retmsg - - return retcode, retmsg - - -def vm_networks_remove(config, vm, network, macaddr, sriov, live, restart): - """ - Remove a network from the VM, optionally by MAC - - Calls vm_info to get the VM XML. - - Calls vm_modify to set the VM XML. - - Calls vm_device_detach to hot-remove the device. - """ - from lxml.objectify import fromstring - from lxml.etree import tostring - - if network is None and macaddr is None: - return False, "A network or MAC address must be specified for removal." - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." 
- - changed = False - device_string = None - for interface in parsed_xml.devices.find("interface"): - if sriov: - if interface.attrib.get("type") == "hostdev": - if_dev = str(interface.sriov_device) - if macaddr is None and network == if_dev: - interface.getparent().remove(interface) - changed = True - elif macaddr is not None and macaddr == interface.mac.attrib.get( - "address" - ): - interface.getparent().remove(interface) - changed = True - elif interface.attrib.get("type") == "direct": - if_dev = str(interface.source.attrib.get("dev")) - if macaddr is None and network == if_dev: - interface.getparent().remove(interface) - changed = True - elif macaddr is not None and macaddr == interface.mac.attrib.get( - "address" - ): - interface.getparent().remove(interface) - changed = True - else: - if_vni = re.match( - r"[vm]*br([0-9a-z]+)", interface.source.attrib.get("bridge") - ).group(1) - if macaddr is None and network == if_vni: - interface.getparent().remove(interface) - changed = True - elif macaddr is not None and macaddr == interface.mac.attrib.get("address"): - interface.getparent().remove(interface) - changed = True - if changed: - device_string = tostring(interface) - - if changed: - try: - new_xml = tostring(parsed_xml, pretty_print=True) - except Exception: - return False, "ERROR: Failed to dump XML data." - elif not changed and macaddr is not None: - return False, 'ERROR: Interface with MAC "{}" does not exist on VM.'.format( - macaddr - ) - elif not changed and network is not None: - return False, 'ERROR: Network "{}" does not exist on VM.'.format(network) - else: - return False, "ERROR: Unspecified error finding interface to remove." 
- - modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart) - - if not modify_retcode: - return modify_retcode, modify_retmsg - - if live and device_string: - detach_retcode, detach_retmsg = vm_device_detach(config, vm, device_string) - - if not detach_retcode: - retcode = detach_retcode - retmsg = detach_retmsg - else: - retcode = detach_retcode - retmsg = "Network '{}' successfully removed from VM config and hot detached from running VM.".format( - network - ) - else: - retcode = modify_retcode - retmsg = modify_retmsg - - return retcode, retmsg - - -def vm_networks_get(config, vm): - """ - Get the networks of the VM - - Calls vm_info to get VM XML. - - Returns a list of tuples of (network_vni, mac_address, model) - """ - from lxml.objectify import fromstring - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." 
- - network_data = list() - for interface in parsed_xml.devices.find("interface"): - mac_address = interface.mac.attrib.get("address") - model = interface.model.attrib.get("type") - interface_type = interface.attrib.get("type") - if interface_type == "bridge": - network = re.search( - r"[vm]*br([0-9a-z]+)", interface.source.attrib.get("bridge") - ).group(1) - elif interface_type == "direct": - network = "macvtap:{}".format(interface.source.attrib.get("dev")) - elif interface_type == "hostdev": - network = "hostdev:{}".format(interface.source.attrib.get("dev")) - - network_data.append((network, mac_address, model)) - - return True, network_data - - -def format_vm_networks(config, name, networks): - """ - Format the output of a network list in a nice table - """ - output_list = [] - - name_length = 5 - vni_length = 8 - macaddr_length = 12 - model_length = 6 - - _name_length = len(name) + 1 - if _name_length > name_length: - name_length = _name_length - - for network in networks: - _vni_length = len(network[0]) + 1 - if _vni_length > vni_length: - vni_length = _vni_length - - _macaddr_length = len(network[1]) + 1 - if _macaddr_length > macaddr_length: - macaddr_length = _macaddr_length - - _model_length = len(network[2]) + 1 - if _model_length > model_length: - model_length = _model_length - - output_list.append( - "{bold}{name: <{name_length}} \ -{vni: <{vni_length}} \ -{macaddr: <{macaddr_length}} \ -{model: <{model_length}}{end_bold}".format( - name_length=name_length, - vni_length=vni_length, - macaddr_length=macaddr_length, - model_length=model_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - name="Name", - vni="Network", - macaddr="MAC Address", - model="Model", - ) - ) - count = 0 - for network in networks: - if count > 0: - name = "" - count += 1 - output_list.append( - "{bold}{name: <{name_length}} \ -{vni: <{vni_length}} \ -{macaddr: <{macaddr_length}} \ -{model: <{model_length}}{end_bold}".format( - name_length=name_length, - 
vni_length=vni_length, - macaddr_length=macaddr_length, - model_length=model_length, - bold="", - end_bold="", - name=name, - vni=network[0], - macaddr=network[1], - model=network[2], - ) - ) - return "\n".join(output_list) - - -def vm_volumes_add(config, vm, volume, disk_id, bus, disk_type, live, restart): - """ - Add a new volume to the VM - - Calls vm_info to get the VM XML. - - Calls vm_modify to set the VM XML. - """ - from lxml.objectify import fromstring - from lxml.etree import tostring - from copy import deepcopy - import pvc.lib.ceph as pvc_ceph - - if disk_type == "rbd": - # Verify that the provided volume is valid - vpool = volume.split("/")[0] - vname = volume.split("/")[1] - retcode, retdata = pvc_ceph.ceph_volume_info(config, vpool, vname) - if not retcode: - return False, "Volume {} is not present in the cluster.".format(volume) - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." 
- - last_disk = None - id_list = list() - all_disks = parsed_xml.devices.find("disk") - if all_disks is None: - all_disks = [] - for disk in all_disks: - id_list.append(disk.target.attrib.get("dev")) - if disk.source.attrib.get("protocol") == disk_type: - if disk_type == "rbd": - last_disk = disk.source.attrib.get("name") - elif disk_type == "file": - last_disk = disk.source.attrib.get("file") - if last_disk == volume: - return False, "Volume {} is already configured for VM {}.".format( - volume, vm - ) - last_disk_details = deepcopy(disk) - - if disk_id is not None: - if disk_id in id_list: - return ( - False, - "Manually specified disk ID {} is already in use for VM {}.".format( - disk_id, vm - ), - ) - else: - # Find the next free disk ID - first_dev_prefix = id_list[0][0:-1] - - for char in range(ord("a"), ord("z")): - char = chr(char) - next_id = "{}{}".format(first_dev_prefix, char) - if next_id not in id_list: - break - else: - next_id = None - if next_id is None: - return ( - False, - "Failed to find a valid disk_id and none specified; too many disks for VM {}?".format( - vm - ), - ) - disk_id = next_id - - if last_disk is None: - if disk_type == "rbd": - # RBD volumes need an example to be based on - return ( - False, - "There are no existing RBD volumes attached to this VM. 
Autoconfiguration failed; use the 'vm modify' command to manually configure this volume with the required details for authentication, hosts, etc..", - ) - elif disk_type == "file": - # File types can be added ad-hoc - disk_template = ''.format( - source=volume, dev=disk_id, bus=bus - ) - last_disk_details = fromstring(disk_template) - - new_disk_details = last_disk_details - new_disk_details.target.set("dev", disk_id) - new_disk_details.target.set("bus", bus) - if disk_type == "rbd": - new_disk_details.source.set("name", volume) - elif disk_type == "file": - new_disk_details.source.set("file", volume) - device_xml = new_disk_details - - all_disks = parsed_xml.devices.find("disk") - if all_disks is None: - all_disks = [] - for disk in all_disks: - last_disk = disk - - # Add the disk at the end of the list (or, right above emulator) - if len(all_disks) > 0: - for idx, disk in enumerate(parsed_xml.devices.find("disk")): - if idx == len(all_disks) - 1: - disk.addnext(device_xml) - else: - parsed_xml.devices.find("emulator").addprevious(device_xml) - - try: - new_xml = tostring(parsed_xml, pretty_print=True) - except Exception: - return False, "ERROR: Failed to dump XML data." - - modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart) - - if not modify_retcode: - return modify_retcode, modify_retmsg - - if live: - device_string = tostring(device_xml) - attach_retcode, attach_retmsg = vm_device_attach(config, vm, device_string) - - if not attach_retcode: - retcode = attach_retcode - retmsg = attach_retmsg - else: - retcode = attach_retcode - retmsg = "Volume '{}/{}' successfully added to VM config and hot attached to running VM.".format( - vpool, vname - ) - else: - retcode = modify_retcode - retmsg = modify_retmsg - - return retcode, retmsg - - -def vm_volumes_remove(config, vm, volume, live, restart): - """ - Remove a volume to the VM - - Calls vm_info to get the VM XML. - - Calls vm_modify to set the VM XML. 
- """ - from lxml.objectify import fromstring - from lxml.etree import tostring - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML document." - - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." - - changed = False - device_string = None - for disk in parsed_xml.devices.find("disk"): - disk_name = disk.source.attrib.get("name") - if not disk_name: - disk_name = disk.source.attrib.get("file") - if volume == disk_name: - device_string = tostring(disk) - disk.getparent().remove(disk) - changed = True - - if changed: - try: - new_xml = tostring(parsed_xml, pretty_print=True) - except Exception: - return False, "ERROR: Failed to dump XML data." - else: - return False, 'ERROR: Volume "{}" does not exist on VM.'.format(volume) - - modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart) - - if not modify_retcode: - return modify_retcode, modify_retmsg - - if live and device_string: - detach_retcode, detach_retmsg = vm_device_detach(config, vm, device_string) - - if not detach_retcode: - retcode = detach_retcode - retmsg = detach_retmsg - else: - retcode = detach_retcode - retmsg = "Volume '{}' successfully removed from VM config and hot detached from running VM.".format( - volume - ) - else: - retcode = modify_retcode - retmsg = modify_retmsg - - return retcode, retmsg - - -def vm_volumes_get(config, vm): - """ - Get the volumes of the VM - - Calls vm_info to get VM XML. - - Returns a list of tuples of (volume, disk_id, type, bus) - """ - from lxml.objectify import fromstring - - status, domain_information = vm_info(config, vm) - if not status: - return status, domain_information - - xml = domain_information.get("xml", None) - if xml is None: - return False, "VM does not have a valid XML doccument." 
- - try: - parsed_xml = fromstring(xml) - except Exception: - return False, "ERROR: Failed to parse XML data." - - volume_data = list() - for disk in parsed_xml.devices.find("disk"): - protocol = disk.attrib.get("type") - disk_id = disk.target.attrib.get("dev") - bus = disk.target.attrib.get("bus") - if protocol == "network": - protocol = disk.source.attrib.get("protocol") - source = disk.source.attrib.get("name") - elif protocol == "file": - protocol = "file" - source = disk.source.attrib.get("file") - else: - protocol = "unknown" - source = "unknown" - - volume_data.append((source, disk_id, protocol, bus)) - - return True, volume_data - - -def format_vm_volumes(config, name, volumes): - """ - Format the output of a volume value in a nice table - """ - output_list = [] - - name_length = 5 - volume_length = 7 - disk_id_length = 4 - protocol_length = 5 - bus_length = 4 - - _name_length = len(name) + 1 - if _name_length > name_length: - name_length = _name_length - - for volume in volumes: - _volume_length = len(volume[0]) + 1 - if _volume_length > volume_length: - volume_length = _volume_length - - _disk_id_length = len(volume[1]) + 1 - if _disk_id_length > disk_id_length: - disk_id_length = _disk_id_length - - _protocol_length = len(volume[2]) + 1 - if _protocol_length > protocol_length: - protocol_length = _protocol_length - - _bus_length = len(volume[3]) + 1 - if _bus_length > bus_length: - bus_length = _bus_length - - output_list.append( - "{bold}{name: <{name_length}} \ -{volume: <{volume_length}} \ -{disk_id: <{disk_id_length}} \ -{protocol: <{protocol_length}} \ -{bus: <{bus_length}}{end_bold}".format( - name_length=name_length, - volume_length=volume_length, - disk_id_length=disk_id_length, - protocol_length=protocol_length, - bus_length=bus_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - name="Name", - volume="Volume", - disk_id="Dev", - protocol="Type", - bus="Bus", - ) - ) - count = 0 - for volume in volumes: - if count > 0: - name = "" - 
count += 1 - output_list.append( - "{bold}{name: <{name_length}} \ -{volume: <{volume_length}} \ -{disk_id: <{disk_id_length}} \ -{protocol: <{protocol_length}} \ -{bus: <{bus_length}}{end_bold}".format( - name_length=name_length, - volume_length=volume_length, - disk_id_length=disk_id_length, - protocol_length=protocol_length, - bus_length=bus_length, - bold="", - end_bold="", - name=name, - volume=volume[0], - disk_id=volume[1], - protocol=volume[2], - bus=volume[3], - ) - ) - return "\n".join(output_list) - - -def view_console_log(config, vm, lines=100): - """ - Return console log lines from the API (and display them in a pager in the main CLI) - - API endpoint: GET /vm/{vm}/console - API arguments: lines={lines} - API schema: {"name":"{vmname}","data":"{console_log}"} - """ - params = {"lines": lines} - response = call_api(config, "get", "/vm/{vm}/console".format(vm=vm), params=params) - - if response.status_code != 200: - return False, response.json().get("message", "") - - console_log = response.json()["data"] - - # Shrink the log buffer to length lines - shrunk_log = console_log.split("\n")[-lines:] - loglines = "\n".join(shrunk_log) - - return True, loglines - - -def follow_console_log(config, vm, lines=10): - """ - Return and follow console log lines from the API - - API endpoint: GET /vm/{vm}/console - API arguments: lines={lines} - API schema: {"name":"{vmname}","data":"{console_log}"} - """ - # We always grab 200 to match the follow call, but only _show_ `lines` number - params = {"lines": 200} - response = call_api(config, "get", "/vm/{vm}/console".format(vm=vm), params=params) - - if response.status_code != 200: - return False, response.json().get("message", "") - - # Shrink the log buffer to length lines - console_log = response.json()["data"] - shrunk_log = console_log.split("\n")[-int(lines) :] - loglines = "\n".join(shrunk_log) - - # Print the initial data and begin following - print(loglines, end="") - - while True: - # Grab the next line set 
(200 is a reasonable number of lines per half-second; any more are skipped) - try: - params = {"lines": 200} - response = call_api( - config, "get", "/vm/{vm}/console".format(vm=vm), params=params - ) - new_console_log = response.json()["data"] - except Exception: - break - # Split the new and old log strings into constitutent lines - old_console_loglines = console_log.split("\n") - new_console_loglines = new_console_log.split("\n") - - # Set the console log to the new log value for the next iteration - console_log = new_console_log - - # Remove the lines from the old log until we hit the first line of the new log; this - # ensures that the old log is a string that we can remove from the new log entirely - for index, line in enumerate(old_console_loglines, start=0): - if line == new_console_loglines[0]: - del old_console_loglines[0:index] - break - # Rejoin the log lines into strings - old_console_log = "\n".join(old_console_loglines) - new_console_log = "\n".join(new_console_loglines) - # Remove the old lines from the new log - diff_console_log = new_console_log.replace(old_console_log, "") - # If there's a difference, print it out - if diff_console_log: - print(diff_console_log, end="") - # Wait half a second - time.sleep(0.5) - - return True, "" - - -# -# Output display functions -# -def format_info(config, domain_information, long_output): - # Format a nice output; do this line-by-line then concat the elements at the end - ainformation = [] - ainformation.append( - "{}Virtual machine information:{}".format(ansiprint.bold(), ansiprint.end()) - ) - ainformation.append("") - # Basic information - ainformation.append( - "{}Name:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["name"] - ) - ) - ainformation.append( - "{}UUID:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["uuid"] - ) - ) - ainformation.append( - "{}Description:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["description"] - ) 
- ) - ainformation.append( - "{}Profile:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["profile"] - ) - ) - ainformation.append( - "{}Memory (M):{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["memory"] - ) - ) - ainformation.append( - "{}vCPUs:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["vcpu"] - ) - ) - ainformation.append( - "{}Topology (S/C/T):{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["vcpu_topology"] - ) - ) - - if ( - domain_information["vnc"].get("listen", "None") != "None" - and domain_information["vnc"].get("port", "None") != "None" - ): - ainformation.append("") - ainformation.append( - "{}VNC listen:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["vnc"]["listen"] - ) - ) - ainformation.append( - "{}VNC port:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["vnc"]["port"] - ) - ) - - if long_output is True: - # Virtualization information - ainformation.append("") - ainformation.append( - "{}Emulator:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["emulator"] - ) - ) - ainformation.append( - "{}Type:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["type"] - ) - ) - ainformation.append( - "{}Arch:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["arch"] - ) - ) - ainformation.append( - "{}Machine:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["machine"] - ) - ) - ainformation.append( - "{}Features:{} {}".format( - ansiprint.purple(), - ansiprint.end(), - " ".join(domain_information["features"]), - ) - ) - ainformation.append("") - ainformation.append( - "{0}Memory stats:{1} {2}Swap In Swap Out Faults (maj/min) Available Usable Unused RSS{3}".format( - ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end() - ) - ) - ainformation.append( - " {0: <7} {1: <8} {2: <16} {3: <10} {4: <7} {5: <7} 
{6: <10}".format( - format_metric(domain_information["memory_stats"].get("swap_in", 0)), - format_metric(domain_information["memory_stats"].get("swap_out", 0)), - "/".join( - [ - format_metric( - domain_information["memory_stats"].get("major_fault", 0) - ), - format_metric( - domain_information["memory_stats"].get("minor_fault", 0) - ), - ] - ), - format_bytes( - domain_information["memory_stats"].get("available", 0) * 1024 - ), - format_bytes( - domain_information["memory_stats"].get("usable", 0) * 1024 - ), - format_bytes( - domain_information["memory_stats"].get("unused", 0) * 1024 - ), - format_bytes(domain_information["memory_stats"].get("rss", 0) * 1024), - ) - ) - ainformation.append("") - ainformation.append( - "{0}vCPU stats:{1} {2}CPU time (ns) User time (ns) System time (ns){3}".format( - ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end() - ) - ) - ainformation.append( - " {0: <16} {1: <16} {2: <15}".format( - str(domain_information["vcpu_stats"].get("cpu_time", 0)), - str(domain_information["vcpu_stats"].get("user_time", 0)), - str(domain_information["vcpu_stats"].get("system_time", 0)), - ) - ) - - # PVC cluster information - ainformation.append("") - dstate_colour = { - "start": ansiprint.green(), - "restart": ansiprint.yellow(), - "shutdown": ansiprint.yellow(), - "stop": ansiprint.red(), - "disable": ansiprint.blue(), - "fail": ansiprint.red(), - "migrate": ansiprint.blue(), - "unmigrate": ansiprint.blue(), - "provision": ansiprint.blue(), - } - ainformation.append( - "{}State:{} {}{}{}".format( - ansiprint.purple(), - ansiprint.end(), - dstate_colour[domain_information["state"]], - domain_information["state"], - ansiprint.end(), - ) - ) - ainformation.append( - "{}Current Node:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["node"] - ) - ) - if not domain_information["last_node"]: - domain_information["last_node"] = "N/A" - ainformation.append( - "{}Previous Node:{} {}".format( - ansiprint.purple(), 
ansiprint.end(), domain_information["last_node"] - ) - ) - - # Get a failure reason if applicable - if domain_information["failed_reason"]: - ainformation.append("") - ainformation.append( - "{}Failure reason:{} {}".format( - ansiprint.purple(), ansiprint.end(), domain_information["failed_reason"] - ) - ) - - if not domain_information.get("node_selector"): - formatted_node_selector = "False" - else: - formatted_node_selector = domain_information["node_selector"] - - if not domain_information.get("node_limit"): - formatted_node_limit = "False" - else: - formatted_node_limit = ", ".join(domain_information["node_limit"]) - - if not domain_information.get("node_autostart"): - formatted_node_autostart = "False" - else: - formatted_node_autostart = domain_information["node_autostart"] - - if not domain_information.get("migration_method"): - formatted_migration_method = "any" - else: - formatted_migration_method = domain_information["migration_method"] - - ainformation.append( - "{}Migration selector:{} {}".format( - ansiprint.purple(), ansiprint.end(), formatted_node_selector - ) - ) - ainformation.append( - "{}Node limit:{} {}".format( - ansiprint.purple(), ansiprint.end(), formatted_node_limit - ) - ) - ainformation.append( - "{}Autostart:{} {}".format( - ansiprint.purple(), ansiprint.end(), formatted_node_autostart - ) - ) - ainformation.append( - "{}Migration Method:{} {}".format( - ansiprint.purple(), ansiprint.end(), formatted_migration_method - ) - ) - - # Tag list - tags_name_length = 5 - tags_type_length = 5 - tags_protected_length = 10 - for tag in domain_information["tags"]: - _tags_name_length = len(tag["name"]) + 1 - if _tags_name_length > tags_name_length: - tags_name_length = _tags_name_length - - _tags_type_length = len(tag["type"]) + 1 - if _tags_type_length > tags_type_length: - tags_type_length = _tags_type_length - - _tags_protected_length = len(str(tag["protected"])) + 1 - if _tags_protected_length > tags_protected_length: - tags_protected_length = 
_tags_protected_length - - if len(domain_information["tags"]) > 0: - ainformation.append("") - ainformation.append( - "{purple}Tags:{end} {bold}{tags_name: <{tags_name_length}} {tags_type: <{tags_type_length}} {tags_protected: <{tags_protected_length}}{end}".format( - purple=ansiprint.purple(), - bold=ansiprint.bold(), - end=ansiprint.end(), - tags_name_length=tags_name_length, - tags_type_length=tags_type_length, - tags_protected_length=tags_protected_length, - tags_name="Name", - tags_type="Type", - tags_protected="Protected", - ) - ) - - for tag in sorted( - domain_information["tags"], key=lambda t: t["type"] + t["name"] - ): - ainformation.append( - " {tags_name: <{tags_name_length}} {tags_type: <{tags_type_length}} {tags_protected: <{tags_protected_length}}".format( - tags_name_length=tags_name_length, - tags_type_length=tags_type_length, - tags_protected_length=tags_protected_length, - tags_name=tag["name"], - tags_type=tag["type"], - tags_protected=str(tag["protected"]), - ) - ) - else: - ainformation.append("") - ainformation.append( - "{purple}Tags:{end} N/A".format( - purple=ansiprint.purple(), - end=ansiprint.end(), - ) - ) - - # Network list - net_list = [] - cluster_net_list = call_api(config, "get", "/network").json() - for net in domain_information["networks"]: - net_vni = net["vni"] - if ( - net_vni not in ["cluster", "storage", "upstream"] - and not re.match(r"^macvtap:.*", net_vni) - and not re.match(r"^hostdev:.*", net_vni) - ): - if int(net_vni) not in [net["vni"] for net in cluster_net_list]: - net_list.append( - ansiprint.red() + net_vni + ansiprint.end() + " [invalid]" - ) - else: - net_list.append(net_vni) - else: - net_list.append(net_vni) - - ainformation.append("") - ainformation.append( - "{}Networks:{} {}".format( - ansiprint.purple(), ansiprint.end(), ", ".join(net_list) - ) - ) - - if long_output is True: - # Disk list - ainformation.append("") - name_length = 0 - for disk in domain_information["disks"]: - _name_length = 
len(disk["name"]) + 1 - if _name_length > name_length: - name_length = _name_length - ainformation.append( - "{0}Disks:{1} {2}ID Type {3: <{width}} Dev Bus Requests (r/w) Data (r/w){4}".format( - ansiprint.purple(), - ansiprint.end(), - ansiprint.bold(), - "Name", - ansiprint.end(), - width=name_length, - ) - ) - for disk in domain_information["disks"]: - ainformation.append( - " {0: <3} {1: <5} {2: <{width}} {3: <4} {4: <5} {5: <15} {6}".format( - domain_information["disks"].index(disk), - disk["type"], - disk["name"], - disk["dev"], - disk["bus"], - "/".join( - [ - str(format_metric(disk.get("rd_req", 0))), - str(format_metric(disk.get("wr_req", 0))), - ] - ), - "/".join( - [ - str(format_bytes(disk.get("rd_bytes", 0))), - str(format_bytes(disk.get("wr_bytes", 0))), - ] - ), - width=name_length, - ) - ) - ainformation.append("") - ainformation.append( - "{}Interfaces:{} {}ID Type Source Model MAC Data (r/w) Packets (r/w) Errors (r/w){}".format( - ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end() - ) - ) - for net in domain_information["networks"]: - net_type = net["type"] - net_source = net["source"] - net_mac = net["mac"] - if net_type in ["direct", "hostdev"]: - net_model = "N/A" - net_bytes = "N/A" - net_packets = "N/A" - net_errors = "N/A" - elif net_type in ["bridge"]: - net_model = net["model"] - net_bytes = "/".join( - [ - str(format_bytes(net.get("rd_bytes", 0))), - str(format_bytes(net.get("wr_bytes", 0))), - ] - ) - net_packets = "/".join( - [ - str(format_metric(net.get("rd_packets", 0))), - str(format_metric(net.get("wr_packets", 0))), - ] - ) - net_errors = "/".join( - [ - str(format_metric(net.get("rd_errors", 0))), - str(format_metric(net.get("wr_errors", 0))), - ] - ) - - ainformation.append( - " {0: <3} {1: <8} {2: <12} {3: <8} {4: <18} {5: <12} {6: <15} {7: <12}".format( - domain_information["networks"].index(net), - net_type, - net_source, - net_model, - net_mac, - net_bytes, - net_packets, - net_errors, - ) - ) - # 
Controller list - ainformation.append("") - ainformation.append( - "{}Controllers:{} {}ID Type Model{}".format( - ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end() - ) - ) - for controller in domain_information["controllers"]: - ainformation.append( - " {0: <3} {1: <14} {2: <8}".format( - domain_information["controllers"].index(controller), - controller["type"], - str(controller["model"]), - ) - ) - - # Join it all together - ainformation.append("") - return "\n".join(ainformation) - - -def format_list(config, vm_list, raw): - # Function to strip the "br" off of nets and return a nicer list - def getNiceNetID(domain_information): - # Network list - net_list = [] - for net in domain_information["networks"]: - net_list.append(net["vni"]) - return net_list - - # Function to get tag names and returna nicer list - def getNiceTagName(domain_information): - # Tag list - tag_list = [] - for tag in sorted( - domain_information["tags"], key=lambda t: t["type"] + t["name"] - ): - tag_list.append(tag["name"]) - return tag_list - - # Handle raw mode since it just lists the names - if raw: - ainformation = list() - for vm in sorted(item["name"] for item in vm_list): - ainformation.append(vm) - return "\n".join(ainformation) - - vm_list_output = [] - - # Determine optimal column widths - # Dynamic columns: node_name, node, migrated - vm_name_length = 5 - vm_state_length = 6 - vm_tags_length = 5 - vm_nets_length = 9 - vm_ram_length = 8 - vm_vcpu_length = 6 - vm_node_length = 8 - vm_migrated_length = 9 - for domain_information in vm_list: - net_list = getNiceNetID(domain_information) - tag_list = getNiceTagName(domain_information) - # vm_name column - _vm_name_length = len(domain_information["name"]) + 1 - if _vm_name_length > vm_name_length: - vm_name_length = _vm_name_length - # vm_state column - _vm_state_length = len(domain_information["state"]) + 1 - if _vm_state_length > vm_state_length: - vm_state_length = _vm_state_length - # vm_tags column - 
_vm_tags_length = len(",".join(tag_list)) + 1 - if _vm_tags_length > vm_tags_length: - vm_tags_length = _vm_tags_length - # vm_nets column - _vm_nets_length = len(",".join(net_list)) + 1 - if _vm_nets_length > vm_nets_length: - vm_nets_length = _vm_nets_length - # vm_node column - _vm_node_length = len(domain_information["node"]) + 1 - if _vm_node_length > vm_node_length: - vm_node_length = _vm_node_length - # vm_migrated column - _vm_migrated_length = len(domain_information["migrated"]) + 1 - if _vm_migrated_length > vm_migrated_length: - vm_migrated_length = _vm_migrated_length - - # Format the string (header) - vm_list_output.append( - "{bold}{vm_header: <{vm_header_length}} {resource_header: <{resource_header_length}} {node_header: <{node_header_length}}{end_bold}".format( - vm_header_length=vm_name_length + vm_state_length + vm_tags_length + 2, - resource_header_length=vm_nets_length + vm_ram_length + vm_vcpu_length + 2, - node_header_length=vm_node_length + vm_migrated_length + 1, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - vm_header="VMs " - + "".join( - [ - "-" - for _ in range( - 4, vm_name_length + vm_state_length + vm_tags_length + 1 - ) - ] - ), - resource_header="Resources " - + "".join( - [ - "-" - for _ in range( - 10, vm_nets_length + vm_ram_length + vm_vcpu_length + 1 - ) - ] - ), - node_header="Node " - + "".join(["-" for _ in range(5, vm_node_length + vm_migrated_length)]), - ) - ) - - vm_list_output.append( - "{bold}{vm_name: <{vm_name_length}} \ -{vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \ -{vm_tags: <{vm_tags_length}} \ -{vm_networks: <{vm_nets_length}} \ -{vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \ -{vm_node: <{vm_node_length}} \ -{vm_migrated: <{vm_migrated_length}}{end_bold}".format( - vm_name_length=vm_name_length, - vm_state_length=vm_state_length, - vm_tags_length=vm_tags_length, - vm_nets_length=vm_nets_length, - vm_ram_length=vm_ram_length, - vm_vcpu_length=vm_vcpu_length, - 
vm_node_length=vm_node_length, - vm_migrated_length=vm_migrated_length, - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - vm_state_colour="", - end_colour="", - vm_name="Name", - vm_state="State", - vm_tags="Tags", - vm_networks="Networks", - vm_memory="RAM (M)", - vm_vcpu="vCPUs", - vm_node="Current", - vm_migrated="Migrated", - ) - ) - - # Get a list of cluster networks for validity comparisons - cluster_net_list = call_api(config, "get", "/network").json() - - # Format the string (elements) - for domain_information in sorted(vm_list, key=lambda v: v["name"]): - if domain_information["state"] == "start": - vm_state_colour = ansiprint.green() - elif domain_information["state"] == "restart": - vm_state_colour = ansiprint.yellow() - elif domain_information["state"] == "shutdown": - vm_state_colour = ansiprint.yellow() - elif domain_information["state"] == "stop": - vm_state_colour = ansiprint.red() - elif domain_information["state"] == "fail": - vm_state_colour = ansiprint.red() - else: - vm_state_colour = ansiprint.blue() - - # Handle colouring for an invalid network config - net_list = getNiceNetID(domain_information) - tag_list = getNiceTagName(domain_information) - if len(tag_list) < 1: - tag_list = ["N/A"] - - net_invalid_list = [] - for net_vni in net_list: - if ( - net_vni not in ["cluster", "storage", "upstream"] - and not re.match(r"^macvtap:.*", net_vni) - and not re.match(r"^hostdev:.*", net_vni) - ): - if int(net_vni) not in [net["vni"] for net in cluster_net_list]: - net_invalid_list.append(True) - else: - net_invalid_list.append(False) - else: - net_invalid_list.append(False) - - net_string_list = [] - for net_idx, net_vni in enumerate(net_list): - if net_invalid_list[net_idx]: - net_string_list.append( - "{}{}{}".format( - ansiprint.red(), - net_vni, - ansiprint.end(), - ) - ) - # Fix the length due to the extra fake characters - vm_nets_length -= len(net_vni) - vm_nets_length += len(net_string_list[net_idx]) - else: - 
net_string_list.append(net_vni) - - vm_list_output.append( - "{bold}{vm_name: <{vm_name_length}} \ -{vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \ -{vm_tags: <{vm_tags_length}} \ -{vm_networks: <{vm_nets_length}} \ -{vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \ -{vm_node: <{vm_node_length}} \ -{vm_migrated: <{vm_migrated_length}}{end_bold}".format( - vm_name_length=vm_name_length, - vm_state_length=vm_state_length, - vm_tags_length=vm_tags_length, - vm_nets_length=vm_nets_length, - vm_ram_length=vm_ram_length, - vm_vcpu_length=vm_vcpu_length, - vm_node_length=vm_node_length, - vm_migrated_length=vm_migrated_length, - bold="", - end_bold="", - vm_state_colour=vm_state_colour, - end_colour=ansiprint.end(), - vm_name=domain_information["name"], - vm_state=domain_information["state"], - vm_tags=",".join(tag_list), - vm_networks=",".join(net_string_list), - vm_memory=domain_information["memory"], - vm_vcpu=domain_information["vcpu"], - vm_node=domain_information["node"], - vm_migrated=domain_information["migrated"], - ) - ) - - return "\n".join(vm_list_output) diff --git a/client-cli-old/pvc/lib/zkhandler.py b/client-cli-old/pvc/lib/zkhandler.py deleted file mode 100644 index b1437ce2..00000000 --- a/client-cli-old/pvc/lib/zkhandler.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python3 - -# zkhandler.py - Secure versioned ZooKeeper updates -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -import uuid - - -# Exists function -def exists(zk_conn, key): - stat = zk_conn.exists(key) - if stat: - return True - else: - return False - - -# Child list function -def listchildren(zk_conn, key): - children = zk_conn.get_children(key) - return children - - -# Delete key function -def deletekey(zk_conn, key, recursive=True): - zk_conn.delete(key, recursive=recursive) - - -# Data read function -def readdata(zk_conn, key): - data_raw = zk_conn.get(key) - data = data_raw[0].decode("utf8") - return data - - -# Data write function -def writedata(zk_conn, kv): - # Start up a transaction - zk_transaction = zk_conn.transaction() - - # Proceed one KV pair at a time - for key in sorted(kv): - data = kv[key] - - # Check if this key already exists or not - if not zk_conn.exists(key): - # We're creating a new key - zk_transaction.create(key, str(data).encode("utf8")) - else: - # We're updating a key with version validation - orig_data = zk_conn.get(key) - version = orig_data[1].version - - # Set what we expect the new version to be - new_version = version + 1 - - # Update the data - zk_transaction.set_data(key, str(data).encode("utf8")) - - # Set up the check - try: - zk_transaction.check(key, new_version) - except TypeError: - print('Zookeeper key "{}" does not match expected version'.format(key)) - return False - - # Commit the transaction - try: - zk_transaction.commit() - return True - except Exception: - return False - - -# Write lock function -def writelock(zk_conn, key): - lock_id = str(uuid.uuid1()) - lock = zk_conn.WriteLock("{}".format(key), lock_id) - return lock - - -# Read lock function -def readlock(zk_conn, key): - lock_id = str(uuid.uuid1()) - lock = zk_conn.ReadLock("{}".format(key), lock_id) - return lock diff --git a/client-cli-old/pvc/pvc.py 
b/client-cli-old/pvc/pvc.py deleted file mode 100755 index c4fbcb1d..00000000 --- a/client-cli-old/pvc/pvc.py +++ /dev/null @@ -1,6352 +0,0 @@ -#!/usr/bin/env python3 - -# pvc.py - PVC client command-line interface -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -import socket -import click -import os -import difflib -import re -import time -import colorama -import yaml -import json -import syslog -import lxml.etree as etree - -from sys import argv - -from distutils.util import strtobool - -from functools import wraps - -import pvc.lib.ansiprint as ansiprint -import pvc.lib.cluster as pvc_cluster -import pvc.lib.node as pvc_node -import pvc.lib.vm as pvc_vm -import pvc.lib.network as pvc_network -import pvc.lib.ceph as pvc_ceph -import pvc.lib.provisioner as pvc_provisioner - - -myhostname = socket.gethostname().split(".")[0] -zk_host = "" -is_completion = True if os.environ.get("_PVC_COMPLETE", "") == "complete" else False - -default_store_data = {"cfgfile": "/etc/pvc/pvcapid.yaml"} -config = dict() - - -# -# Audit function -# -def audit(): - args = argv - args[0] = "pvc" - syslog.openlog(facility=syslog.LOG_AUTH) - syslog.syslog( - 'client audit: command "{}" by user "{}"'.format( - " ".join(args), - os.environ.get("USER", None), - ) - ) - syslog.closelog() - - -# -# Version function 
-# -def print_version(ctx, param, value): - if not value or ctx.resilient_parsing: - return - from pkg_resources import get_distribution - - version = get_distribution("pvc").version - echo(f"Parallel Virtual Cluster version {version}") - ctx.exit() - - -# -# Data store handling functions -# -def read_from_yaml(cfgfile): - with open(cfgfile, "r") as fh: - api_config = yaml.load(fh, Loader=yaml.BaseLoader) - host = api_config["pvc"]["api"]["listen_address"] - port = api_config["pvc"]["api"]["listen_port"] - if strtobool(api_config["pvc"]["api"]["ssl"]["enabled"]): - scheme = "https" - else: - scheme = "http" - if strtobool(api_config["pvc"]["api"]["authentication"]["enabled"]): - # Always use the first token - api_key = api_config["pvc"]["api"]["authentication"]["tokens"][0]["token"] - else: - api_key = "N/A" - return cfgfile, host, port, scheme, api_key - - -def get_config(store_data, cluster=None): - # This is generally static - prefix = "/api/v1" - - cluster_details = store_data.get(cluster) - - if not cluster_details: - cluster_details = default_store_data - cluster = "local" - - if cluster_details.get("cfgfile", None): - # This is a reference to an API configuration; grab the details from its listen address - cfgfile = cluster_details.get("cfgfile") - if os.path.isfile(cfgfile): - description, host, port, scheme, api_key = read_from_yaml(cfgfile) - else: - return {"badcfg": True} - # Handle an all-wildcard address - if host == "0.0.0.0": - host = "127.0.0.1" - else: - # This is a static configuration, get the raw details - description = cluster_details["description"] - host = cluster_details["host"] - port = cluster_details["port"] - scheme = cluster_details["scheme"] - api_key = cluster_details["api_key"] - - config = dict() - config["debug"] = False - config["cluster"] = cluster - config["description"] = description - config["api_host"] = "{}:{}".format(host, port) - config["api_scheme"] = scheme - config["api_key"] = api_key - config["api_prefix"] = prefix - 
if cluster == "local": - config["verify_ssl"] = False - else: - config["verify_ssl"] = bool( - strtobool(os.environ.get("PVC_CLIENT_VERIFY_SSL", "True")) - ) - - return config - - -def get_store(store_path): - store_file = "{}/pvc-cli.json".format(store_path) - with open(store_file, "r") as fh: - store_data = json.loads(fh.read()) - return store_data - - -def update_store(store_path, store_data): - store_file = "{}/pvc-cli.json".format(store_path) - if not os.path.exists(store_file): - with open(store_file, "w") as fh: - fh.write(json.dumps(store_data, sort_keys=True, indent=4)) - # Ensure file has sensible permissions due to API key storage, but only when created! - os.chmod(store_file, int(os.environ.get("PVC_CLIENT_DB_PERMS", "600"), 8)) - else: - with open(store_file, "w") as fh: - fh.write(json.dumps(store_data, sort_keys=True, indent=4)) - - -if not is_completion: - pvc_client_dir = os.environ.get("PVC_CLIENT_DIR", None) - home_dir = os.environ.get("HOME", None) - if pvc_client_dir: - store_path = "{}".format(pvc_client_dir) - elif home_dir: - store_path = "{}/.config/pvc".format(home_dir) - else: - print("WARNING: No client or home config dir found, using /tmp instead") - store_path = "/tmp/pvc" - - if not os.path.isdir(store_path): - os.makedirs(store_path) - if not os.path.isfile(store_path + "/pvc-cli.json"): - update_store(store_path, {"local": default_store_data}) - -CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"], max_content_width=120) - - -def echo(msg, nl=True, err=False): - if config.get("colour", False): - colour = True - else: - colour = None - - click.echo(message=msg, color=colour, nl=nl, err=err) - - -def cleanup(retcode, retmsg): - if retmsg != "": - echo(retmsg) - if retcode is True: - exit(0) - else: - exit(1) - - -############################################################################### -# pvc cluster -############################################################################### -@click.group( - name="cluster", - 
short_help="Manage PVC cluster connections.", - context_settings=CONTEXT_SETTINGS, -) -def cli_cluster(): - """ - Manage the PVC clusters this CLI can connect to. - """ - pass - - -############################################################################### -# pvc cluster add -############################################################################### -@click.command(name="add", short_help="Add a new cluster to the client.") -@click.option( - "-d", - "--description", - "description", - required=False, - default="N/A", - help="A text description of the cluster.", -) -@click.option( - "-a", - "--address", - "address", - required=True, - help="The IP address or hostname of the cluster API client.", -) -@click.option( - "-p", - "--port", - "port", - required=False, - default=7370, - show_default=True, - help="The cluster API client port.", -) -@click.option( - "-s/-S", - "--ssl/--no-ssl", - "ssl", - is_flag=True, - default=False, - show_default=True, - help="Whether to use SSL or not.", -) -@click.option( - "-k", - "--api-key", - "api_key", - required=False, - default=None, - help="An API key to authenticate against the cluster.", -) -@click.argument("name") -def cluster_add(description, address, port, ssl, name, api_key): - """ - Add a new PVC cluster NAME, via its API connection details, to the configuration of the local CLI client. Replaces any existing cluster with this name. 
- """ - if ssl: - scheme = "https" - else: - scheme = "http" - - # Get the existing data - existing_config = get_store(store_path) - # Append our new entry to the end - existing_config[name] = { - "description": description, - "host": address, - "port": port, - "scheme": scheme, - "api_key": api_key, - } - # Update the store - update_store(store_path, existing_config) - echo('Added new cluster "{}" at host "{}" to local database'.format(name, address)) - - -############################################################################### -# pvc cluster remove -############################################################################### -@click.command(name="remove", short_help="Remove a cluster from the client.") -@click.argument("name") -def cluster_remove(name): - """ - Remove a PVC cluster from the configuration of the local CLI client. - """ - # Get the existing data - existing_config = get_store(store_path) - # Remove the entry matching the name - try: - existing_config.pop(name) - except KeyError: - print('No cluster with name "{}" found'.format(name)) - # Update the store - update_store(store_path, existing_config) - echo('Removed cluster "{}" from local database'.format(name)) - - -############################################################################### -# pvc cluster list -############################################################################### -@click.command(name="list", short_help="List all available clusters.") -@click.option( - "-r", - "--raw", - "raw", - is_flag=True, - default=False, - help="Display the raw list of cluster names only.", -) -def cluster_list(raw): - """ - List all the available PVC clusters configured in this CLI instance. 
- """ - # Get the existing data - clusters = get_store(store_path) - # Find the lengths of each column - name_length = 5 - description_length = 12 - address_length = 10 - port_length = 5 - scheme_length = 7 - api_key_length = 32 - - for cluster in clusters: - cluster_details = clusters[cluster] - if cluster_details.get("cfgfile", None): - # This is a reference to an API configuration; grab the details from its listen address - cfgfile = cluster_details.get("cfgfile") - if os.path.isfile(cfgfile): - description, address, port, scheme, api_key = read_from_yaml(cfgfile) - else: - description, address, port, scheme, api_key = ( - "N/A", - "N/A", - "N/A", - "N/A", - "N/A", - ) - else: - description = cluster_details.get("description", "") - address = cluster_details.get("host", "N/A") - port = cluster_details.get("port", "N/A") - scheme = cluster_details.get("scheme", "N/A") - api_key = cluster_details.get("api_key", "N/A") - if not api_key: - api_key = "N/A" - - _name_length = len(cluster) + 1 - if _name_length > name_length: - name_length = _name_length - _address_length = len(address) + 1 - _description_length = len(description) + 1 - if _description_length > description_length: - description_length = _description_length - if _address_length > address_length: - address_length = _address_length - _port_length = len(str(port)) + 1 - if _port_length > port_length: - port_length = _port_length - _scheme_length = len(scheme) + 1 - if _scheme_length > scheme_length: - scheme_length = _scheme_length - _api_key_length = len(api_key) + 1 - if _api_key_length > api_key_length: - api_key_length = _api_key_length - - if not raw: - # Display the data nicely - echo( - "{bold}{name: <{name_length}} {description: <{description_length}} {address: <{address_length}} {port: <{port_length}} {scheme: <{scheme_length}} {api_key: <{api_key_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - name="Name", - name_length=name_length, - description="Description", 
- description_length=description_length, - address="Address", - address_length=address_length, - port="Port", - port_length=port_length, - scheme="Scheme", - scheme_length=scheme_length, - api_key="API Key", - api_key_length=api_key_length, - ) - ) - - for cluster in clusters: - cluster_details = clusters[cluster] - if cluster_details.get("cfgfile", None): - # This is a reference to an API configuration; grab the details from its listen address - if os.path.isfile(cfgfile): - description, address, port, scheme, api_key = read_from_yaml(cfgfile) - else: - continue - else: - address = cluster_details.get("host", "N/A") - description = cluster_details.get("description", "N/A") - port = cluster_details.get("port", "N/A") - scheme = cluster_details.get("scheme", "N/A") - api_key = cluster_details.get("api_key", "N/A") - if not api_key: - api_key = "N/A" - - if not raw: - echo( - "{bold}{name: <{name_length}} {description: <{description_length}} {address: <{address_length}} {port: <{port_length}} {scheme: <{scheme_length}} {api_key: <{api_key_length}}{end_bold}".format( - bold="", - end_bold="", - name=cluster, - name_length=name_length, - description=description, - description_length=description_length, - address=address, - address_length=address_length, - port=port, - port_length=port_length, - scheme=scheme, - scheme_length=scheme_length, - api_key=api_key, - api_key_length=api_key_length, - ) - ) - else: - echo(cluster) - - -############################################################################### -# pvc cluster detail -############################################################################### -@click.command(name="detail", short_help="Show details of all available clusters.") -def cluster_detail(): - """ - Show quick details of all PVC clusters configured in this CLI instance. - """ - - # Get the existing data - clusters = get_store(store_path) - - cluster_details_list = list() - - echo("Gathering information from clusters... 
", nl=False) - - for cluster in clusters: - _store_data = get_store(store_path) - cluster_config = get_config(_store_data, cluster=cluster) - retcode, retdata = pvc_cluster.get_info(cluster_config) - if retcode == 0: - retdata = None - cluster_details = {"config": cluster_config, "data": retdata} - cluster_details_list.append(cluster_details) - - echo("done.") - echo("") - - # Find the lengths of each column - name_length = 5 - description_length = 12 - health_length = 7 - primary_node_length = 8 - pvc_version_length = 8 - nodes_length = 6 - vms_length = 4 - networks_length = 9 - osds_length = 5 - pools_length = 6 - volumes_length = 8 - snapshots_length = 10 - - for cluster_details in cluster_details_list: - _name_length = len(cluster_details["config"]["cluster"]) + 1 - if _name_length > name_length: - name_length = _name_length - - _description_length = len(cluster_details["config"]["description"]) + 1 - if _description_length > description_length: - description_length = _description_length - - if cluster_details["data"] is None: - continue - - _health_length = ( - len( - str( - cluster_details["data"] - .get("cluster_health", {}) - .get("health", "N/A") - ) - + "%" - ) - + 1 - ) - if _health_length > health_length: - health_length = _health_length - - _primary_node_length = len(cluster_details["data"]["primary_node"]) + 1 - if _primary_node_length > primary_node_length: - primary_node_length = _primary_node_length - - _pvc_version_length = ( - len(cluster_details["data"].get("pvc_version", "< 0.9.62")) + 1 - ) - if _pvc_version_length > pvc_version_length: - pvc_version_length = _pvc_version_length - - _nodes_length = len(str(cluster_details["data"]["nodes"]["total"])) + 1 - if _nodes_length > nodes_length: - nodes_length = _nodes_length - - _vms_length = len(str(cluster_details["data"]["vms"]["total"])) + 1 - if _vms_length > vms_length: - vms_length = _vms_length - - _networks_length = len(str(cluster_details["data"]["networks"])) + 1 - if _networks_length > 
networks_length: - networks_length = _networks_length - - _osds_length = len(str(cluster_details["data"]["osds"]["total"])) + 1 - if _osds_length > osds_length: - osds_length = _osds_length - - _pools_length = len(str(cluster_details["data"]["pools"])) + 1 - if _pools_length > pools_length: - pools_length = _pools_length - - _volumes_length = len(str(cluster_details["data"]["volumes"])) + 1 - if _volumes_length > volumes_length: - volumes_length = _volumes_length - - _snapshots_length = len(str(cluster_details["data"]["snapshots"])) + 1 - if _snapshots_length > snapshots_length: - snapshots_length = _snapshots_length - - # Display the data nicely - echo( - "{bold}{name: <{name_length}} {description: <{description_length}} {health: <{health_length}} {primary_node: <{primary_node_length}} {pvc_version: <{pvc_version_length}} {nodes: <{nodes_length}} {vms: <{vms_length}} {networks: <{networks_length}} {osds: <{osds_length}} {pools: <{pools_length}} {volumes: <{volumes_length}} {snapshots: <{snapshots_length}}{end_bold}".format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - name="Name", - name_length=name_length, - description="Description", - description_length=description_length, - health="Health", - health_length=health_length, - primary_node="Primary", - primary_node_length=primary_node_length, - pvc_version="Version", - pvc_version_length=pvc_version_length, - nodes="Nodes", - nodes_length=nodes_length, - vms="VMs", - vms_length=vms_length, - networks="Networks", - networks_length=networks_length, - osds="OSDs", - osds_length=osds_length, - pools="Pools", - pools_length=pools_length, - volumes="Volumes", - volumes_length=volumes_length, - snapshots="Snapshots", - snapshots_length=snapshots_length, - ) - ) - - for cluster_details in cluster_details_list: - if cluster_details["data"] is None: - health_colour = ansiprint.blue() - name = cluster_details["config"]["cluster"] - description = cluster_details["config"]["description"] - health = "N/A" - 
primary_node = "N/A" - pvc_version = "N/A" - nodes = "N/A" - vms = "N/A" - networks = "N/A" - osds = "N/A" - pools = "N/A" - volumes = "N/A" - snapshots = "N/A" - else: - if ( - cluster_details["data"].get("maintenance") == "true" - or cluster_details["data"] - .get("cluster_health", {}) - .get("health", "N/A") - == "N/A" - ): - health_colour = ansiprint.blue() - elif ( - cluster_details["data"].get("cluster_health", {}).get("health", 100) - > 90 - ): - health_colour = ansiprint.green() - elif ( - cluster_details["data"].get("cluster_health", {}).get("health", 100) - > 50 - ): - health_colour = ansiprint.yellow() - else: - health_colour = ansiprint.red() - - name = cluster_details["config"]["cluster"] - description = cluster_details["config"]["description"] - health = str( - cluster_details["data"].get("cluster_health", {}).get("health", "N/A") - ) - if health != "N/A": - health += "%" - primary_node = cluster_details["data"]["primary_node"] - pvc_version = cluster_details["data"].get("pvc_version", "< 0.9.62") - nodes = str(cluster_details["data"]["nodes"]["total"]) - vms = str(cluster_details["data"]["vms"]["total"]) - networks = str(cluster_details["data"]["networks"]) - osds = str(cluster_details["data"]["osds"]["total"]) - pools = str(cluster_details["data"]["pools"]) - volumes = str(cluster_details["data"]["volumes"]) - snapshots = str(cluster_details["data"]["snapshots"]) - - echo( - "{name: <{name_length}} {description: <{description_length}} {health_colour}{health: <{health_length}}{end_colour} {primary_node: <{primary_node_length}} {pvc_version: <{pvc_version_length}} {nodes: <{nodes_length}} {vms: <{vms_length}} {networks: <{networks_length}} {osds: <{osds_length}} {pools: <{pools_length}} {volumes: <{volumes_length}} {snapshots: <{snapshots_length}}".format( - health_colour=health_colour, - end_colour=ansiprint.end(), - name=name, - name_length=name_length, - description=description, - description_length=description_length, - health=health, - 
health_length=health_length, - primary_node=primary_node, - primary_node_length=primary_node_length, - pvc_version=pvc_version, - pvc_version_length=pvc_version_length, - nodes=nodes, - nodes_length=nodes_length, - vms=vms, - vms_length=vms_length, - networks=networks, - networks_length=networks_length, - osds=osds, - osds_length=osds_length, - pools=pools, - pools_length=pools_length, - volumes=volumes, - volumes_length=volumes_length, - snapshots=snapshots, - snapshots_length=snapshots_length, - ) - ) - - -# Validate that the cluster is set for a given command -def cluster_req(function): - @wraps(function) - def validate_cluster(*args, **kwargs): - if config.get("badcfg", None): - echo( - 'No cluster specified and no local pvcapid.yaml configuration found. Use "pvc cluster" to add a cluster API to connect to.' - ) - exit(1) - - if not config["quiet"]: - if config["api_scheme"] == "https" and not config["verify_ssl"]: - ssl_unverified_msg = " (unverified)" - else: - ssl_unverified_msg = "" - echo( - 'Using cluster "{}" - Host: "{}" Scheme: "{}{}" Prefix: "{}"'.format( - config["cluster"], - config["api_host"], - config["api_scheme"], - ssl_unverified_msg, - config["api_prefix"], - ), - err=True, - ) - echo("", err=True) - - return function(*args, **kwargs) - - return validate_cluster - - -############################################################################### -# pvc node -############################################################################### -@click.group( - name="node", short_help="Manage a PVC node.", context_settings=CONTEXT_SETTINGS -) -def cli_node(): - """ - Manage the state of a node in the PVC cluster. 
- """ - pass - - -############################################################################### -# pvc node secondary -############################################################################### -@click.command(name="secondary", short_help="Set a node in secondary node status.") -@click.argument("node") -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for transition to complete before returning.", -) -@cluster_req -def node_secondary(node, wait): - """ - Take NODE out of primary coordinator mode. - """ - - task_retcode, task_retdata = pvc_provisioner.task_status(config, None) - if len(task_retdata) > 0: - echo( - "Note: There are currently {} active or queued provisioner jobs on the current primary node.".format( - len(task_retdata) - ) - ) - echo( - " These jobs will continue executing, but status will not be visible until the current" - ) - echo(" node returns to primary state.") - echo("") - - retcode, retmsg = pvc_node.node_coordinator_state(config, node, "secondary") - if not retcode: - cleanup(retcode, retmsg) - else: - if wait: - echo(retmsg) - echo("Waiting for state transition... ", nl=False) - # Every half-second, check if the API is reachable and the node is in secondary state - while True: - try: - _retcode, _retmsg = pvc_node.node_info(config, node) - if _retmsg["coordinator_state"] == "secondary": - retmsg = "done." 
- break - else: - time.sleep(0.5) - except Exception: - time.sleep(0.5) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc node primary -############################################################################### -@click.command(name="primary", short_help="Set a node in primary status.") -@click.argument("node") -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for transition to complete before returning.", -) -@cluster_req -def node_primary(node, wait): - """ - Put NODE into primary coordinator mode. - """ - - task_retcode, task_retdata = pvc_provisioner.task_status(config, None) - if len(task_retdata) > 0: - echo( - "Note: There are currently {} active or queued provisioner jobs on the current primary node.".format( - len(task_retdata) - ) - ) - echo( - " These jobs will continue executing, but status will not be visible until the current" - ) - echo(" node returns to primary state.") - echo("") - - retcode, retmsg = pvc_node.node_coordinator_state(config, node, "primary") - if not retcode: - cleanup(retcode, retmsg) - else: - if wait: - echo(retmsg) - echo("Waiting for state transition... ", nl=False) - # Every half-second, check if the API is reachable and the node is in secondary state - while True: - try: - _retcode, _retmsg = pvc_node.node_info(config, node) - if _retmsg["coordinator_state"] == "primary": - retmsg = "done." 
- break - else: - time.sleep(0.5) - except Exception: - time.sleep(0.5) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc node flush -############################################################################### -@click.command(name="flush", short_help="Take a node out of service.") -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for migrations to complete before returning.", -) -@click.argument("node", default=myhostname) -@cluster_req -def node_flush(node, wait): - """ - Take NODE out of active service and migrate away all VMs. If unspecified, defaults to this host. - """ - - retcode, retmsg = pvc_node.node_domain_state(config, node, "flush", wait) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc node ready/unflush -############################################################################### -@click.command(name="ready", short_help="Restore node to service.") -@click.argument("node", default=myhostname) -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for migrations to complete before returning.", -) -@cluster_req -def node_ready(node, wait): - """ - Restore NODE to active service and migrate back all VMs. If unspecified, defaults to this host. - """ - - retcode, retmsg = pvc_node.node_domain_state(config, node, "ready", wait) - cleanup(retcode, retmsg) - - -@click.command(name="unflush", short_help="Restore node to service.") -@click.argument("node", default=myhostname) -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for migrations to complete before returning.", -) -def node_unflush(node, wait): - """ - Restore NODE to active service and migrate back all VMs. If unspecified, defaults to this host. 
- """ - - retcode, retmsg = pvc_node.node_domain_state(config, node, "ready", wait) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc node log -############################################################################### -@click.command(name="log", short_help="Show logs of a node.") -@click.argument("node") -@click.option( - "-l", - "--lines", - "lines", - default=None, - show_default=False, - help="Display this many log lines from the end of the log buffer. [default: 1000; with follow: 10]", -) -@click.option( - "-f", - "--follow", - "follow", - is_flag=True, - default=False, - help="Follow the log buffer; output may be delayed by a few seconds relative to the live system. The --lines value defaults to 10 for the initial output.", -) -@cluster_req -def node_log(node, lines, follow): - """ - Show node logs of virtual machine DOMAIN on its current node in a pager or continuously. DOMAIN may be a UUID or name. Note that migrating a VM to a different node will cause the log buffer to be overwritten by entries from the new node. 
- """ - - # Set the default here so we can handle it - if lines is None: - if follow: - lines = 10 - else: - lines = 1000 - - if follow: - retcode, retmsg = pvc_node.follow_node_log(config, node, lines) - else: - retcode, retmsg = pvc_node.view_node_log(config, node, lines) - click.echo_via_pager(retmsg) - retmsg = "" - cleanup(retcode, retmsg) - - -############################################################################### -# pvc node info -############################################################################### -@click.command(name="info", short_help="Show details of a node object.") -@click.argument("node", default=myhostname) -@click.option( - "-l", - "--long", - "long_output", - is_flag=True, - default=False, - help="Display more detailed information.", -) -@click.option( - "-f", - "--format", - "oformat", - default="plain", - show_default=True, - type=click.Choice(["plain", "json", "json-pretty"]), - help="Output format of node status information.", -) -@cluster_req -def node_info(node, long_output, oformat): - """ - Show information about node NODE. If unspecified, defaults to this host. 
- """ - - retcode, retdata = pvc_node.node_info(config, node) - if retcode: - if oformat == "json": - retdata = json.dumps(retdata) - elif oformat == "json-pretty": - retdata = json.dumps(retdata, indent=4) - else: - retdata = pvc_node.format_info(retdata, long_output) - cleanup(retcode, retdata) - - -############################################################################### -# pvc node list -############################################################################### -@click.command(name="list", short_help="List all node objects.") -@click.argument("limit", default=None, required=False) -@click.option( - "-ds", - "--daemon-state", - "target_daemon_state", - default=None, - help="Limit list to nodes in the specified daemon state.", -) -@click.option( - "-cs", - "--coordinator-state", - "target_coordinator_state", - default=None, - help="Limit list to nodes in the specified coordinator state.", -) -@click.option( - "-vs", - "--domain-state", - "target_domain_state", - default=None, - help="Limit list to nodes in the specified domain state.", -) -@click.option( - "-r", - "--raw", - "raw", - is_flag=True, - default=False, - help="Display the raw list of node names only.", -) -@cluster_req -def node_list( - limit, target_daemon_state, target_coordinator_state, target_domain_state, raw -): - """ - List all nodes; optionally only match names matching regex LIMIT. 
- """ - - retcode, retdata = pvc_node.node_list( - config, - limit, - target_daemon_state, - target_coordinator_state, - target_domain_state, - ) - if retcode: - retdata = pvc_node.format_list(retdata, raw) - else: - if raw: - retdata = "" - cleanup(retcode, retdata) - - -############################################################################### -# pvc vm -############################################################################### -@click.group( - name="vm", - short_help="Manage a PVC virtual machine.", - context_settings=CONTEXT_SETTINGS, -) -def cli_vm(): - """ - Manage the state of a virtual machine in the PVC cluster. - """ - pass - - -############################################################################### -# pvc vm define -############################################################################### -@click.command( - name="define", short_help="Define a new virtual machine from a Libvirt XML file." -) -@click.option( - "-t", - "--target", - "target_node", - help="Home node for this domain; autoselect if unspecified.", -) -@click.option( - "-l", - "--limit", - "node_limit", - default=None, - show_default=False, - help="Comma-separated list of nodes to limit VM operation to; saved with VM.", -) -@click.option( - "-s", - "--node-selector", - "node_selector", - default="none", - show_default=True, - type=click.Choice(["mem", "memprov", "load", "vcpus", "vms", "none"]), - help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.', -) -@click.option( - "-a/-A", - "--autostart/--no-autostart", - "node_autostart", - is_flag=True, - default=False, - help="Start VM automatically on next unflush/ready state of home node; unset by daemon once used.", -) -@click.option( - "-m", - "--method", - "migration_method", - default="none", - show_default=True, - type=click.Choice(["none", "live", "shutdown"]), - help="The preferred migration method of the VM between nodes; saved with VM.", -) -@click.option( - 
"-g", - "--tag", - "user_tags", - default=[], - multiple=True, - help="User tag for the VM; can be specified multiple times, once per tag.", -) -@click.option( - "-G", - "--protected-tag", - "protected_tags", - default=[], - multiple=True, - help="Protected user tag for the VM; can be specified multiple times, once per tag.", -) -@click.argument("vmconfig", type=click.File()) -@cluster_req -def vm_define( - vmconfig, - target_node, - node_limit, - node_selector, - node_autostart, - migration_method, - user_tags, - protected_tags, -): - """ - Define a new virtual machine from Libvirt XML configuration file VMCONFIG. - - The target node selector ("--node-selector"/"-s") can be "none" to use the cluster default, or one of the following values: - * "mem": choose the node with the most (real) free memory - * "memprov": choose the node with the least provisioned VM memory - * "vcpus": choose the node with the least allocated VM vCPUs - * "load": choose the node with the lowest current load average - * "vms": choose the node with the least number of provisioned VMs - - For most clusters, "mem" should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered: - * "mem" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. - * "memprov" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use. - * "load" looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs. 
- """ - - # Open the XML file - vmconfig_data = vmconfig.read() - vmconfig.close() - - # Verify our XML is sensible - try: - xml_data = etree.fromstring(vmconfig_data) - new_cfg = etree.tostring(xml_data, pretty_print=True).decode("utf8") - except Exception: - cleanup(False, "Error: XML is malformed or invalid") - - retcode, retmsg = pvc_vm.vm_define( - config, - new_cfg, - target_node, - node_limit, - node_selector, - node_autostart, - migration_method, - user_tags, - protected_tags, - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm meta -############################################################################### -@click.command(name="meta", short_help="Modify PVC metadata of an existing VM.") -@click.option( - "-l", - "--limit", - "node_limit", - default=None, - show_default=False, - help="Comma-separated list of nodes to limit VM operation to; set to an empty string to remove.", -) -@click.option( - "-s", - "--node-selector", - "node_selector", - default=None, - show_default=False, - type=click.Choice(["mem", "memprov", "load", "vcpus", "vms", "none"]), - help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.', -) -@click.option( - "-a/-A", - "--autostart/--no-autostart", - "node_autostart", - is_flag=True, - default=None, - help="Start VM automatically on next unflush/ready state of home node; unset by daemon once used.", -) -@click.option( - "-m", - "--method", - "migration_method", - default="none", - show_default=True, - type=click.Choice(["none", "live", "shutdown"]), - help="The preferred migration method of the VM between nodes.", -) -@click.option( - "-p", - "--profile", - "provisioner_profile", - default=None, - show_default=False, - help="PVC provisioner profile name for VM.", -) -@click.argument("domain") -@cluster_req -def vm_meta( - domain, - node_limit, - node_selector, - node_autostart, - migration_method, - 
provisioner_profile, -): - """ - Modify the PVC metadata of existing virtual machine DOMAIN. At least one option to update must be specified. DOMAIN may be a UUID or name. - - For details on the "--node-selector"/"-s" values, please see help for the command "pvc vm define". - """ - - if ( - node_limit is None - and node_selector is None - and node_autostart is None - and migration_method is None - and provisioner_profile is None - ): - cleanup(False, "At least one metadata option must be specified to update.") - - retcode, retmsg = pvc_vm.vm_metadata( - config, - domain, - node_limit, - node_selector, - node_autostart, - migration_method, - provisioner_profile, - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm modify -############################################################################### -@click.command(name="modify", short_help="Modify an existing VM configuration.") -@click.option( - "-e", - "--editor", - "editor", - is_flag=True, - help="Use local editor to modify existing config.", -) -@click.option( - "-r", - "--restart", - "restart", - is_flag=True, - help="Immediately restart VM to apply new config.", -) -@click.option( - "-d", - "--confirm-diff", - "confirm_diff_flag", - is_flag=True, - default=False, - help="Confirm the diff.", -) -@click.option( - "-c", - "--confirm-restart", - "confirm_restart_flag", - is_flag=True, - default=False, - help="Confirm the restart.", -) -@click.option( - "-y", - "--yes", - "confirm_all_flag", - is_flag=True, - default=False, - help="Confirm the diff and the restart.", -) -@click.argument("domain") -@click.argument("cfgfile", type=click.File(), default=None, required=False) -@cluster_req -def vm_modify( - domain, - cfgfile, - editor, - restart, - confirm_diff_flag, - confirm_restart_flag, - confirm_all_flag, -): - """ - Modify existing virtual machine DOMAIN, either in-editor or with replacement CONFIG. DOMAIN may be a UUID or name. 
- """ - - if editor is False and cfgfile is None: - cleanup( - False, - 'Either an XML config file or the "--editor" option must be specified.', - ) - - retcode, vm_information = pvc_vm.vm_info(config, domain) - if not retcode or not vm_information.get("name", None): - cleanup(False, 'ERROR: Could not find VM "{}"!'.format(domain)) - - dom_name = vm_information.get("name") - - # Grab the current config - current_vm_cfg_raw = vm_information.get("xml") - xml_data = etree.fromstring(current_vm_cfg_raw) - current_vm_cfgfile = ( - etree.tostring(xml_data, pretty_print=True).decode("utf8").strip() - ) - - if editor is True: - new_vm_cfgfile = click.edit( - text=current_vm_cfgfile, require_save=True, extension=".xml" - ) - if new_vm_cfgfile is None: - echo("Aborting with no modifications.") - exit(0) - else: - new_vm_cfgfile = new_vm_cfgfile.strip() - - # We're operating in replace mode - else: - # Open the XML file - new_vm_cfgfile = cfgfile.read() - cfgfile.close() - - echo( - 'Replacing configuration of VM "{}" with file "{}".'.format( - dom_name, cfgfile.name - ) - ) - - # Show a diff and confirm - echo("Pending modifications:") - echo("") - diff = list( - difflib.unified_diff( - current_vm_cfgfile.split("\n"), - new_vm_cfgfile.split("\n"), - fromfile="current", - tofile="modified", - fromfiledate="", - tofiledate="", - n=3, - lineterm="", - ) - ) - for line in diff: - if re.match(r"^\+", line) is not None: - echo(colorama.Fore.GREEN + line + colorama.Fore.RESET) - elif re.match(r"^\-", line) is not None: - echo(colorama.Fore.RED + line + colorama.Fore.RESET) - elif re.match(r"^\^", line) is not None: - echo(colorama.Fore.BLUE + line + colorama.Fore.RESET) - else: - echo(line) - echo("") - - # Verify our XML is sensible - try: - xml_data = etree.fromstring(new_vm_cfgfile) - new_cfg = etree.tostring(xml_data, pretty_print=True).decode("utf8") - except Exception as e: - cleanup(False, "Error: XML is malformed or invalid: {}".format(e)) - - if not confirm_diff_flag and 
not confirm_all_flag and not config["unsafe"]: - click.confirm("Write modifications to cluster?", abort=True) - - if ( - restart - and not confirm_restart_flag - and not confirm_all_flag - and not config["unsafe"] - ): - try: - click.confirm( - "Restart VM {}".format(domain), prompt_suffix="? ", abort=True - ) - except Exception: - restart = False - - retcode, retmsg = pvc_vm.vm_modify(config, domain, new_cfg, restart) - if retcode and not restart: - retmsg = retmsg + " Changes will be applied on next VM start/restart." - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm rename -############################################################################### -@click.command(name="rename", short_help="Rename a virtual machine.") -@click.argument("domain") -@click.argument("new_name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the rename", -) -@cluster_req -def vm_rename(domain, new_name, confirm_flag): - """ - Rename virtual machine DOMAIN, and all its connected disk volumes, to NEW_NAME. DOMAIN may be a UUID or name. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Rename VM {} to {}".format(domain, new_name), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_vm.vm_rename(config, domain, new_name) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm undefine -############################################################################### -@click.command(name="undefine", short_help="Undefine a virtual machine.") -@click.argument("domain") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def vm_undefine(domain, confirm_flag): - """ - Stop virtual machine DOMAIN and remove it database, preserving disks. 
DOMAIN may be a UUID or name. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Undefine VM {}".format(domain), prompt_suffix="? ", abort=True - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_vm.vm_remove(config, domain, delete_disks=False) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm remove -############################################################################### -@click.command(name="remove", short_help="Remove a virtual machine.") -@click.argument("domain") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def vm_remove(domain, confirm_flag): - """ - Stop virtual machine DOMAIN and remove it, along with all disks,. DOMAIN may be a UUID or name. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Undefine VM {} and remove all disks".format(domain), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_vm.vm_remove(config, domain, delete_disks=True) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm start -############################################################################### -@click.command(name="start", short_help="Start up a defined virtual machine.") -@click.argument("domain") -@cluster_req -def vm_start(domain): - """ - Start virtual machine DOMAIN on its configured node. DOMAIN may be a UUID or name. 
- """ - - retcode, retmsg = pvc_vm.vm_state(config, domain, "start") - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm restart -############################################################################### -@click.command(name="restart", short_help="Restart a running virtual machine.") -@click.argument("domain") -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for restart to complete before returning.", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the restart", -) -@cluster_req -def vm_restart(domain, wait, confirm_flag): - """ - Restart running virtual machine DOMAIN. DOMAIN may be a UUID or name. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Restart VM {}".format(domain), prompt_suffix="? ", abort=True - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_vm.vm_state(config, domain, "restart", wait=wait) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm shutdown -############################################################################### -@click.command( - name="shutdown", short_help="Gracefully shut down a running virtual machine." -) -@click.argument("domain") -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for shutdown to complete before returning.", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the shutdown", -) -@cluster_req -def vm_shutdown(domain, wait, confirm_flag): - """ - Gracefully shut down virtual machine DOMAIN. DOMAIN may be a UUID or name. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Shut down VM {}".format(domain), prompt_suffix="? 
", abort=True - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_vm.vm_state(config, domain, "shutdown", wait=wait) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm stop -############################################################################### -@click.command(name="stop", short_help="Forcibly halt a running virtual machine.") -@click.argument("domain") -@click.option( - "-y", "--yes", "confirm_flag", is_flag=True, default=False, help="Confirm the stop" -) -@cluster_req -def vm_stop(domain, confirm_flag): - """ - Forcibly halt (destroy) running virtual machine DOMAIN. DOMAIN may be a UUID or name. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Forcibly stop VM {}".format(domain), prompt_suffix="? ", abort=True - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_vm.vm_state(config, domain, "stop") - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm disable -############################################################################### -@click.command(name="disable", short_help="Mark a virtual machine as disabled.") -@click.argument("domain") -@click.option( - "--force", - "force_flag", - is_flag=True, - default=False, - help="Forcibly stop the VM instead of waiting for shutdown.", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the disable", -) -@cluster_req -def vm_disable(domain, force_flag, confirm_flag): - """ - Shut down virtual machine DOMAIN and mark it as disabled. DOMAIN may be a UUID or name. - - Disabled VMs will not be counted towards a degraded cluster health status, unlike stopped VMs. Use this option for a VM that will remain off for an extended period. - """ - - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Disable VM {}".format(domain), prompt_suffix="? 
", abort=True - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_vm.vm_state(config, domain, "disable", force=force_flag) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm move -############################################################################### -@click.command( - name="move", short_help="Permanently move a virtual machine to another node." -) -@click.argument("domain") -@click.option( - "-t", - "--target", - "target_node", - default=None, - help="Target node to migrate to; autodetect if unspecified.", -) -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for migration to complete before returning.", -) -@click.option( - "--force-live", - "force_live", - is_flag=True, - default=False, - help="Do not fall back to shutdown-based migration if live migration fails.", -) -@cluster_req -def vm_move(domain, target_node, wait, force_live): - """ - Permanently move virtual machine DOMAIN, via live migration if running and possible, to another node. DOMAIN may be a UUID or name. - """ - - retcode, retmsg = pvc_vm.vm_node( - config, - domain, - target_node, - "move", - force=False, - wait=wait, - force_live=force_live, - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm migrate -############################################################################### -@click.command( - name="migrate", short_help="Temporarily migrate a virtual machine to another node." 
-) -@click.argument("domain") -@click.option( - "-t", - "--target", - "target_node", - default=None, - help="Target node to migrate to; autodetect if unspecified.", -) -@click.option( - "-f", - "--force", - "force_migrate", - is_flag=True, - default=False, - help="Force migrate an already migrated VM; does not replace an existing previous node value.", -) -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for migration to complete before returning.", -) -@click.option( - "--force-live", - "force_live", - is_flag=True, - default=False, - help="Do not fall back to shutdown-based migration if live migration fails.", -) -@cluster_req -def vm_migrate(domain, target_node, force_migrate, wait, force_live): - """ - Temporarily migrate running virtual machine DOMAIN, via live migration if possible, to another node. DOMAIN may be a UUID or name. If DOMAIN is not running, it will be started on the target node. - """ - - retcode, retmsg = pvc_vm.vm_node( - config, - domain, - target_node, - "migrate", - force=force_migrate, - wait=wait, - force_live=force_live, - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm unmigrate -############################################################################### -@click.command( - name="unmigrate", - short_help="Restore a migrated virtual machine to its original node.", -) -@click.argument("domain") -@click.option( - "-w", - "--wait", - "wait", - is_flag=True, - default=False, - help="Wait for migration to complete before returning.", -) -@click.option( - "--force-live", - "force_live", - is_flag=True, - default=False, - help="Do not fall back to shutdown-based migration if live migration fails.", -) -@cluster_req -def vm_unmigrate(domain, wait, force_live): - """ - Restore previously migrated virtual machine DOMAIN, via live migration if possible, to its original node. DOMAIN may be a UUID or name. 
If DOMAIN is not running, it will be started on the target node. - """ - - retcode, retmsg = pvc_vm.vm_node( - config, domain, None, "unmigrate", force=False, wait=wait, force_live=force_live - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm flush-locks -############################################################################### -@click.command( - name="flush-locks", short_help="Flush stale RBD locks for a virtual machine." -) -@click.argument("domain") -@cluster_req -def vm_flush_locks(domain): - """ - Flush stale RBD locks for virtual machine DOMAIN. DOMAIN may be a UUID or name. DOMAIN must be in a stopped state before flushing locks. - """ - - retcode, retmsg = pvc_vm.vm_locks(config, domain) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm tag -############################################################################### -@click.group( - name="tag", - short_help="Manage tags of a virtual machine.", - context_settings=CONTEXT_SETTINGS, -) -def vm_tags(): - """ - Manage the tags of a virtual machine in the PVC cluster." - """ - pass - - -############################################################################### -# pvc vm tag get -############################################################################### -@click.command(name="get", short_help="Get the current tags of a virtual machine.") -@click.argument("domain") -@click.option( - "-r", - "--raw", - "raw", - is_flag=True, - default=False, - help="Display the raw value only without formatting.", -) -@cluster_req -def vm_tags_get(domain, raw): - """ - Get the current tags of the virtual machine DOMAIN. 
- """ - - retcode, retdata = pvc_vm.vm_tags_get(config, domain) - if retcode: - if not raw: - retdata = pvc_vm.format_vm_tags(config, domain, retdata["tags"]) - else: - if len(retdata["tags"]) > 0: - retdata = "\n".join([tag["name"] for tag in retdata["tags"]]) - else: - retdata = "No tags found." - cleanup(retcode, retdata) - - -############################################################################### -# pvc vm tag add -############################################################################### -@click.command(name="add", short_help="Add new tags to a virtual machine.") -@click.argument("domain") -@click.argument("tag") -@click.option( - "-p", - "--protected", - "protected", - is_flag=True, - required=False, - default=False, - help="Set this tag as protected; protected tags cannot be removed.", -) -@cluster_req -def vm_tags_add(domain, tag, protected): - """ - Add TAG to the virtual machine DOMAIN. - """ - - retcode, retmsg = pvc_vm.vm_tag_set(config, domain, "add", tag, protected) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm tag remove -############################################################################### -@click.command(name="remove", short_help="Remove tags from a virtual machine.") -@click.argument("domain") -@click.argument("tag") -@cluster_req -def vm_tags_remove(domain, tag): - """ - Remove TAG from the virtual machine DOMAIN. - """ - - retcode, retmsg = pvc_vm.vm_tag_set(config, domain, "remove", tag) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm vcpu -############################################################################### -@click.group( - name="vcpu", - short_help="Manage vCPU counts of a virtual machine.", - context_settings=CONTEXT_SETTINGS, -) -def vm_vcpu(): - """ - Manage the vCPU counts of a virtual machine in the PVC cluster." 
- """ - pass - - -############################################################################### -# pvc vm vcpu get -############################################################################### -@click.command( - name="get", short_help="Get the current vCPU count of a virtual machine." -) -@click.argument("domain") -@click.option( - "-r", - "--raw", - "raw", - is_flag=True, - default=False, - help="Display the raw value only without formatting.", -) -@cluster_req -def vm_vcpu_get(domain, raw): - """ - Get the current vCPU count of the virtual machine DOMAIN. - """ - - retcode, retmsg = pvc_vm.vm_vcpus_get(config, domain) - if not raw: - retmsg = pvc_vm.format_vm_vcpus(config, domain, retmsg) - else: - retmsg = retmsg[0] # Get only the first part of the tuple (vm_vcpus) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm vcpu set -############################################################################### -@click.command(name="set", short_help="Set the vCPU count of a virtual machine.") -@click.argument("domain") -@click.argument("vcpus") -@click.option( - "-t", - "--topology", - "topology", - default=None, - help="Use an alternative topology for the vCPUs in the CSV form ,,. SxCxT must equal VCPUS.", -) -@click.option( - "-r", - "--restart", - "restart", - is_flag=True, - default=False, - help="Immediately restart VM to apply new config.", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the restart", -) -@cluster_req -def vm_vcpu_set(domain, vcpus, topology, restart, confirm_flag): - """ - Set the vCPU count of the virtual machine DOMAIN to VCPUS. - - By default, the topology of the vCPus is 1 socket, VCPUS cores per socket, 1 thread per core. 
- """ - if topology is not None: - try: - sockets, cores, threads = topology.split(",") - if sockets * cores * threads != vcpus: - raise - except Exception: - cleanup(False, "The specified topology is not valid.") - topology = (sockets, cores, threads) - else: - topology = (1, vcpus, 1) - - if restart and not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Restart VM {}".format(domain), prompt_suffix="? ", abort=True - ) - except Exception: - restart = False - - retcode, retmsg = pvc_vm.vm_vcpus_set(config, domain, vcpus, topology, restart) - if retcode and not restart: - retmsg = retmsg + " Changes will be applied on next VM start/restart." - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm memory -############################################################################### -@click.group( - name="memory", - short_help="Manage provisioned memory of a virtual machine.", - context_settings=CONTEXT_SETTINGS, -) -def vm_memory(): - """ - Manage the provisioned memory of a virtual machine in the PVC cluster." - """ - pass - - -############################################################################### -# pvc vm memory get -############################################################################### -@click.command( - name="get", short_help="Get the current provisioned memory of a virtual machine." -) -@click.argument("domain") -@click.option( - "-r", - "--raw", - "raw", - is_flag=True, - default=False, - help="Display the raw value only without formatting.", -) -@cluster_req -def vm_memory_get(domain, raw): - """ - Get the current provisioned memory of the virtual machine DOMAIN. 
- """ - - retcode, retmsg = pvc_vm.vm_memory_get(config, domain) - if not raw: - retmsg = pvc_vm.format_vm_memory(config, domain, retmsg) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm memory set -############################################################################### -@click.command( - name="set", short_help="Set the provisioned memory of a virtual machine." -) -@click.argument("domain") -@click.argument("memory") -@click.option( - "-r", - "--restart", - "restart", - is_flag=True, - default=False, - help="Immediately restart VM to apply new config.", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the restart", -) -@cluster_req -def vm_memory_set(domain, memory, restart, confirm_flag): - """ - Set the provisioned memory of the virtual machine DOMAIN to MEMORY; MEMORY must be an integer in MB. - """ - if restart and not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Restart VM {}".format(domain), prompt_suffix="? ", abort=True - ) - except Exception: - restart = False - - retcode, retmsg = pvc_vm.vm_memory_set(config, domain, memory, restart) - if retcode and not restart: - retmsg = retmsg + " Changes will be applied on next VM start/restart." - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm network -############################################################################### -@click.group( - name="network", - short_help="Manage attached networks of a virtual machine.", - context_settings=CONTEXT_SETTINGS, -) -def vm_network(): - """ - Manage the attached networks of a virtual machine in the PVC cluster. - - Network details cannot be modified here. To modify a network, first remove it, then readd it with the correct settings. Unless the '-r'/'--reboot' flag is provided, this will not affect the running VM until it is restarted. 
- """ - pass - - -############################################################################### -# pvc vm network get -############################################################################### -@click.command(name="get", short_help="Get the networks of a virtual machine.") -@click.argument("domain") -@click.option( - "-r", - "--raw", - "raw", - is_flag=True, - default=False, - help="Display the raw values only without formatting.", -) -@cluster_req -def vm_network_get(domain, raw): - """ - Get the networks of the virtual machine DOMAIN. - """ - - retcode, retdata = pvc_vm.vm_networks_get(config, domain) - if not raw: - retmsg = pvc_vm.format_vm_networks(config, domain, retdata) - else: - network_vnis = list() - for network in retdata: - network_vnis.append(network[0]) - retmsg = ",".join(network_vnis) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm network add -############################################################################### -@click.command(name="add", short_help="Add network to a virtual machine.") -@click.argument("domain") -@click.argument("net") -@click.option( - "-a", - "--macaddr", - "macaddr", - default=None, - help="Use this MAC address instead of random generation; must be a valid MAC address in colon-delimited format.", -) -@click.option( - "-m", - "--model", - "model", - default="virtio", - show_default=True, - help='The model for the interface; must be a valid libvirt model. Not used for "netdev" SR-IOV NETs.', -) -@click.option( - "-s", - "--sriov", - "sriov_flag", - is_flag=True, - default=False, - help="Identify that NET is an SR-IOV device name and not a VNI. 
Required for adding SR-IOV NETs.", -) -@click.option( - "-d", - "--sriov-mode", - "sriov_mode", - default="macvtap", - show_default=True, - type=click.Choice(["hostdev", "macvtap"]), - help="For SR-IOV NETs, the SR-IOV network device mode.", -) -@click.option( - "-l/-L", - "--live/--no-live", - "live_flag", - is_flag=True, - default=True, - help="Immediately live-attach device to VM [default] or disable this behaviour.", -) -@click.option( - "-r", - "--restart", - "restart_flag", - is_flag=True, - default=False, - help='Immediately restart VM to apply new config; implies "--no-live".', -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the VM restart.", -) -@cluster_req -def vm_network_add( - domain, - net, - macaddr, - model, - sriov_flag, - sriov_mode, - live_flag, - restart_flag, - confirm_flag, -): - """ - Add the network NET to the virtual machine DOMAIN. Networks are always addded to the end of the current list of networks in the virtual machine. - - NET may be a PVC network VNI, which is added as a bridged device, or a SR-IOV VF device connected in the given mode. - - NOTE: Adding a SR-IOV network device in the "hostdev" mode has the following caveats: - - 1. The VM will not be able to be live migrated; it must be shut down to migrate between nodes. The VM metadata will be updated to force this. - - 2. If an identical SR-IOV VF device is not present on the target node, post-migration startup will fail. It may be prudent to use a node limit here. - - """ - if restart_flag and live_flag: - live_flag = False - - if restart_flag and not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Restart VM {}".format(domain), prompt_suffix="? 
", abort=True - ) - except Exception: - restart_flag = False - - retcode, retmsg = pvc_vm.vm_networks_add( - config, - domain, - net, - macaddr, - model, - sriov_flag, - sriov_mode, - live_flag, - restart_flag, - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm network remove -############################################################################### -@click.command(name="remove", short_help="Remove network from a virtual machine.") -@click.argument("domain") -@click.argument("net", required=False, default=None) -@click.option( - "-m", - "--mac-address", - "macaddr", - default=None, - help="Remove an interface with this MAC address; required if NET is unspecified.", -) -@click.option( - "-s", - "--sriov", - "sriov_flag", - is_flag=True, - default=False, - help="Identify that NET is an SR-IOV device name and not a VNI. Required for removing SR-IOV NETs.", -) -@click.option( - "-l/-L", - "--live/--no-live", - "live_flag", - is_flag=True, - default=True, - help="Immediately live-detach device to VM [default] or disable this behaviour.", -) -@click.option( - "-r", - "--restart", - "restart_flag", - is_flag=True, - default=False, - help='Immediately restart VM to apply new config; implies "--no-live".', -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the restart.", -) -@cluster_req -def vm_network_remove( - domain, net, macaddr, sriov_flag, live_flag, restart_flag, confirm_flag -): - """ - Remove the network NET from the virtual machine DOMAIN. - - NET may be a PVC network VNI, which is added as a bridged device, or a SR-IOV VF device connected in the given mode. - - NET is optional if the '-m'/'--mac-address' option is specified. If it is, then the specific device with that MAC address is removed instead. 
- - If multiple interfaces are present on the VM in network NET, and '-m'/'--mac-address' is not specified, then all interfaces in that network will be removed. - """ - if restart_flag and live_flag: - live_flag = False - - if restart_flag and not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Restart VM {}".format(domain), prompt_suffix="? ", abort=True - ) - except Exception: - restart_flag = False - - retcode, retmsg = pvc_vm.vm_networks_remove( - config, domain, net, macaddr, sriov_flag, live_flag, restart_flag - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm volume -############################################################################### -@click.group( - name="volume", - short_help="Manage attached volumes of a virtual machine.", - context_settings=CONTEXT_SETTINGS, -) -def vm_volume(): - """ - Manage the attached volumes of a virtual machine in the PVC cluster. - - Volume details cannot be modified here. To modify a volume, first remove it, then readd it with the correct settings. Unless the '-r'/'--reboot' flag is provided, this will not affect the running VM until it is restarted. - """ - pass - - -############################################################################### -# pvc vm volume get -############################################################################### -@click.command(name="get", short_help="Get the volumes of a virtual machine.") -@click.argument("domain") -@click.option( - "-r", - "--raw", - "raw", - is_flag=True, - default=False, - help="Display the raw values only without formatting.", -) -@cluster_req -def vm_volume_get(domain, raw): - """ - Get the volumes of the virtual machine DOMAIN. 
- """ - - retcode, retdata = pvc_vm.vm_volumes_get(config, domain) - if not raw: - retmsg = pvc_vm.format_vm_volumes(config, domain, retdata) - else: - volume_paths = list() - for volume in retdata: - volume_paths.append("{}:{}".format(volume[2], volume[0])) - retmsg = ",".join(volume_paths) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm volume add -############################################################################### -@click.command(name="add", short_help="Add volume to a virtual machine.") -@click.argument("domain") -@click.argument("volume") -@click.option( - "-d", - "--disk-id", - "disk_id", - default=None, - help="The disk ID in sdX/vdX/hdX format; if not specified, the next available will be used.", -) -@click.option( - "-b", - "--bus", - "bus", - default="scsi", - show_default=True, - type=click.Choice(["scsi", "ide", "usb", "virtio"]), - help="The bus to attach the disk to; must be present in the VM.", -) -@click.option( - "-t", - "--type", - "disk_type", - default="rbd", - show_default=True, - type=click.Choice(["rbd", "file"]), - help="The type of volume to add.", -) -@click.option( - "-l/-L", - "--live/--no-live", - "live_flag", - is_flag=True, - default=True, - help="Immediately live-attach device to VM [default] or disable this behaviour.", -) -@click.option( - "-r", - "--restart", - "restart_flag", - is_flag=True, - default=False, - help='Immediately restart VM to apply new config; implies "--no-live".', -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the restart", -) -@cluster_req -def vm_volume_add( - domain, volume, disk_id, bus, disk_type, live_flag, restart_flag, confirm_flag -): - """ - Add the volume VOLUME to the virtual machine DOMAIN. - - VOLUME may be either an absolute file path (for type 'file') or an RBD volume in the form "pool/volume" (for type 'rbd'). 
RBD volumes are verified against the cluster before adding and must exist. - """ - if restart_flag and live_flag: - live_flag = False - - if restart_flag and not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Restart VM {}".format(domain), prompt_suffix="? ", abort=True - ) - except Exception: - restart_flag = False - - retcode, retmsg = pvc_vm.vm_volumes_add( - config, domain, volume, disk_id, bus, disk_type, live_flag, restart_flag - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm volume remove -############################################################################### -@click.command(name="remove", short_help="Remove volume from a virtual machine.") -@click.argument("domain") -@click.argument("volume") -@click.option( - "-l/-L", - "--live/--no-live", - "live_flag", - is_flag=True, - default=True, - help="Immediately live-detach device to VM [default] or disable this behaviour.", -) -@click.option( - "-r", - "--restart", - "restart_flag", - is_flag=True, - default=False, - help='Immediately restart VM to apply new config; implies "--no-live".', -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the restart", -) -@cluster_req -def vm_volume_remove(domain, volume, live_flag, restart_flag, confirm_flag): - """ - Remove VOLUME from the virtual machine DOMAIN; VOLUME must be a file path or RBD path in 'pool/volume' format. - """ - if restart_flag and live_flag: - live_flag = False - - if restart_flag and not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Restart VM {}".format(domain), prompt_suffix="? 
", abort=True - ) - except Exception: - restart_flag = False - - retcode, retmsg = pvc_vm.vm_volumes_remove( - config, domain, volume, live_flag, restart_flag - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm log -############################################################################### -@click.command(name="log", short_help="Show console logs of a VM object.") -@click.argument("domain") -@click.option( - "-l", - "--lines", - "lines", - default=None, - show_default=False, - help="Display this many log lines from the end of the log buffer. [default: 1000; with follow: 10]", -) -@click.option( - "-f", - "--follow", - "follow", - is_flag=True, - default=False, - help="Follow the log buffer; output may be delayed by a few seconds relative to the live system. The --lines value defaults to 10 for the initial output.", -) -@cluster_req -def vm_log(domain, lines, follow): - """ - Show console logs of virtual machine DOMAIN on its current node in a pager or continuously. DOMAIN may be a UUID or name. Note that migrating a VM to a different node will cause the log buffer to be overwritten by entries from the new node. 
- """ - - # Set the default here so we can handle it - if lines is None: - if follow: - lines = 10 - else: - lines = 1000 - - if follow: - retcode, retmsg = pvc_vm.follow_console_log(config, domain, lines) - else: - retcode, retmsg = pvc_vm.view_console_log(config, domain, lines) - click.echo_via_pager(retmsg) - retmsg = "" - cleanup(retcode, retmsg) - - -############################################################################### -# pvc vm info -############################################################################### -@click.command(name="info", short_help="Show details of a VM object.") -@click.argument("domain") -@click.option( - "-l", - "--long", - "long_output", - is_flag=True, - default=False, - help="Display more detailed information.", -) -@cluster_req -def vm_info(domain, long_output): - """ - Show information about virtual machine DOMAIN. DOMAIN may be a UUID or name. - """ - - retcode, retdata = pvc_vm.vm_info(config, domain) - if retcode: - retdata = pvc_vm.format_info(config, retdata, long_output) - cleanup(retcode, retdata) - - -############################################################################### -# pvc vm dump -############################################################################### -@click.command(name="dump", short_help="Dump a virtual machine XML to stdout.") -@click.option( - "-f", - "--file", - "filename", - default=None, - type=click.File(mode="w"), - help="Write VM XML to this file.", -) -@click.argument("domain") -@cluster_req -def vm_dump(filename, domain): - """ - Dump the Libvirt XML definition of virtual machine DOMAIN to stdout. DOMAIN may be a UUID or name. 
- """ - - retcode, retdata = pvc_vm.vm_info(config, domain) - if not retcode or not retdata.get("name", None): - cleanup(False, 'ERROR: Could not find VM "{}"!'.format(domain)) - - current_vm_cfg_raw = retdata.get("xml") - xml_data = etree.fromstring(current_vm_cfg_raw) - current_vm_cfgfile = etree.tostring(xml_data, pretty_print=True).decode("utf8") - xml = current_vm_cfgfile.strip() - - if filename is not None: - filename.write(xml) - cleanup(retcode, 'VM XML written to "{}".'.format(filename.name)) - else: - cleanup(retcode, xml) - - -############################################################################### -# pvc vm list -############################################################################### -@click.command(name="list", short_help="List all VM objects.") -@click.argument("limit", default=None, required=False) -@click.option( - "-t", - "--target", - "target_node", - default=None, - help="Limit list to VMs on the specified node.", -) -@click.option( - "-s", - "--state", - "target_state", - default=None, - help="Limit list to VMs in the specified state.", -) -@click.option( - "-g", - "--tag", - "target_tag", - default=None, - help="Limit list to VMs with the specified tag.", -) -@click.option( - "-r", - "--raw", - "raw", - is_flag=True, - default=False, - help="Display the raw list of VM names only.", -) -@click.option( - "-n", - "--negate", - "negate", - is_flag=True, - default=False, - help="Negate the specified node, state, or tag limit(s).", -) -@cluster_req -def vm_list(target_node, target_state, target_tag, limit, raw, negate): - """ - List all virtual machines; optionally only match names or full UUIDs matching regex LIMIT. - - NOTE: Red-coloured network lists indicate one or more configured networks are missing/invalid. 
- """ - - retcode, retdata = pvc_vm.vm_list( - config, limit, target_node, target_state, target_tag, negate - ) - if retcode: - retdata = pvc_vm.format_list(config, retdata, raw) - else: - if raw: - retdata = "" - cleanup(retcode, retdata) - - -############################################################################### -# pvc network -############################################################################### -@click.group( - name="network", - short_help="Manage a PVC virtual network.", - context_settings=CONTEXT_SETTINGS, -) -def cli_network(): - """ - Manage the state of a network in the PVC cluster. - """ - pass - - -############################################################################### -# pvc network add -############################################################################### -@click.command(name="add", short_help="Add a new virtual network.") -@click.option( - "-d", - "--description", - "description", - required=True, - help="Description of the network; must be unique and not contain whitespace.", -) -@click.option( - "-p", - "--type", - "nettype", - required=True, - type=click.Choice(["managed", "bridged"]), - help="Network type; managed networks control IP addressing; bridged networks are simple vLAN bridges. All subsequent options are unused for bridged networks.", -) -@click.option("-m", "--mtu", "mtu", default="", help="MTU of the network interfaces.") -@click.option( - "-n", "--domain", "domain", default=None, help="Domain name of the network." 
-) -@click.option( - "--dns-server", - "name_servers", - multiple=True, - help="DNS nameserver for network; multiple entries may be specified.", -) -@click.option( - "-i", - "--ipnet", - "ip_network", - default=None, - help="CIDR-format IPv4 network address for subnet.", -) -@click.option( - "-i6", - "--ipnet6", - "ip6_network", - default=None, - help='CIDR-format IPv6 network address for subnet; should be /64 or larger ending "::/YY".', -) -@click.option( - "-g", - "--gateway", - "ip_gateway", - default=None, - help="Default IPv4 gateway address for subnet.", -) -@click.option( - "-g6", - "--gateway6", - "ip6_gateway", - default=None, - help='Default IPv6 gateway address for subnet. [default: "X::1"]', -) -@click.option( - "--dhcp/--no-dhcp", - "dhcp_flag", - is_flag=True, - default=False, - help="Enable/disable IPv4 DHCP for clients on subnet.", -) -@click.option( - "--dhcp-start", "dhcp_start", default=None, help="IPv4 DHCP range start address." -) -@click.option( - "--dhcp-end", "dhcp_end", default=None, help="IPv4 DHCP range end address." -) -@click.argument("vni") -@cluster_req -def net_add( - vni, - description, - nettype, - mtu, - domain, - ip_network, - ip_gateway, - ip6_network, - ip6_gateway, - dhcp_flag, - dhcp_start, - dhcp_end, - name_servers, -): - """ - Add a new virtual network with VXLAN identifier VNI. - - NOTE: The MTU must be equal to, or less than, the underlying device MTU (either the node 'bridge_mtu' for bridged networks, or the node 'cluster_mtu' minus 50 for managed networks). Is only required if the device MTU should be lower than the underlying physical device MTU for compatibility. If unset, defaults to the underlying device MTU which will be set explcitly when the network is added to the nodes. - - Examples: - - pvc network add 101 --description my-bridged-net --type bridged - - > Creates vLAN 101 and a simple bridge on the VNI dev interface. 
- - pvc network add 1001 --description my-managed-net --type managed --domain test.local --ipnet 10.1.1.0/24 --gateway 10.1.1.1 - - > Creates a VXLAN with ID 1001 on the VNI dev interface, with IPv4 managed networking. - - IPv6 is fully supported with --ipnet6 and --gateway6 in addition to or instead of IPv4. PVC will configure DHCPv6 in a semi-managed configuration for the network if set. - """ - - retcode, retmsg = pvc_network.net_add( - config, - vni, - description, - nettype, - mtu, - domain, - name_servers, - ip_network, - ip_gateway, - ip6_network, - ip6_gateway, - dhcp_flag, - dhcp_start, - dhcp_end, - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc network modify -############################################################################### -@click.command(name="modify", short_help="Modify an existing virtual network.") -@click.option( - "-d", - "--description", - "description", - default=None, - help="Description of the network; must be unique and not contain whitespace.", -) -@click.option("-m", "--mtu", "mtu", default=None, help="MTU of the network interfaces.") -@click.option( - "-n", "--domain", "domain", default=None, help="Domain name of the network." 
-) -@click.option( - "--dns-server", - "name_servers", - multiple=True, - help="DNS nameserver for network; multiple entries may be specified (will overwrite all previous entries).", -) -@click.option( - "-i", - "--ipnet", - "ip4_network", - default=None, - help='CIDR-format IPv4 network address for subnet; disable with "".', -) -@click.option( - "-i6", - "--ipnet6", - "ip6_network", - default=None, - help='CIDR-format IPv6 network address for subnet; disable with "".', -) -@click.option( - "-g", - "--gateway", - "ip4_gateway", - default=None, - help='Default IPv4 gateway address for subnet; disable with "".', -) -@click.option( - "-g6", - "--gateway6", - "ip6_gateway", - default=None, - help='Default IPv6 gateway address for subnet; disable with "".', -) -@click.option( - "--dhcp/--no-dhcp", - "dhcp_flag", - is_flag=True, - default=None, - help="Enable/disable DHCPv4 for clients on subnet (DHCPv6 is always enabled if DHCPv6 network is set).", -) -@click.option( - "--dhcp-start", "dhcp_start", default=None, help="DHCPvr range start address." -) -@click.option("--dhcp-end", "dhcp_end", default=None, help="DHCPv4 range end address.") -@click.argument("vni") -@cluster_req -def net_modify( - vni, - description, - mtu, - domain, - name_servers, - ip6_network, - ip6_gateway, - ip4_network, - ip4_gateway, - dhcp_flag, - dhcp_start, - dhcp_end, -): - """ - Modify details of virtual network VNI. All fields optional; only specified fields will be updated. - - NOTE: The MTU must be equal to, or less than, the underlying device MTU (either the node 'bridge_mtu' for bridged networks, or the node 'cluster_mtu' minus 50 for managed networks). Is only required if the device MTU should be lower than the underlying physical device MTU for compatibility. To reset an explicit MTU to the default underlying device MTU, specify '--mtu' with a quoted empty string argument. 
- - Example: - - pvc network modify 1001 --gateway 10.1.1.1 --dhcp - """ - - retcode, retmsg = pvc_network.net_modify( - config, - vni, - description, - mtu, - domain, - name_servers, - ip4_network, - ip4_gateway, - ip6_network, - ip6_gateway, - dhcp_flag, - dhcp_start, - dhcp_end, - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc network remove -############################################################################### -@click.command(name="remove", short_help="Remove a virtual network.") -@click.argument("net") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def net_remove(net, confirm_flag): - """ - Remove an existing virtual network NET; NET must be a VNI. - - WARNING: PVC does not verify whether clients are still present in this network. Before removing, ensure - that all client VMs have been removed from the network or undefined behaviour may occur. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove network {}".format(net), prompt_suffix="? ", abort=True - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_network.net_remove(config, net) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc network info -############################################################################### -@click.command(name="info", short_help="Show details of a network.") -@click.argument("vni") -@click.option( - "-l", - "--long", - "long_output", - is_flag=True, - default=False, - help="Display more detailed information.", -) -@cluster_req -def net_info(vni, long_output): - """ - Show information about virtual network VNI. 
- """ - - retcode, retdata = pvc_network.net_info(config, vni) - if retcode: - retdata = pvc_network.format_info(config, retdata, long_output) - cleanup(retcode, retdata) - - -############################################################################### -# pvc network list -############################################################################### -@click.command(name="list", short_help="List all VM objects.") -@click.argument("limit", default=None, required=False) -@cluster_req -def net_list(limit): - """ - List all virtual networks; optionally only match VNIs or Descriptions matching regex LIMIT. - """ - - retcode, retdata = pvc_network.net_list(config, limit) - if retcode: - retdata = pvc_network.format_list(config, retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc network dhcp -############################################################################### -@click.group( - name="dhcp", - short_help="Manage IPv4 DHCP leases in a PVC virtual network.", - context_settings=CONTEXT_SETTINGS, -) -def net_dhcp(): - """ - Manage host IPv4 DHCP leases of a VXLAN network in the PVC cluster. - """ - pass - - -############################################################################### -# pvc network dhcp add -############################################################################### -@click.command(name="add", short_help="Add a DHCP static reservation.") -@click.argument("net") -@click.argument("ipaddr") -@click.argument("hostname") -@click.argument("macaddr") -@cluster_req -def net_dhcp_add(net, ipaddr, macaddr, hostname): - """ - Add a new DHCP static reservation of IP address IPADDR with hostname HOSTNAME for MAC address MACADDR to virtual network NET; NET must be a VNI. 
- """ - - retcode, retmsg = pvc_network.net_dhcp_add(config, net, ipaddr, macaddr, hostname) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc network dhcp remove -############################################################################### -@click.command(name="remove", short_help="Remove a DHCP static reservation.") -@click.argument("net") -@click.argument("macaddr") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def net_dhcp_remove(net, macaddr, confirm_flag): - """ - Remove a DHCP lease for MACADDR from virtual network NET; NET must be a VNI. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove DHCP lease for {} in network {}".format(macaddr, net), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_network.net_dhcp_remove(config, net, macaddr) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc network dhcp list -############################################################################### -@click.command(name="list", short_help="List active DHCP leases.") -@click.argument("net") -@click.argument("limit", default=None, required=False) -@click.option( - "-s", - "--static", - "only_static", - is_flag=True, - default=False, - help="Show only static leases.", -) -@cluster_req -def net_dhcp_list(net, limit, only_static): - """ - List all DHCP leases in virtual network NET; optionally only match elements matching regex LIMIT; NET must be a VNI. 
- """ - - retcode, retdata = pvc_network.net_dhcp_list(config, net, limit, only_static) - if retcode: - retdata = pvc_network.format_list_dhcp(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc network acl -############################################################################### -@click.group( - name="acl", - short_help="Manage a PVC virtual network firewall ACL rule.", - context_settings=CONTEXT_SETTINGS, -) -def net_acl(): - """ - Manage firewall ACLs of a VXLAN network in the PVC cluster. - """ - pass - - -############################################################################### -# pvc network acl add -############################################################################### -@click.command(name="add", short_help="Add firewall ACL.") -@click.option( - "--in/--out", - "direction", - is_flag=True, - default=True, # inbound - help="Inbound or outbound ruleset.", -) -@click.option( - "-d", - "--description", - "description", - required=True, - help="Description of the ACL; must be unique and not contain whitespace.", -) -@click.option("-r", "--rule", "rule", required=True, help="NFT firewall rule.") -@click.option( - "-o", - "--order", - "order", - default=None, - help='Order of rule in the chain (see "list"); defaults to last.', -) -@click.argument("net") -@cluster_req -def net_acl_add(net, direction, description, rule, order): - """ - Add a new NFT firewall rule to network NET; the rule is a literal NFT rule belonging to the forward table for the client network; NET must be a VNI. - - NOTE: All client networks are default-allow in both directions; deny rules MUST be added here at the end of the sequence for a default-deny setup. - - NOTE: Ordering places the rule at the specified ID, not before it; the old rule of that ID and all subsequent rules will be moved down. - - NOTE: Descriptions are used as names, and must be unique within a network (both directions). 
- - Example: - - pvc network acl add 1001 --in --rule "tcp dport 22 ct state new accept" --description "ssh-in" --order 3 - """ - if direction: - direction = "in" - else: - direction = "out" - - retcode, retmsg = pvc_network.net_acl_add( - config, net, direction, description, rule, order - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc network acl remove -############################################################################### -@click.command(name="remove", short_help="Remove firewall ACL.") -@click.argument("net") -@click.argument( - "rule", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def net_acl_remove(net, rule, confirm_flag): - """ - Remove an NFT firewall rule RULE from network NET; RULE must be a description; NET must be a VNI. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove ACL {} in network {}".format(rule, net), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_network.net_acl_remove(config, net, rule) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc network acl list -############################################################################### -@click.command(name="list", short_help="List firewall ACLs.") -@click.option( - "--in/--out", - "direction", - is_flag=True, - required=False, - default=None, - help="Inbound or outbound rule set only.", -) -@click.argument("net") -@click.argument("limit", default=None, required=False) -@cluster_req -def net_acl_list(net, limit, direction): - """ - List all NFT firewall rules in network NET; optionally only match elements matching description regex LIMIT; NET can be either a VNI or description. 
- """ - if direction is not None: - if direction: - direction = "in" - else: - direction = "out" - - retcode, retdata = pvc_network.net_acl_list(config, net, limit, direction) - if retcode: - retdata = pvc_network.format_list_acl(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc network sriov -############################################################################### -@click.group( - name="sriov", - short_help="Manage SR-IOV network resources.", - context_settings=CONTEXT_SETTINGS, -) -def net_sriov(): - """ - Manage SR-IOV network resources on nodes (PFs and VFs). - """ - pass - - -############################################################################### -# pvc network sriov pf -############################################################################### -@click.group( - name="pf", short_help="Manage PF devices.", context_settings=CONTEXT_SETTINGS -) -def net_sriov_pf(): - """ - Manage SR-IOV PF devices on nodes. - """ - pass - - -############################################################################### -# pvc network sriov pf list -############################################################################### -@click.command(name="list", short_help="List PF devices.") -@click.argument("node") -@cluster_req -def net_sriov_pf_list(node): - """ - List all SR-IOV PFs on NODE. - """ - retcode, retdata = pvc_network.net_sriov_pf_list(config, node) - if retcode: - retdata = pvc_network.format_list_sriov_pf(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc network sriov vf -############################################################################### -@click.group( - name="vf", short_help="Manage VF devices.", context_settings=CONTEXT_SETTINGS -) -def net_sriov_vf(): - """ - Manage SR-IOV VF devices on nodes. 
- """ - pass - - -############################################################################### -# pvc network sriov vf set -############################################################################### -@click.command(name="set", short_help="Set VF device properties.") -@click.option( - "--vlan-id", - "vlan_id", - default=None, - show_default=False, - help="The vLAN ID for vLAN tagging.", -) -@click.option( - "--qos-prio", - "vlan_qos", - default=None, - show_default=False, - help="The vLAN QOS priority.", -) -@click.option( - "--tx-min", - "tx_rate_min", - default=None, - show_default=False, - help="The minimum TX rate.", -) -@click.option( - "--tx-max", - "tx_rate_max", - default=None, - show_default=False, - help="The maximum TX rate.", -) -@click.option( - "--link-state", - "link_state", - default=None, - show_default=False, - type=click.Choice(["auto", "enable", "disable"]), - help="The administrative link state.", -) -@click.option( - "--spoof-check/--no-spoof-check", - "spoof_check", - is_flag=True, - default=None, - show_default=False, - help="Enable or disable spoof checking.", -) -@click.option( - "--trust/--no-trust", - "trust", - is_flag=True, - default=None, - show_default=False, - help="Enable or disable VF user trust.", -) -@click.option( - "--query-rss/--no-query-rss", - "query_rss", - is_flag=True, - default=None, - show_default=False, - help="Enable or disable query RSS support.", -) -@click.argument("node") -@click.argument("vf") -@cluster_req -def net_sriov_vf_set( - node, - vf, - vlan_id, - vlan_qos, - tx_rate_min, - tx_rate_max, - link_state, - spoof_check, - trust, - query_rss, -): - """ - Set a property of SR-IOV VF on NODE. - """ - if ( - vlan_id is None - and vlan_qos is None - and tx_rate_min is None - and tx_rate_max is None - and link_state is None - and spoof_check is None - and trust is None - and query_rss is None - ): - cleanup( - False, "At least one configuration property must be specified to update." 
- ) - - retcode, retmsg = pvc_network.net_sriov_vf_set( - config, - node, - vf, - vlan_id, - vlan_qos, - tx_rate_min, - tx_rate_max, - link_state, - spoof_check, - trust, - query_rss, - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc network sriov vf list -############################################################################### -@click.command(name="list", short_help="List VF devices.") -@click.argument("node") -@click.argument("pf", default=None, required=False) -@cluster_req -def net_sriov_vf_list(node, pf): - """ - List all SR-IOV VFs on NODE, optionally limited to device PF. - """ - retcode, retdata = pvc_network.net_sriov_vf_list(config, node, pf) - if retcode: - retdata = pvc_network.format_list_sriov_vf(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc network sriov vf info -############################################################################### -@click.command(name="info", short_help="List VF devices.") -@click.argument("node") -@click.argument("vf") -@cluster_req -def net_sriov_vf_info(node, vf): - """ - Show details of the SR-IOV VF on NODE. - """ - retcode, retdata = pvc_network.net_sriov_vf_info(config, node, vf) - if retcode: - retdata = pvc_network.format_info_sriov_vf(config, retdata, node) - cleanup(retcode, retdata) - - -############################################################################### -# pvc storage -############################################################################### -# Note: The prefix `storage` allows future potential storage subsystems. -# Since Ceph is the only section not abstracted by PVC directly -# (i.e. it references Ceph-specific concepts), this makes more -# sense in the long-term. 
-############################################################################### -@click.group( - name="storage", - short_help="Manage the PVC storage cluster.", - context_settings=CONTEXT_SETTINGS, -) -def cli_storage(): - """ - Manage the storage of the PVC cluster. - """ - pass - - -############################################################################### -# pvc storage status -############################################################################### -@click.command(name="status", short_help="Show storage cluster status.") -@cluster_req -def ceph_status(): - """ - Show detailed status of the storage cluster. - """ - - retcode, retdata = pvc_ceph.ceph_status(config) - if retcode: - retdata = pvc_ceph.format_raw_output(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc storage util -############################################################################### -@click.command(name="util", short_help="Show storage cluster utilization.") -@cluster_req -def ceph_util(): - """ - Show utilization of the storage cluster. - """ - - retcode, retdata = pvc_ceph.ceph_util(config) - if retcode: - retdata = pvc_ceph.format_raw_output(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc storage benchmark -############################################################################### -@click.group(name="benchmark", short_help="Run or view cluster storage benchmarks.") -@cluster_req -def ceph_benchmark(): - """ - Run or view benchmarks of the storage cluster. 
- """ - pass - - -############################################################################### -# pvc storage benchmark run -############################################################################### -@click.command(name="run", short_help="Run a storage benchmark.") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the run", -) -@click.argument("pool") -@cluster_req -def ceph_benchmark_run(confirm_flag, pool): - """ - Run a storage benchmark on POOL in the background. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "NOTE: Storage benchmarks take approximately 10 minutes to run and generate significant load on the cluster; they should be run sparingly. Continue", - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_ceph.ceph_benchmark_run(config, pool) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage benchmark info -############################################################################### -@click.command(name="info", short_help="Show detailed storage benchmark results.") -@click.argument("job", required=True) -@click.option( - "-f", - "--format", - "oformat", - default="summary", - show_default=True, - type=click.Choice(["summary", "json", "json-pretty"]), - help="Output format of benchmark information.", -) -@cluster_req -def ceph_benchmark_info(job, oformat): - """ - Show full details of storage benchmark JOB. 
- """ - - retcode, retdata = pvc_ceph.ceph_benchmark_list(config, job) - if retcode: - retdata = pvc_ceph.format_info_benchmark(config, oformat, retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc storage benchmark list -############################################################################### -@click.command(name="list", short_help="List storage benchmark results.") -@click.argument("job", default=None, required=False) -@cluster_req -def ceph_benchmark_list(job): - """ - List all Ceph storage benchmarks; optionally only match JOB. - """ - - retcode, retdata = pvc_ceph.ceph_benchmark_list(config, job) - if retcode: - retdata = pvc_ceph.format_list_benchmark(config, retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc storage osd -############################################################################### -@click.group( - name="osd", - short_help="Manage OSDs in the PVC storage cluster.", - context_settings=CONTEXT_SETTINGS, -) -def ceph_osd(): - """ - Manage the Ceph OSDs of the PVC cluster. - """ - pass - - -############################################################################### -# pvc storage osd create-db-vg -############################################################################### -@click.command(name="create-db-vg", short_help="Create new OSD database volume group.") -@click.argument("node") -@click.argument("device") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the creation.", -) -@cluster_req -def ceph_osd_create_db_vg(node, device, confirm_flag): - """ - Create a new Ceph OSD database volume group on node NODE with block device DEVICE. DEVICE must be a valid raw block device (e.g. '/dev/nvme0n1', '/dev/disk/by-path/...') or a "detect" string. Using partitions is not supported. 
- - A "detect" string is a string in the form "detect:::". Detect strings allow for automatic determination of Linux block device paths from known basic information about disks by leveraging "lsscsi" on the target host. The "NAME" should be some descriptive identifier, for instance the manufacturer (e.g. "INTEL"), the "HUMAN-SIZE" should be the labeled human-readable size of the device (e.g. "480GB", "1.92TB"), and "ID" specifies the Nth 0-indexed device which matches the "NAME" and "HUMAN-SIZE" values (e.g. "2" would match the third device with the corresponding "NAME" and "HUMAN-SIZE"). When matching against sizes, there is +/- 3% flexibility to account for base-1000 vs. base-1024 differences and rounding errors. The "NAME" may contain whitespace but if so the entire detect string should be quoted, and is case-insensitive. More information about detect strings can be found in the pvcbootstrapd manual. - - This volume group will be used for Ceph OSD database and WAL functionality if the '--ext-db' flag is passed to newly-created OSDs during 'pvc storage osd add'. DEVICE should be an extremely fast SSD device (NVMe, Intel Optane, etc.) which is significantly faster than the normal OSD disks and with very high write endurance. Only one OSD database volume group on a single physical device is supported per node, so it must be fast and large enough to act as an effective OSD database device for all OSDs on the node. Attempting to add additional database volume groups after the first will fail. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Destroy all data and create a new OSD database volume group on {}:{}".format( - node, device - ), - prompt_suffix="? 
", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_ceph.ceph_osd_db_vg_add(config, node, device) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd add -############################################################################### -@click.command(name="add", short_help="Add new OSD.") -@click.argument("node") -@click.argument("device") -@click.option( - "-w", - "--weight", - "weight", - default=1.0, - show_default=True, - help="Weight of the OSD within the CRUSH map.", -) -@click.option( - "-d", - "--ext-db", - "ext_db_flag", - is_flag=True, - default=False, - help="Use an external database logical volume for this OSD.", -) -@click.option( - "-r", - "--ext-db-ratio", - "ext_db_ratio", - default=0.05, - show_default=True, - type=float, - help="Decimal ratio of the external database logical volume to the OSD size.", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the creation.", -) -@cluster_req -def ceph_osd_add(node, device, weight, ext_db_flag, ext_db_ratio, confirm_flag): - """ - Add a new Ceph OSD on node NODE with block device DEVICE. DEVICE must be a valid raw block device (e.g. '/dev/sda', '/dev/nvme0n1', '/dev/disk/by-path/...', '/dev/disk/by-id/...') or a "detect" string. Using partitions is not supported. - - A "detect" string is a string in the form "detect:::". Detect strings allow for automatic determination of Linux block device paths from known basic information about disks by leveraging "lsscsi" on the target host. The "NAME" should be some descriptive identifier, for instance the manufacturer (e.g. "INTEL"), the "HUMAN-SIZE" should be the labeled human-readable size of the device (e.g. "480GB", "1.92TB"), and "ID" specifies the Nth 0-indexed device which matches the "NAME" and "HUMAN-SIZE" values (e.g. "2" would match the third device with the corresponding "NAME" and "HUMAN-SIZE"). 
When matching against sizes, there is +/- 3% flexibility to account for base-1000 vs. base-1024 differences and rounding errors. The "NAME" may contain whitespace but if so the entire detect string should be quoted, and is case-insensitive. More information about detect strings can be found in the pvcbootstrapd manual. - - The weight of an OSD should reflect the ratio of the OSD to other OSDs in the storage cluster. For example, if all OSDs are the same size as recommended for PVC, 1 (the default) is a valid weight so that all are treated identically. If a new OSD is added later which is 4x the size of the existing OSDs, the new OSD's weight should then be 4 to tell the cluster that 4x the data can be stored on the OSD. Weights can also be tweaked for performance reasons, since OSDs with more data will incur more I/O load. For more information about CRUSH weights, please see the Ceph documentation. - - If '--ext-db' is specified, the OSD database and WAL will be placed on a new logical volume in NODE's OSD database volume group; it must exist or OSD creation will fail. See the 'pvc storage osd create-db-vg' command for more details. - - The default '--ext-db-ratio' of 0.05 (5%) is sufficient for most RBD workloads and OSD sizes, though this can be adjusted based on the sizes of the OSD(s) and the underlying database device. Ceph documentation recommends at least 0.02 (2%) for RBD use-cases, and higher values may improve WAL performance under write-heavy workloads with fewer OSDs per node. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Destroy all data and create a new OSD on {}:{}".format(node, device), - prompt_suffix="? 
", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_ceph.ceph_osd_add( - config, node, device, weight, ext_db_flag, ext_db_ratio - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd replace -############################################################################### -@click.command(name="replace", short_help="Replace OSD block device.") -@click.argument("osdid") -@click.argument("device") -@click.option( - "-w", - "--weight", - "weight", - default=1.0, - show_default=True, - help="New weight of the OSD within the CRUSH map.", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def ceph_osd_replace(osdid, device, weight, confirm_flag): - """ - Replace the block device of an existing OSD with ID OSDID with DEVICE. Use this command to replace a failed or smaller OSD block device with a new one. - - DEVICE must be a valid raw block device (e.g. '/dev/sda', '/dev/nvme0n1', '/dev/disk/by-path/...', '/dev/disk/by-id/...') or a "detect" string. Using partitions is not supported. A "detect" string is a string in the form "detect:::". For details, see 'pvc storage osd add --help'. - - The weight of an OSD should reflect the ratio of the OSD to other OSDs in the storage cluster. For details, see 'pvc storage osd add --help'. Note that the current weight must be explicitly specified if it differs from the default. - - Existing IDs, external DB devices, etc. of the OSD will be preserved; data will be lost and rebuilt from the remaining healthy OSDs. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Replace OSD {} with block device {}".format(osdid, device), - prompt_suffix="? 
", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_ceph.ceph_osd_replace(config, osdid, device, weight) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd refresh -############################################################################### -@click.command(name="refresh", short_help="Refresh (reimport) OSD device.") -@click.argument("osdid") -@click.argument("device") -@cluster_req -def ceph_osd_refresh(osdid, device): - """ - Refresh (reimport) the block DEVICE of an existing OSD with ID OSDID. Use this command to reimport a working OSD into a rebuilt/replaced node. - - DEVICE must be a valid raw block device (e.g. '/dev/sda', '/dev/nvme0n1', '/dev/disk/by-path/...', '/dev/disk/by-id/...') or a "detect" string. Using partitions is not supported. A "detect" string is a string in the form "detect:::". For details, see 'pvc storage osd add --help'. - - Existing data, IDs, weights, etc. of the OSD will be preserved. - - NOTE: If a device had an external DB device, this is not automatically handled at this time. It is best to remove and re-add the OSD instead. - """ - retcode, retmsg = pvc_ceph.ceph_osd_refresh(config, osdid, device) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd remove -############################################################################### -@click.command(name="remove", short_help="Remove OSD.") -@click.argument("osdid") -@click.option( - "-f", - "--force", - "force_flag", - is_flag=True, - default=False, - help="Force removal even if steps fail", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def ceph_osd_remove(osdid, force_flag, confirm_flag): - """ - Remove a Ceph OSD with ID OSDID. - - DANGER: This will completely remove the OSD from the cluster. 
OSDs will rebalance which will negatively affect performance and available space. It is STRONGLY RECOMMENDED to set an OSD out (using 'pvc storage osd out') and allow the cluster to fully rebalance (verified with 'pvc storage status') before removing an OSD. - - NOTE: The "-f"/"--force" option is useful after replacing a failed node, to ensure the OSD is removed even if the OSD in question does not properly exist on the node after a rebuild. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm("Remove OSD {}".format(osdid), prompt_suffix="? ", abort=True) - except Exception: - exit(0) - - retcode, retmsg = pvc_ceph.ceph_osd_remove(config, osdid, force_flag) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd in -############################################################################### -@click.command(name="in", short_help="Online OSD.") -@click.argument("osdid") -@cluster_req -def ceph_osd_in(osdid): - """ - Set a Ceph OSD with ID OSDID online. - """ - - retcode, retmsg = pvc_ceph.ceph_osd_state(config, osdid, "in") - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd out -############################################################################### -@click.command(name="out", short_help="Offline OSD.") -@click.argument("osdid") -@cluster_req -def ceph_osd_out(osdid): - """ - Set a Ceph OSD with ID OSDID offline. 
- """ - - retcode, retmsg = pvc_ceph.ceph_osd_state(config, osdid, "out") - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd set -############################################################################### -@click.command(name="set", short_help="Set property.") -@click.argument("osd_property") -@cluster_req -def ceph_osd_set(osd_property): - """ - Set a Ceph OSD property OSD_PROPERTY on the cluster. - - Valid properties are: - - full|pause|noup|nodown|noout|noin|nobackfill|norebalance|norecover|noscrub|nodeep-scrub|notieragent|sortbitwise|recovery_deletes|require_jewel_osds|require_kraken_osds - """ - - retcode, retmsg = pvc_ceph.ceph_osd_option(config, osd_property, "set") - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd unset -############################################################################### -@click.command(name="unset", short_help="Unset property.") -@click.argument("osd_property") -@cluster_req -def ceph_osd_unset(osd_property): - """ - Unset a Ceph OSD property OSD_PROPERTY on the cluster. - - Valid properties are: - - full|pause|noup|nodown|noout|noin|nobackfill|norebalance|norecover|noscrub|nodeep-scrub|notieragent|sortbitwise|recovery_deletes|require_jewel_osds|require_kraken_osds - """ - - retcode, retmsg = pvc_ceph.ceph_osd_option(config, osd_property, "unset") - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage osd list -############################################################################### -@click.command(name="list", short_help="List cluster OSDs.") -@click.argument("limit", default=None, required=False) -@cluster_req -def ceph_osd_list(limit): - """ - List all Ceph OSDs; optionally only match elements matching ID regex LIMIT. 
- """ - - retcode, retdata = pvc_ceph.ceph_osd_list(config, limit) - if retcode: - retdata = pvc_ceph.format_list_osd(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc storage pool -############################################################################### -@click.group( - name="pool", - short_help="Manage RBD pools in the PVC storage cluster.", - context_settings=CONTEXT_SETTINGS, -) -def ceph_pool(): - """ - Manage the Ceph RBD pools of the PVC cluster. - """ - pass - - -############################################################################### -# pvc storage pool add -############################################################################### -@click.command(name="add", short_help="Add new RBD pool.") -@click.argument("name") -@click.argument("pgs") -@click.option( - "-t", - "--tier", - "tier", - default="default", - show_default=True, - type=click.Choice(["default", "hdd", "ssd", "nvme"]), - help=""" - The device tier to limit the pool to. Default is all OSD tiers, and specific tiers can be specified instead. At least one full set of OSDs for a given tier must be present for the tier to be specified, or the pool creation will fail. - """, -) -@click.option( - "--replcfg", - "replcfg", - default="copies=3,mincopies=2", - show_default=True, - required=False, - help=""" - The replication configuration, specifying both a "copies" and "mincopies" value, separated by a comma, e.g. "copies=3,mincopies=2". The "copies" value specifies the total number of replicas and should not exceed the total number of nodes; the "mincopies" value specifies the minimum number of available copies to allow writes. For additional details please see the Cluster Architecture documentation. - """, -) -@cluster_req -def ceph_pool_add(name, pgs, tier, replcfg): - """ - Add a new Ceph RBD pool with name NAME and PGS placement groups. - - The placement group count must be a non-zero power of 2. 
- """ - - retcode, retmsg = pvc_ceph.ceph_pool_add(config, name, pgs, replcfg, tier) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage pool remove -############################################################################### -@click.command(name="remove", short_help="Remove RBD pool.") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def ceph_pool_remove(name, confirm_flag): - """ - Remove a Ceph RBD pool with name NAME and all volumes on it. - - DANGER: This will completely remove the pool and all volumes contained in it from the cluster. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove RBD pool {}".format(name), prompt_suffix="? ", abort=True - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_ceph.ceph_pool_remove(config, name) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage pool set-pgs -############################################################################### -@click.command(name="set-pgs", short_help="Set PGs of an RBD pool.") -@click.argument("name") -@click.argument("pgs") -@cluster_req -def ceph_pool_set_pgs(name, pgs): - """ - Set the placement groups (PGs) count for the pool NAME to PGS. - - The placement group count must be a non-zero power of 2. - - Placement group counts may be increased or decreased as required though frequent alteration is not recommended. 
- """ - - retcode, retmsg = pvc_ceph.ceph_pool_set_pgs(config, name, pgs) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage pool list -############################################################################### -@click.command(name="list", short_help="List cluster RBD pools.") -@click.argument("limit", default=None, required=False) -@cluster_req -def ceph_pool_list(limit): - """ - List all Ceph RBD pools; optionally only match elements matching name regex LIMIT. - """ - - retcode, retdata = pvc_ceph.ceph_pool_list(config, limit) - if retcode: - retdata = pvc_ceph.format_list_pool(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc storage volume -############################################################################### -@click.group( - name="volume", - short_help="Manage RBD volumes in the PVC storage cluster.", - context_settings=CONTEXT_SETTINGS, -) -def ceph_volume(): - """ - Manage the Ceph RBD volumes of the PVC cluster. - """ - pass - - -############################################################################### -# pvc storage volume add -############################################################################### -@click.command(name="add", short_help="Add new RBD volume.") -@click.argument("pool") -@click.argument("name") -@click.argument("size") -@cluster_req -def ceph_volume_add(pool, name, size): - """ - Add a new Ceph RBD volume with name NAME and size SIZE [in human units, e.g. 1024M, 20G, etc.] to pool POOL. 
- """ - - retcode, retmsg = pvc_ceph.ceph_volume_add(config, pool, name, size) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume upload -############################################################################### -@click.command(name="upload", short_help="Upload a local image file to RBD volume.") -@click.argument("pool") -@click.argument("name") -@click.argument("image_file") -@click.option( - "-f", - "--format", - "image_format", - default="raw", - show_default=True, - help="The format of the source image.", -) -@cluster_req -def ceph_volume_upload(pool, name, image_format, image_file): - """ - Upload a disk image file IMAGE_FILE to the RBD volume NAME in pool POOL. - - The volume NAME must exist in the pool before uploading to it, and must be large enough to fit the disk image in raw format. - - If the image format is "raw", the image is uploaded directly to the target volume without modification. Otherwise, it will be converted into raw format by "qemu-img convert" on the remote side before writing using a temporary volume. The image format must be a valid format recognized by "qemu-img", such as "vmdk" or "qcow2". 
- """ - - if not os.path.exists(image_file): - echo("ERROR: File '{}' does not exist!".format(image_file)) - exit(1) - - retcode, retmsg = pvc_ceph.ceph_volume_upload( - config, pool, name, image_format, image_file - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume remove -############################################################################### -@click.command(name="remove", short_help="Remove RBD volume.") -@click.argument("pool") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def ceph_volume_remove(pool, name, confirm_flag): - """ - Remove a Ceph RBD volume with name NAME from pool POOL. - - DANGER: This will completely remove the volume and all data contained in it. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove volume {}/{}".format(pool, name), prompt_suffix="? ", abort=True - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_ceph.ceph_volume_remove(config, pool, name) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume resize -############################################################################### -@click.command(name="resize", short_help="Resize RBD volume.") -@click.argument("pool") -@click.argument("name") -@click.argument("size") -@cluster_req -def ceph_volume_resize(pool, name, size): - """ - Resize an existing Ceph RBD volume with name NAME in pool POOL to size SIZE [in human units, e.g. 1024M, 20G, etc.]. 
- """ - retcode, retmsg = pvc_ceph.ceph_volume_modify(config, pool, name, new_size=size) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume rename -############################################################################### -@click.command(name="rename", short_help="Rename RBD volume.") -@click.argument("pool") -@click.argument("name") -@click.argument("new_name") -@cluster_req -def ceph_volume_rename(pool, name, new_name): - """ - Rename an existing Ceph RBD volume with name NAME in pool POOL to name NEW_NAME. - """ - retcode, retmsg = pvc_ceph.ceph_volume_modify(config, pool, name, new_name=new_name) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume clone -############################################################################### -@click.command(name="clone", short_help="Clone RBD volume.") -@click.argument("pool") -@click.argument("name") -@click.argument("new_name") -@cluster_req -def ceph_volume_clone(pool, name, new_name): - """ - Clone a Ceph RBD volume with name NAME in pool POOL to name NEW_NAME in pool POOL. - """ - retcode, retmsg = pvc_ceph.ceph_volume_clone(config, pool, name, new_name) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume list -############################################################################### -@click.command(name="list", short_help="List cluster RBD volumes.") -@click.argument("limit", default=None, required=False) -@click.option( - "-p", - "--pool", - "pool", - default=None, - show_default=True, - help="Show volumes from this pool only.", -) -@cluster_req -def ceph_volume_list(limit, pool): - """ - List all Ceph RBD volumes; optionally only match elements matching name regex LIMIT. 
- """ - - retcode, retdata = pvc_ceph.ceph_volume_list(config, limit, pool) - if retcode: - retdata = pvc_ceph.format_list_volume(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc storage volume snapshot -############################################################################### -@click.group( - name="snapshot", - short_help="Manage RBD volume snapshots in the PVC storage cluster.", - context_settings=CONTEXT_SETTINGS, -) -def ceph_volume_snapshot(): - """ - Manage the Ceph RBD volume snapshots of the PVC cluster. - """ - pass - - -############################################################################### -# pvc storage volume snapshot add -############################################################################### -@click.command(name="add", short_help="Add new RBD volume snapshot.") -@click.argument("pool") -@click.argument("volume") -@click.argument("name") -@cluster_req -def ceph_volume_snapshot_add(pool, volume, name): - """ - Add a snapshot with name NAME of Ceph RBD volume VOLUME in pool POOL. - """ - - retcode, retmsg = pvc_ceph.ceph_snapshot_add(config, pool, volume, name) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume snapshot rename -############################################################################### -@click.command(name="rename", short_help="Rename RBD volume snapshot.") -@click.argument("pool") -@click.argument("volume") -@click.argument("name") -@click.argument("new_name") -@cluster_req -def ceph_volume_snapshot_rename(pool, volume, name, new_name): - """ - Rename an existing Ceph RBD volume snapshot with name NAME to name NEW_NAME for volume VOLUME in pool POOL. 
- """ - retcode, retmsg = pvc_ceph.ceph_snapshot_modify( - config, pool, volume, name, new_name=new_name - ) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume snapshot remove -############################################################################### -@click.command(name="remove", short_help="Remove RBD volume snapshot.") -@click.argument("pool") -@click.argument("volume") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def ceph_volume_snapshot_remove(pool, volume, name, confirm_flag): - """ - Remove a Ceph RBD volume snapshot with name NAME from volume VOLUME in pool POOL. - - DANGER: This will completely remove the snapshot. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove snapshot {} for volume {}/{}".format(name, pool, volume), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retmsg = pvc_ceph.ceph_snapshot_remove(config, pool, volume, name) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc storage volume snapshot list -############################################################################### -@click.command(name="list", short_help="List cluster RBD volume shapshots.") -@click.argument("limit", default=None, required=False) -@click.option( - "-p", - "--pool", - "pool", - default=None, - show_default=True, - help="Show snapshots from this pool only.", -) -@click.option( - "-o", - "--volume", - "volume", - default=None, - show_default=True, - help="Show snapshots from this volume only.", -) -@cluster_req -def ceph_volume_snapshot_list(pool, volume, limit): - """ - List all Ceph RBD volume snapshots; optionally only match elements matching name regex LIMIT. 
- """ - - retcode, retdata = pvc_ceph.ceph_snapshot_list(config, limit, volume, pool) - if retcode: - retdata = pvc_ceph.format_list_snapshot(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner -############################################################################### -@click.group( - name="provisioner", - short_help="Manage PVC provisioner.", - context_settings=CONTEXT_SETTINGS, -) -def cli_provisioner(): - """ - Manage the PVC provisioner. - """ - pass - - -############################################################################### -# pvc provisioner template -############################################################################### -@click.group( - name="template", - short_help="Manage PVC provisioner templates.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_template(): - """ - Manage the PVC provisioner template system. - """ - pass - - -############################################################################### -# pvc provisioner template list -############################################################################### -@click.command(name="list", short_help="List all templates.") -@click.argument("limit", default=None, required=False) -@cluster_req -def provisioner_template_list(limit): - """ - List all templates in the PVC cluster provisioner. - """ - retcode, retdata = pvc_provisioner.template_list(config, limit) - if retcode: - retdata = pvc_provisioner.format_list_template(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template system -############################################################################### -@click.group( - name="system", - short_help="Manage PVC provisioner system templates.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_template_system(): - """ - Manage the PVC provisioner system templates. 
- """ - pass - - -############################################################################### -# pvc provisioner template system list -############################################################################### -@click.command(name="list", short_help="List all system templates.") -@click.argument("limit", default=None, required=False) -@cluster_req -def provisioner_template_system_list(limit): - """ - List all system templates in the PVC cluster provisioner. - """ - retcode, retdata = pvc_provisioner.template_list( - config, limit, template_type="system" - ) - if retcode: - retdata = pvc_provisioner.format_list_template(retdata, template_type="system") - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template system add -############################################################################### -@click.command(name="add", short_help="Add new system template.") -@click.argument("name") -@click.option( - "-u", "--vcpus", "vcpus", required=True, type=int, help="The number of vCPUs." -) -@click.option( - "-m", "--vram", "vram", required=True, type=int, help="The amount of vRAM (in MB)." 
-) -@click.option( - "-s/-S", - "--serial/--no-serial", - "serial", - is_flag=True, - default=False, - help="Enable the virtual serial console.", -) -@click.option( - "-n/-N", - "--vnc/--no-vnc", - "vnc", - is_flag=True, - default=False, - help="Enable/disable the VNC console.", -) -@click.option( - "-b", - "--vnc-bind", - "vnc_bind", - default=None, - help="Bind VNC to this IP address instead of localhost.", -) -@click.option( - "--node-limit", - "node_limit", - default=None, - help="Limit VM operation to this CSV list of node(s).", -) -@click.option( - "--node-selector", - "node_selector", - type=click.Choice( - ["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False - ), - default="none", - help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.', -) -@click.option( - "--node-autostart", - "node_autostart", - is_flag=True, - default=False, - help="Autostart VM with their parent Node on first/next boot.", -) -@click.option( - "--migration-method", - "migration_method", - type=click.Choice(["none", "live", "shutdown"], case_sensitive=False), - default=None, # Use cluster default - help="The preferred migration method of the VM between nodes", -) -@cluster_req -def provisioner_template_system_add( - name, - vcpus, - vram, - serial, - vnc, - vnc_bind, - node_limit, - node_selector, - node_autostart, - migration_method, -): - """ - Add a new system template NAME to the PVC cluster provisioner. - - For details on the possible "--node-selector" values, please see help for the command "pvc vm define". 
- """ - params = dict() - params["name"] = name - params["vcpus"] = vcpus - params["vram"] = vram - params["serial"] = serial - params["vnc"] = vnc - if vnc: - params["vnc_bind"] = vnc_bind - if node_limit: - params["node_limit"] = node_limit - if node_selector: - params["node_selector"] = node_selector - if node_autostart: - params["node_autostart"] = node_autostart - if migration_method: - params["migration_method"] = migration_method - - retcode, retdata = pvc_provisioner.template_add( - config, params, template_type="system" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template system modify -############################################################################### -@click.command(name="modify", short_help="Modify an existing system template.") -@click.argument("name") -@click.option("-u", "--vcpus", "vcpus", type=int, help="The number of vCPUs.") -@click.option("-m", "--vram", "vram", type=int, help="The amount of vRAM (in MB).") -@click.option( - "-s/-S", - "--serial/--no-serial", - "serial", - is_flag=True, - default=None, - help="Enable the virtual serial console.", -) -@click.option( - "-n/-N", - "--vnc/--no-vnc", - "vnc", - is_flag=True, - default=None, - help="Enable/disable the VNC console.", -) -@click.option( - "-b", - "--vnc-bind", - "vnc_bind", - help="Bind VNC to this IP address instead of localhost.", -) -@click.option( - "--node-limit", "node_limit", help="Limit VM operation to this CSV list of node(s)." 
-) -@click.option( - "--node-selector", - "node_selector", - type=click.Choice( - ["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False - ), - help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.', -) -@click.option( - "--node-autostart", - "node_autostart", - is_flag=True, - default=None, - help="Autostart VM with their parent Node on first/next boot.", -) -@click.option( - "--migration-method", - "migration_method", - type=click.Choice(["none", "live", "shutdown"], case_sensitive=False), - default=None, # Use cluster default - help="The preferred migration method of the VM between nodes", -) -@cluster_req -def provisioner_template_system_modify( - name, - vcpus, - vram, - serial, - vnc, - vnc_bind, - node_limit, - node_selector, - node_autostart, - migration_method, -): - """ - Add a new system template NAME to the PVC cluster provisioner. - - For details on the possible "--node-selector" values, please see help for the command "pvc vm define". 
- """ - params = dict() - params["vcpus"] = vcpus - params["vram"] = vram - params["serial"] = serial - params["vnc"] = vnc - params["vnc_bind"] = vnc_bind - params["node_limit"] = node_limit - params["node_selector"] = node_selector - params["node_autostart"] = node_autostart - params["migration_method"] = migration_method - - retcode, retdata = pvc_provisioner.template_modify( - config, params, name, template_type="system" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template system remove -############################################################################### -@click.command(name="remove", short_help="Remove system template.") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_template_system_remove(name, confirm_flag): - """ - Remove system template NAME from the PVC cluster provisioner. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove system template {}".format(name), prompt_suffix="? ", abort=True - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.template_remove( - config, name, template_type="system" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template network -############################################################################### -@click.group( - name="network", - short_help="Manage PVC provisioner network templates.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_template_network(): - """ - Manage the PVC provisioner network templates. 
- """ - pass - - -############################################################################### -# pvc provisioner template network list -############################################################################### -@click.command(name="list", short_help="List all network templates.") -@click.argument("limit", default=None, required=False) -@cluster_req -def provisioner_template_network_list(limit): - """ - List all network templates in the PVC cluster provisioner. - """ - retcode, retdata = pvc_provisioner.template_list( - config, limit, template_type="network" - ) - if retcode: - retdata = pvc_provisioner.format_list_template(retdata, template_type="network") - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template network add -############################################################################### -@click.command(name="add", short_help="Add new network template.") -@click.argument("name") -@click.option( - "-m", - "--mac-template", - "mac_template", - default=None, - help="Use this template for MAC addresses.", -) -@cluster_req -def provisioner_template_network_add(name, mac_template): - """ - Add a new network template to the PVC cluster provisioner. - - MAC address templates are used to provide predictable MAC addresses for provisioned VMs. - The normal format of a MAC template is: - - {prefix}:XX:XX:{vmid}{netid} - - The {prefix} variable is replaced by the provisioner with a standard prefix ("52:54:01"), - which is different from the randomly-generated MAC prefix ("52:54:00") to avoid accidental - overlap of MAC addresses. - - The {vmid} variable is replaced by a single hexidecimal digit representing the VM's ID, - the numerical suffix portion of its name; VMs without a suffix numeral have ID 0. VMs with - IDs greater than 15 (hexidecimal "f") will wrap back to 0. 
- - The {netid} variable is replaced by the sequential identifier, starting at 0, of the - network VNI of the interface; for example, the first interface is 0, the second is 1, etc. - - The four X digits are use-configurable. Use these digits to uniquely define the MAC - address. - - Example: pvc provisioner template network add --mac-template "{prefix}:2f:1f:{vmid}{netid}" test-template - - The location of the two per-VM variables can be adjusted at the administrator's discretion, - or removed if not required (e.g. a single-network template, or template for a single VM). - In such situations, be careful to avoid accidental overlap with other templates' variable - portions. - """ - params = dict() - params["name"] = name - params["mac_template"] = mac_template - - retcode, retdata = pvc_provisioner.template_add( - config, params, template_type="network" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template network remove -############################################################################### -@click.command(name="remove", short_help="Remove network template.") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_template_network_remove(name, confirm_flag): - """ - Remove network template MAME from the PVC cluster provisioner. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove network template {}".format(name), - prompt_suffix="? 
", - abort=True, - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.template_remove( - config, name, template_type="network" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template network vni -############################################################################### -@click.group( - name="vni", - short_help="Manage PVC provisioner network template VNIs.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_template_network_vni(): - """ - Manage the network VNIs in PVC provisioner network templates. - """ - pass - - -############################################################################### -# pvc provisioner template network vni add -############################################################################### -@click.command(name="add", short_help="Add network VNI to network template.") -@click.argument("name") -@click.argument("vni") -@cluster_req -def provisioner_template_network_vni_add(name, vni): - """ - Add a new network VNI to network template NAME. - - Networks will be added to VMs in the order they are added and displayed within the template. - """ - params = dict() - - retcode, retdata = pvc_provisioner.template_element_add( - config, name, vni, params, element_type="net", template_type="network" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template network vni remove -############################################################################### -@click.command(name="remove", short_help="Remove network VNI from network template.") -@click.argument("name") -@click.argument("vni") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_template_network_vni_remove(name, vni, confirm_flag): - """ - Remove network VNI from network template NAME. 
- """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove VNI {} from network template {}".format(vni, name), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.template_element_remove( - config, name, vni, element_type="net", template_type="network" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template storage -############################################################################### -@click.group( - name="storage", - short_help="Manage PVC provisioner storage templates.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_template_storage(): - """ - Manage the PVC provisioner storage templates. - """ - pass - - -############################################################################### -# pvc provisioner template storage list -############################################################################### -@click.command(name="list", short_help="List all storage templates.") -@click.argument("limit", default=None, required=False) -@cluster_req -def provisioner_template_storage_list(limit): - """ - List all storage templates in the PVC cluster provisioner. - """ - retcode, retdata = pvc_provisioner.template_list( - config, limit, template_type="storage" - ) - if retcode: - retdata = pvc_provisioner.format_list_template(retdata, template_type="storage") - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template storage add -############################################################################### -@click.command(name="add", short_help="Add new storage template.") -@click.argument("name") -@cluster_req -def provisioner_template_storage_add(name): - """ - Add a new storage template to the PVC cluster provisioner. 
- """ - params = dict() - params["name"] = name - - retcode, retdata = pvc_provisioner.template_add( - config, params, template_type="storage" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template storage remove -############################################################################### -@click.command(name="remove", short_help="Remove storage template.") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_template_storage_remove(name, confirm_flag): - """ - Remove storage template NAME from the PVC cluster provisioner. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove storage template {}".format(name), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.template_remove( - config, name, template_type="storage" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template storage disk -############################################################################### -@click.group( - name="disk", - short_help="Manage PVC provisioner storage template disks.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_template_storage_disk(): - """ - Manage the disks in PVC provisioner storage templates. - """ - pass - - -############################################################################### -# pvc provisioner template storage disk add -############################################################################### -@click.command(name="add", short_help="Add disk to storage template.") -@click.argument("name") -@click.argument("disk") -@click.option( - "-p", "--pool", "pool", required=True, help="The storage pool for the disk." 
-) -@click.option( - "-i", - "--source-volume", - "source_volume", - default=None, - help="The source volume to clone", -) -@click.option( - "-s", "--size", "size", type=int, default=None, help="The size of the disk (in GB)." -) -@click.option( - "-f", "--filesystem", "filesystem", default=None, help="The filesystem of the disk." -) -@click.option( - "--fsarg", - "fsargs", - default=None, - multiple=True, - help="Additional argument for filesystem creation, in arg=value format without leading dashes.", -) -@click.option( - "-m", - "--mountpoint", - "mountpoint", - default=None, - help="The target Linux mountpoint of the disk; requires a filesystem.", -) -@cluster_req -def provisioner_template_storage_disk_add( - name, disk, pool, source_volume, size, filesystem, fsargs, mountpoint -): - """ - Add a new DISK to storage template NAME. - - DISK must be a Linux-style sdX/vdX disk identifier, such as "sda" or "vdb". All disks in a template must use the same identifier format. - - Disks will be added to VMs in sdX/vdX order. For disks with mountpoints, ensure this order is sensible. - """ - - if source_volume and (size or filesystem or mountpoint): - echo( - 'The "--source-volume" option is not compatible with the "--size", "--filesystem", or "--mountpoint" options.' 
- ) - exit(1) - - params = dict() - params["pool"] = pool - params["source_volume"] = source_volume - params["disk_size"] = size - if filesystem: - params["filesystem"] = filesystem - if filesystem and fsargs: - dash_fsargs = list() - for arg in fsargs: - arg_len = len(arg.split("=")[0]) - if arg_len == 1: - dash_fsargs.append("-" + arg) - else: - dash_fsargs.append("--" + arg) - params["filesystem_arg"] = dash_fsargs - if filesystem and mountpoint: - params["mountpoint"] = mountpoint - - retcode, retdata = pvc_provisioner.template_element_add( - config, name, disk, params, element_type="disk", template_type="storage" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner template storage disk remove -############################################################################### -@click.command(name="remove", short_help="Remove disk from storage template.") -@click.argument("name") -@click.argument("disk") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_template_storage_disk_remove(name, disk, confirm_flag): - """ - Remove DISK from storage template NAME. - - DISK must be a Linux-style disk identifier such as "sda" or "vdb". - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove disk {} from storage template {}".format(disk, name), - prompt_suffix="? 
", - abort=True, - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.template_element_remove( - config, name, disk, element_type="disk", template_type="storage" - ) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner userdata -############################################################################### -@click.group( - name="userdata", - short_help="Manage PVC provisioner userdata documents.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_userdata(): - """ - Manage userdata documents in the PVC provisioner. - """ - pass - - -############################################################################### -# pvc provisioner userdata list -############################################################################### -@click.command(name="list", short_help="List all userdata documents.") -@click.argument("limit", default=None, required=False) -@click.option( - "-f", - "--full", - "full", - is_flag=True, - default=False, - help="Show all lines of the document instead of first 4.", -) -@cluster_req -def provisioner_userdata_list(limit, full): - """ - List all userdata documents in the PVC cluster provisioner. - """ - retcode, retdata = pvc_provisioner.userdata_list(config, limit) - if retcode: - if not full: - lines = 4 - else: - lines = None - retdata = pvc_provisioner.format_list_userdata(retdata, lines) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner userdata show -############################################################################### -@click.command(name="show", short_help="Show contents of userdata documents.") -@click.argument("name") -@cluster_req -def provisioner_userdata_show(name): - """ - Show the full contents of userdata document NAME. 
- """ - retcode, retdata = pvc_provisioner.userdata_show(config, name) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner userdata add -############################################################################### -@click.command(name="add", short_help="Define userdata document from file.") -@click.argument("name") -@click.argument("filename", type=click.File()) -@cluster_req -def provisioner_userdata_add(name, filename): - """ - Add a new userdata document NAME from file FILENAME. - """ - - # Open the YAML file - userdata = filename.read() - filename.close() - try: - yaml.load(userdata, Loader=yaml.SafeLoader) - except Exception as e: - echo("Error: Userdata document is malformed") - cleanup(False, e) - - params = dict() - params["name"] = name - params["data"] = userdata.strip() - - retcode, retmsg = pvc_provisioner.userdata_add(config, params) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc provisioner userdata modify -############################################################################### -@click.command(name="modify", short_help="Modify existing userdata document.") -@click.option( - "-e", - "--editor", - "editor", - is_flag=True, - help="Use local editor to modify existing document.", -) -@click.argument("name") -@click.argument("filename", type=click.File(), default=None, required=False) -@cluster_req -def provisioner_userdata_modify(name, filename, editor): - """ - Modify existing userdata document NAME, either in-editor or with replacement FILE. 
- """ - - if editor is False and filename is None: - cleanup(False, 'Either a file or the "--editor" option must be specified.') - - if editor is True: - # Grab the current config - retcode, retdata = pvc_provisioner.userdata_info(config, name) - if not retcode: - echo(retdata) - exit(1) - current_userdata = retdata["userdata"].strip() - - new_userdata = click.edit( - text=current_userdata, require_save=True, extension=".yaml" - ) - if new_userdata is None: - echo("Aborting with no modifications.") - exit(0) - else: - new_userdata = new_userdata.strip() - - # Show a diff and confirm - echo("Pending modifications:") - echo("") - diff = list( - difflib.unified_diff( - current_userdata.split("\n"), - new_userdata.split("\n"), - fromfile="current", - tofile="modified", - fromfiledate="", - tofiledate="", - n=3, - lineterm="", - ) - ) - for line in diff: - if re.match(r"^\+", line) is not None: - echo(colorama.Fore.GREEN + line + colorama.Fore.RESET) - elif re.match(r"^\-", line) is not None: - echo(colorama.Fore.RED + line + colorama.Fore.RESET) - elif re.match(r"^\^", line) is not None: - echo(colorama.Fore.BLUE + line + colorama.Fore.RESET) - else: - echo(line) - echo("") - - click.confirm("Write modifications to cluster?", abort=True) - - userdata = new_userdata - - # We're operating in replace mode - else: - # Open the new file - userdata = filename.read().strip() - filename.close() - - try: - yaml.load(userdata, Loader=yaml.SafeLoader) - except Exception as e: - echo("Error: Userdata document is malformed") - cleanup(False, e) - - params = dict() - params["data"] = userdata - - retcode, retmsg = pvc_provisioner.userdata_modify(config, name, params) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc provisioner userdata remove -############################################################################### -@click.command(name="remove", short_help="Remove userdata document.") 
-@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_userdata_remove(name, confirm_flag): - """ - Remove userdata document NAME from the PVC cluster provisioner. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove userdata document {}".format(name), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.userdata_remove(config, name) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner script -############################################################################### -@click.group( - name="script", - short_help="Manage PVC provisioner scripts.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_script(): - """ - Manage scripts in the PVC provisioner. - """ - pass - - -############################################################################### -# pvc provisioner script list -############################################################################### -@click.command(name="list", short_help="List all scripts.") -@click.argument("limit", default=None, required=False) -@click.option( - "-f", - "--full", - "full", - is_flag=True, - default=False, - help="Show all lines of the document instead of first 4.", -) -@cluster_req -def provisioner_script_list(limit, full): - """ - List all scripts in the PVC cluster provisioner. 
- """ - retcode, retdata = pvc_provisioner.script_list(config, limit) - if retcode: - if not full: - lines = 4 - else: - lines = None - retdata = pvc_provisioner.format_list_script(retdata, lines) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner script show -############################################################################### -@click.command(name="show", short_help="Show contents of script documents.") -@click.argument("name") -@cluster_req -def provisioner_script_show(name): - """ - Show the full contents of script document NAME. - """ - retcode, retdata = pvc_provisioner.script_show(config, name) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner script add -############################################################################### -@click.command(name="add", short_help="Define script from file.") -@click.argument("name") -@click.argument("filename", type=click.File()) -@cluster_req -def provisioner_script_add(name, filename): - """ - Add a new script NAME from file FILENAME. 
- """ - - # Open the XML file - script = filename.read() - filename.close() - - params = dict() - params["name"] = name - params["data"] = script.strip() - - retcode, retmsg = pvc_provisioner.script_add(config, params) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc provisioner script modify -############################################################################### -@click.command(name="modify", short_help="Modify existing script.") -@click.option( - "-e", - "--editor", - "editor", - is_flag=True, - help="Use local editor to modify existing document.", -) -@click.argument("name") -@click.argument("filename", type=click.File(), default=None, required=False) -@cluster_req -def provisioner_script_modify(name, filename, editor): - """ - Modify existing script NAME, either in-editor or with replacement FILE. - """ - - if editor is False and filename is None: - cleanup(False, 'Either a file or the "--editor" option must be specified.') - - if editor is True: - # Grab the current config - retcode, retdata = pvc_provisioner.script_info(config, name) - if not retcode: - echo(retdata) - exit(1) - current_script = retdata["script"].strip() - - new_script = click.edit(text=current_script, require_save=True, extension=".py") - if new_script is None: - echo("Aborting with no modifications.") - exit(0) - else: - new_script = new_script.strip() - - # Show a diff and confirm - echo("Pending modifications:") - echo("") - diff = list( - difflib.unified_diff( - current_script.split("\n"), - new_script.split("\n"), - fromfile="current", - tofile="modified", - fromfiledate="", - tofiledate="", - n=3, - lineterm="", - ) - ) - for line in diff: - if re.match(r"^\+", line) is not None: - echo(colorama.Fore.GREEN + line + colorama.Fore.RESET) - elif re.match(r"^\-", line) is not None: - echo(colorama.Fore.RED + line + colorama.Fore.RESET) - elif re.match(r"^\^", line) is not None: - echo(colorama.Fore.BLUE + line + 
colorama.Fore.RESET) - else: - echo(line) - echo("") - - click.confirm("Write modifications to cluster?", abort=True) - - script = new_script - - # We're operating in replace mode - else: - # Open the new file - script = filename.read().strip() - filename.close() - - params = dict() - params["data"] = script - - retcode, retmsg = pvc_provisioner.script_modify(config, name, params) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc provisioner script remove -############################################################################### -@click.command(name="remove", short_help="Remove script.") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_script_remove(name, confirm_flag): - """ - Remove script NAME from the PVC cluster provisioner. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove provisioning script {}".format(name), - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.script_remove(config, name) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner ova -############################################################################### -@click.group( - name="ova", - short_help="Manage PVC provisioner OVA images.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_ova(): - """ - Manage ovas in the PVC provisioner. 
- """ - pass - - -############################################################################### -# pvc provisioner ova list -############################################################################### -@click.command(name="list", short_help="List all OVA images.") -@click.argument("limit", default=None, required=False) -@cluster_req -def provisioner_ova_list(limit): - """ - List all OVA images in the PVC cluster provisioner. - """ - retcode, retdata = pvc_provisioner.ova_list(config, limit) - if retcode: - retdata = pvc_provisioner.format_list_ova(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner ova upload -############################################################################### -@click.command(name="upload", short_help="Upload OVA file.") -@click.argument("name") -@click.argument("filename") -@click.option( - "-p", "--pool", "pool", required=True, help="The storage pool for the OVA images." -) -@cluster_req -def provisioner_ova_upload(name, filename, pool): - """ - Upload a new OVA image NAME from FILENAME. - - Only single-file (.ova) OVA/OVF images are supported. For multi-file (.ovf + .vmdk) OVF images, concatenate them with "tar" then upload the resulting file. - - Once uploaded, a provisioner system template and OVA-type profile, each named NAME, will be created to store the configuration of the OVA. - - Note that the provisioner profile for the OVA will not contain any network template definitions, and will ignore network definitions from the OVA itself. The administrator must modify the profile's network template as appropriate to set the desired network configuration. - - Storage templates, provisioning scripts, and arguments for OVA-type profiles will be ignored and should not be set. 
- """ - if not os.path.exists(filename): - echo("ERROR: File '{}' does not exist!".format(filename)) - exit(1) - - params = dict() - params["pool"] = pool - params["ova_size"] = os.path.getsize(filename) - - retcode, retdata = pvc_provisioner.ova_upload(config, name, filename, params) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner ova remove -############################################################################### -@click.command(name="remove", short_help="Remove OVA image.") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_ova_remove(name, confirm_flag): - """ - Remove OVA image NAME from the PVC cluster provisioner. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove OVA image {}".format(name), prompt_suffix="? ", abort=True - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.ova_remove(config, name) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner profile -############################################################################### -@click.group( - name="profile", - short_help="Manage PVC provisioner profiless.", - context_settings=CONTEXT_SETTINGS, -) -def provisioner_profile(): - """ - Manage profiles in the PVC provisioner. - """ - pass - - -############################################################################### -# pvc provisioner profile list -############################################################################### -@click.command(name="list", short_help="List all profiles.") -@click.argument("limit", default=None, required=False) -@cluster_req -def provisioner_profile_list(limit): - """ - List all profiles in the PVC cluster provisioner. 
- """ - retcode, retdata = pvc_provisioner.profile_list(config, limit) - if retcode: - retdata = pvc_provisioner.format_list_profile(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner profile add -############################################################################### -@click.command(name="add", short_help="Add provisioner profile.") -@click.argument("name") -@click.option( - "-p", - "--profile-type", - "profile_type", - default="provisioner", - show_default=True, - type=click.Choice(["provisioner", "ova"], case_sensitive=False), - help="The type of profile.", -) -@click.option( - "-s", - "--system-template", - "system_template", - required=True, - help="The system template for the profile (required).", -) -@click.option( - "-n", - "--network-template", - "network_template", - help="The network template for the profile.", -) -@click.option( - "-t", - "--storage-template", - "storage_template", - help="The storage template for the profile.", -) -@click.option( - "-u", - "--userdata", - "userdata", - help="The userdata document for the profile.", -) -@click.option( - "-x", - "--script", - "script", - required=True, - help="The script for the profile (required).", -) -@click.option( - "-o", - "--ova", - "ova", - help="The OVA image for the profile; set automatically with 'provisioner ova upload'.", -) -@click.option( - "-a", - "--script-arg", - "script_args", - default=[], - multiple=True, - help="Additional argument to the script install() function in key=value format.", -) -@cluster_req -def provisioner_profile_add( - name, - profile_type, - system_template, - network_template, - storage_template, - userdata, - script, - ova, - script_args, -): - """ - Add a new provisioner profile NAME. 
- """ - params = dict() - params["name"] = name - params["profile_type"] = profile_type - params["system_template"] = system_template - params["network_template"] = network_template - params["storage_template"] = storage_template - params["userdata"] = userdata - params["script"] = script - params["ova"] = ova - params["arg"] = script_args - - retcode, retdata = pvc_provisioner.profile_add(config, params) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner profile modify -############################################################################### -@click.command(name="modify", short_help="Modify provisioner profile.") -@click.argument("name") -@click.option( - "-s", - "--system-template", - "system_template", - default=None, - help="The system template for the profile.", -) -@click.option( - "-n", - "--network-template", - "network_template", - default=None, - help="The network template for the profile.", -) -@click.option( - "-t", - "--storage-template", - "storage_template", - default=None, - help="The storage template for the profile.", -) -@click.option( - "-u", - "--userdata", - "userdata", - default=None, - help="The userdata document for the profile.", -) -@click.option( - "-x", "--script", "script", default=None, help="The script for the profile." -) -@click.option( - "-d", - "--delete-script-args", - "delete_script_args", - default=False, - is_flag=True, - help="Delete any existing script arguments.", -) -@click.option( - "-a", - "--script-arg", - "script_args", - default=None, - multiple=True, - help="Additional argument to the script install() function in key=value format.", -) -@cluster_req -def provisioner_profile_modify( - name, - system_template, - network_template, - storage_template, - userdata, - script, - delete_script_args, - script_args, -): - """ - Modify existing provisioner profile NAME. 
- """ - params = dict() - if system_template is not None: - params["system_template"] = system_template - if network_template is not None: - params["network_template"] = network_template - if storage_template is not None: - params["storage_template"] = storage_template - if userdata is not None: - params["userdata"] = userdata - if script is not None: - params["script"] = script - if delete_script_args: - params["arg"] = [] - if script_args is not None: - params["arg"] = script_args - - retcode, retdata = pvc_provisioner.profile_modify(config, name, params) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner profile remove -############################################################################### -@click.command(name="remove", short_help="Remove profile.") -@click.argument("name") -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the removal", -) -@cluster_req -def provisioner_profile_remove(name, confirm_flag): - """ - Remove profile NAME from the PVC cluster provisioner. - """ - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove profile {}".format(name), prompt_suffix="? 
", abort=True - ) - except Exception: - exit(0) - - retcode, retdata = pvc_provisioner.profile_remove(config, name) - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner create -############################################################################### -@click.command(name="create", short_help="Create new VM.") -@click.argument("name") -@click.argument("profile") -@click.option( - "-a", - "--script-arg", - "script_args", - default=[], - multiple=True, - help="Additional argument to the script install() function in key=value format.", -) -@click.option( - "-d/-D", - "--define/--no-define", - "define_flag", - is_flag=True, - default=True, - show_default=True, - help="Define the VM automatically during provisioning.", -) -@click.option( - "-s/-S", - "--start/--no-start", - "start_flag", - is_flag=True, - default=True, - show_default=True, - help="Start the VM automatically upon completion of provisioning.", -) -@click.option( - "-w", - "--wait", - "wait_flag", - is_flag=True, - default=False, - help="Wait for provisioning to complete, showing progress", -) -@cluster_req -def provisioner_create(name, profile, wait_flag, define_flag, start_flag, script_args): - """ - Create a new VM NAME with profile PROFILE. - - The "--no-start" flag can be used to prevent automatic startup of the VM once provisioning - is completed. This can be useful for the administrator to preform additional actions to - the VM after provisioning is completed. Note that the VM will remain in "provision" state - until its state is explicitly changed (e.g. with "pvc vm start"). - - The "--no-define" flag implies "--no-start", and can be used to prevent definition of the - created VM on the PVC cluster. This can be useful for the administrator to create a "template" - set of VM disks via the normal provisioner, but without ever starting the resulting VM. 
The - resulting disk(s) can then be used as source volumes in other disk templates. - - The "--script-arg" option can be specified as many times as required to pass additional, - VM-specific arguments to the provisioner install() function, beyond those set by the profile. - """ - if not define_flag: - start_flag = False - - retcode, retdata = pvc_provisioner.vm_create( - config, name, profile, wait_flag, define_flag, start_flag, script_args - ) - - if retcode and wait_flag: - task_id = retdata - - echo("Task ID: {}".format(task_id)) - echo("") - - # Wait for the task to start - echo("Waiting for task to start...", nl=False) - while True: - time.sleep(1) - task_status = pvc_provisioner.task_status(config, task_id, is_watching=True) - if task_status.get("state") != "PENDING": - break - echo(".", nl=False) - echo(" done.") - echo("") - - # Start following the task state, updating progress as we go - total_task = task_status.get("total") - with click.progressbar(length=total_task, show_eta=False) as bar: - last_task = 0 - maxlen = 0 - while True: - time.sleep(1) - if task_status.get("state") != "RUNNING": - break - if task_status.get("current") > last_task: - current_task = int(task_status.get("current")) - bar.update(current_task - last_task) - last_task = current_task - # The extensive spaces at the end cause this to overwrite longer previous messages - curlen = len(str(task_status.get("status"))) - if curlen > maxlen: - maxlen = curlen - lendiff = maxlen - curlen - overwrite_whitespace = " " * lendiff - echo( - " " + task_status.get("status") + overwrite_whitespace, - nl=False, - ) - task_status = pvc_provisioner.task_status( - config, task_id, is_watching=True - ) - if task_status.get("state") == "SUCCESS": - bar.update(total_task - last_task) - - echo("") - retdata = task_status.get("state") + ": " + task_status.get("status") - - cleanup(retcode, retdata) - - -############################################################################### -# pvc provisioner status 
-############################################################################### -@click.command(name="status", short_help="Show status of provisioner job.") -@click.argument("job", required=False, default=None) -@cluster_req -def provisioner_status(job): - """ - Show status of provisioner job JOB or a list of jobs. - """ - retcode, retdata = pvc_provisioner.task_status(config, job) - if job is None and retcode: - retdata = pvc_provisioner.format_list_task(retdata) - cleanup(retcode, retdata) - - -############################################################################### -# pvc maintenance -############################################################################### -@click.group( - name="maintenance", - short_help="Manage PVC cluster maintenance state.", - context_settings=CONTEXT_SETTINGS, -) -def cli_maintenance(): - """ - Manage the maintenance mode of the PVC cluster. - """ - pass - - -############################################################################### -# pvc maintenance on -############################################################################### -@click.command(name="on", short_help="Enable cluster maintenance mode.") -@cluster_req -def maintenance_on(): - """ - Enable maintenance mode on the PVC cluster. - """ - retcode, retdata = pvc_cluster.maintenance_mode(config, "true") - cleanup(retcode, retdata) - - -############################################################################### -# pvc maintenance off -############################################################################### -@click.command(name="off", short_help="Disable cluster maintenance mode.") -@cluster_req -def maintenance_off(): - """ - Disable maintenance mode on the PVC cluster. 
- """ - retcode, retdata = pvc_cluster.maintenance_mode(config, "false") - cleanup(retcode, retdata) - - -############################################################################### -# pvc status -############################################################################### -@click.command(name="status", short_help="Show current cluster status.") -@click.option( - "-f", - "--format", - "oformat", - default="plain", - show_default=True, - type=click.Choice(["plain", "short", "json", "json-pretty"]), - help="Output format of cluster status information.", -) -@cluster_req -def status_cluster(oformat): - """ - Show basic information and health for the active PVC cluster. - - Output formats: - - plain: Full text, full colour output for human-readability. - - short: Health-only, full colour output for human-readability. - - json: Compact JSON representation for machine parsing. - - json-pretty: Pretty-printed JSON representation for machine parsing or human-readability. - """ - - retcode, retdata = pvc_cluster.get_info(config) - if retcode: - retdata = pvc_cluster.format_info(retdata, oformat) - cleanup(retcode, retdata) - - -############################################################################### -# pvc task -############################################################################### -@click.group( - name="task", - short_help="Perform PVC cluster tasks.", - context_settings=CONTEXT_SETTINGS, -) -def cli_task(): - """ - Perform administrative tasks against the PVC cluster. 
- """ - pass - - -############################################################################### -# pvc task backup -############################################################################### -@click.command(name="backup", short_help="Create JSON backup of cluster.") -@click.option( - "-f", - "--file", - "filename", - default=None, - type=click.File(mode="w"), - help="Write backup data to this file.", -) -@cluster_req -def task_backup(filename): - """ - Create a JSON-format backup of the cluster Zookeeper database. - """ - - retcode, retdata = pvc_cluster.backup(config) - if retcode: - if filename is not None: - json.dump(json.loads(retdata), filename) - cleanup(retcode, 'Backup written to "{}".'.format(filename.name)) - else: - cleanup(retcode, retdata) - else: - cleanup(retcode, retdata) - - -############################################################################### -# pvc task restore -############################################################################### -@click.command(name="restore", short_help="Restore JSON backup to cluster.") -@click.option( - "-f", - "--file", - "filename", - required=True, - default=None, - type=click.File(), - help="Read backup data from this file.", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the restore", -) -@cluster_req -def task_restore(filename, confirm_flag): - """ - Restore the JSON backup data from a file to the cluster. - """ - - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - 'Replace all existing cluster data from coordinators with backup file "{}"'.format( - filename.name - ), - prompt_suffix="? 
", - abort=True, - ) - except Exception: - exit(0) - - cluster_data = json.loads(filename.read()) - retcode, retmsg = pvc_cluster.restore(config, cluster_data) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc task init -############################################################################### -@click.command(name="init", short_help="Initialize a new cluster.") -@click.option( - "-o", - "--overwrite", - "overwrite_flag", - is_flag=True, - default=False, - help="Remove and overwrite any existing data", -) -@click.option( - "-y", - "--yes", - "confirm_flag", - is_flag=True, - default=False, - help="Confirm the initialization", -) -@cluster_req -def task_init(confirm_flag, overwrite_flag): - """ - Perform initialization of a new PVC cluster. - - If the '-o'/'--overwrite' option is specified, all existing data in the cluster will be deleted - before new, empty data is written. - - It is not advisable to do this against a running cluster - all node daemons should be stopped - first and the API daemon started manually before running this command. - """ - - if not confirm_flag and not config["unsafe"]: - try: - click.confirm( - "Remove all existing cluster data from coordinators and initialize a new cluster", - prompt_suffix="? ", - abort=True, - ) - except Exception: - exit(0) - - # Easter-egg - echo("Some music while we're Layin' Pipe? 
https://youtu.be/sw8S_Kv89IU") - - retcode, retmsg = pvc_cluster.initialize(config, overwrite_flag) - cleanup(retcode, retmsg) - - -############################################################################### -# pvc -############################################################################### -@click.group(context_settings=CONTEXT_SETTINGS) -@click.option( - "-c", - "--cluster", - "_cluster", - envvar="PVC_CLUSTER", - default=None, - help="Cluster to connect to.", -) -@click.option( - "-v", - "--debug", - "_debug", - envvar="PVC_DEBUG", - is_flag=True, - default=False, - help="Additional debug details.", -) -@click.option( - "-q", - "--quiet", - "_quiet", - envvar="PVC_QUIET", - is_flag=True, - default=False, - help="Suppress cluster connection information.", -) -@click.option( - "-u", - "--unsafe", - "_unsafe", - envvar="PVC_UNSAFE", - is_flag=True, - default=False, - help='Allow unsafe operations without confirmation/"--yes" argument.', -) -@click.option( - "--colour", - "--color", - "_colour", - envvar="PVC_COLOUR", - is_flag=True, - default=False, - help="Force colourized output.", -) -@click.option( - "--version", is_flag=True, callback=print_version, expose_value=False, is_eager=True -) -def cli(_cluster, _debug, _quiet, _unsafe, _colour): - """ - Parallel Virtual Cluster CLI management tool - - Environment variables: - - "PVC_CLUSTER": Set the cluster to access instead of using --cluster/-c - - "PVC_DEBUG": Enable additional debugging details instead of using --debug/-v - - "PVC_QUIET": Suppress stderr connection output from client instead of using --quiet/-q - - "PVC_UNSAFE": Always suppress confirmations instead of needing --unsafe/-u or --yes/-y; USE WITH EXTREME CARE - - "PVC_COLOUR": Force colour on the output even if Click determines it is not a console (e.g. with 'watch') - - If no PVC_CLUSTER/--cluster is specified, attempts first to load the "local" cluster, checking - for an API configuration in "/etc/pvc/pvcapid.yaml". 
If this is also not found, abort. - """ - - global config - store_data = get_store(store_path) - config = get_config(store_data, _cluster) - - # There is only one cluster and no local cluster, so even if nothing was passed, use it - if len(store_data) == 1 and _cluster is None and config.get("badcfg", None): - config = get_config(store_data, list(store_data.keys())[0]) - - if not config.get("badcfg", None): - config["debug"] = _debug - config["unsafe"] = _unsafe - config["colour"] = _colour - config["quiet"] = _quiet - - audit() - - -# -# Click command tree -# -cli_cluster.add_command(cluster_add) -cli_cluster.add_command(cluster_remove) -cli_cluster.add_command(cluster_list) -cli_cluster.add_command(cluster_detail) - -cli_node.add_command(node_secondary) -cli_node.add_command(node_primary) -cli_node.add_command(node_flush) -cli_node.add_command(node_ready) -cli_node.add_command(node_unflush) -cli_node.add_command(node_log) -cli_node.add_command(node_info) -cli_node.add_command(node_list) - -vm_tags.add_command(vm_tags_get) -vm_tags.add_command(vm_tags_add) -vm_tags.add_command(vm_tags_remove) - -vm_vcpu.add_command(vm_vcpu_get) -vm_vcpu.add_command(vm_vcpu_set) - -vm_memory.add_command(vm_memory_get) -vm_memory.add_command(vm_memory_set) - -vm_network.add_command(vm_network_get) -vm_network.add_command(vm_network_add) -vm_network.add_command(vm_network_remove) - -vm_volume.add_command(vm_volume_get) -vm_volume.add_command(vm_volume_add) -vm_volume.add_command(vm_volume_remove) - -cli_vm.add_command(vm_define) -cli_vm.add_command(vm_meta) -cli_vm.add_command(vm_modify) -cli_vm.add_command(vm_rename) -cli_vm.add_command(vm_undefine) -cli_vm.add_command(vm_remove) -cli_vm.add_command(vm_dump) -cli_vm.add_command(vm_start) -cli_vm.add_command(vm_restart) -cli_vm.add_command(vm_shutdown) -cli_vm.add_command(vm_stop) -cli_vm.add_command(vm_disable) -cli_vm.add_command(vm_move) -cli_vm.add_command(vm_migrate) -cli_vm.add_command(vm_unmigrate) 
-cli_vm.add_command(vm_flush_locks) -cli_vm.add_command(vm_tags) -cli_vm.add_command(vm_vcpu) -cli_vm.add_command(vm_memory) -cli_vm.add_command(vm_network) -cli_vm.add_command(vm_volume) -cli_vm.add_command(vm_info) -cli_vm.add_command(vm_log) -cli_vm.add_command(vm_list) - -cli_network.add_command(net_add) -cli_network.add_command(net_modify) -cli_network.add_command(net_remove) -cli_network.add_command(net_info) -cli_network.add_command(net_list) -cli_network.add_command(net_dhcp) -cli_network.add_command(net_acl) -cli_network.add_command(net_sriov) - -net_dhcp.add_command(net_dhcp_list) -net_dhcp.add_command(net_dhcp_add) -net_dhcp.add_command(net_dhcp_remove) - -net_acl.add_command(net_acl_add) -net_acl.add_command(net_acl_remove) -net_acl.add_command(net_acl_list) - -net_sriov.add_command(net_sriov_pf) -net_sriov.add_command(net_sriov_vf) - -net_sriov_pf.add_command(net_sriov_pf_list) - -net_sriov_vf.add_command(net_sriov_vf_list) -net_sriov_vf.add_command(net_sriov_vf_info) -net_sriov_vf.add_command(net_sriov_vf_set) - -ceph_benchmark.add_command(ceph_benchmark_run) -ceph_benchmark.add_command(ceph_benchmark_info) -ceph_benchmark.add_command(ceph_benchmark_list) - -ceph_osd.add_command(ceph_osd_create_db_vg) -ceph_osd.add_command(ceph_osd_add) -ceph_osd.add_command(ceph_osd_replace) -ceph_osd.add_command(ceph_osd_refresh) -ceph_osd.add_command(ceph_osd_remove) -ceph_osd.add_command(ceph_osd_in) -ceph_osd.add_command(ceph_osd_out) -ceph_osd.add_command(ceph_osd_set) -ceph_osd.add_command(ceph_osd_unset) -ceph_osd.add_command(ceph_osd_list) - -ceph_pool.add_command(ceph_pool_add) -ceph_pool.add_command(ceph_pool_remove) -ceph_pool.add_command(ceph_pool_set_pgs) -ceph_pool.add_command(ceph_pool_list) - -ceph_volume.add_command(ceph_volume_add) -ceph_volume.add_command(ceph_volume_upload) -ceph_volume.add_command(ceph_volume_resize) -ceph_volume.add_command(ceph_volume_rename) -ceph_volume.add_command(ceph_volume_clone) 
-ceph_volume.add_command(ceph_volume_remove) -ceph_volume.add_command(ceph_volume_list) -ceph_volume.add_command(ceph_volume_snapshot) - -ceph_volume_snapshot.add_command(ceph_volume_snapshot_add) -ceph_volume_snapshot.add_command(ceph_volume_snapshot_rename) -ceph_volume_snapshot.add_command(ceph_volume_snapshot_remove) -ceph_volume_snapshot.add_command(ceph_volume_snapshot_list) - -cli_storage.add_command(ceph_status) -cli_storage.add_command(ceph_util) -cli_storage.add_command(ceph_benchmark) -cli_storage.add_command(ceph_osd) -cli_storage.add_command(ceph_pool) -cli_storage.add_command(ceph_volume) - -provisioner_template_system.add_command(provisioner_template_system_list) -provisioner_template_system.add_command(provisioner_template_system_add) -provisioner_template_system.add_command(provisioner_template_system_modify) -provisioner_template_system.add_command(provisioner_template_system_remove) - -provisioner_template_network.add_command(provisioner_template_network_list) -provisioner_template_network.add_command(provisioner_template_network_add) -provisioner_template_network.add_command(provisioner_template_network_remove) -provisioner_template_network.add_command(provisioner_template_network_vni) - -provisioner_template_network_vni.add_command(provisioner_template_network_vni_add) -provisioner_template_network_vni.add_command(provisioner_template_network_vni_remove) - -provisioner_template_storage.add_command(provisioner_template_storage_list) -provisioner_template_storage.add_command(provisioner_template_storage_add) -provisioner_template_storage.add_command(provisioner_template_storage_remove) -provisioner_template_storage.add_command(provisioner_template_storage_disk) - -provisioner_template_storage_disk.add_command(provisioner_template_storage_disk_add) -provisioner_template_storage_disk.add_command(provisioner_template_storage_disk_remove) - -provisioner_template.add_command(provisioner_template_system) 
-provisioner_template.add_command(provisioner_template_network) -provisioner_template.add_command(provisioner_template_storage) -provisioner_template.add_command(provisioner_template_list) - -provisioner_userdata.add_command(provisioner_userdata_list) -provisioner_userdata.add_command(provisioner_userdata_show) -provisioner_userdata.add_command(provisioner_userdata_add) -provisioner_userdata.add_command(provisioner_userdata_modify) -provisioner_userdata.add_command(provisioner_userdata_remove) - -provisioner_script.add_command(provisioner_script_list) -provisioner_script.add_command(provisioner_script_show) -provisioner_script.add_command(provisioner_script_add) -provisioner_script.add_command(provisioner_script_modify) -provisioner_script.add_command(provisioner_script_remove) - -provisioner_ova.add_command(provisioner_ova_list) -provisioner_ova.add_command(provisioner_ova_upload) -provisioner_ova.add_command(provisioner_ova_remove) - -provisioner_profile.add_command(provisioner_profile_list) -provisioner_profile.add_command(provisioner_profile_add) -provisioner_profile.add_command(provisioner_profile_modify) -provisioner_profile.add_command(provisioner_profile_remove) - -cli_provisioner.add_command(provisioner_template) -cli_provisioner.add_command(provisioner_userdata) -cli_provisioner.add_command(provisioner_script) -cli_provisioner.add_command(provisioner_ova) -cli_provisioner.add_command(provisioner_profile) -cli_provisioner.add_command(provisioner_create) -cli_provisioner.add_command(provisioner_status) - -cli_maintenance.add_command(maintenance_on) -cli_maintenance.add_command(maintenance_off) - -cli_task.add_command(task_backup) -cli_task.add_command(task_restore) -cli_task.add_command(task_init) - -cli.add_command(cli_cluster) -cli.add_command(cli_node) -cli.add_command(cli_vm) -cli.add_command(cli_network) -cli.add_command(cli_storage) -cli.add_command(cli_provisioner) -cli.add_command(cli_maintenance) -cli.add_command(cli_task) 
-cli.add_command(status_cluster) - - -# -# Main entry point -# -def main(): - return cli(obj={}) - - -if __name__ == "__main__": - main() diff --git a/client-cli-old/scripts/README b/client-cli-old/scripts/README deleted file mode 100644 index 34590a49..00000000 --- a/client-cli-old/scripts/README +++ /dev/null @@ -1,32 +0,0 @@ -# PVC helper scripts - -These helper scripts are included with the PVC client to aid administrators in some meta-functions. - -The following scripts are provided for use: - -## `migrate_vm` - -Migrates a VM, with downtime, from one PVC cluster to another. - -`migrate_vm ` - -### Arguments - - * `vm`: The virtual machine to migrate - * `source_cluster`: The source PVC cluster; must be a valid cluster to the local PVC client - * `destination_cluster`: The destination PVC cluster; must be a valid cluster to the local PVC client - -## `import_vm` - -Imports a VM from another platform into a PVC cluster. - -## `export_vm` - -Exports a (stopped) VM from a PVC cluster to another platform. - -`export_vm ` - -### Arguments - - * `vm`: The virtual machine to migrate - * `source_cluster`: The source PVC cluster; must be a valid cluster to the local PVC client diff --git a/client-cli-old/scripts/export_vm b/client-cli-old/scripts/export_vm deleted file mode 100755 index e5c4e451..00000000 --- a/client-cli-old/scripts/export_vm +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env bash - -# export_vm - Exports a VM from a PVC cluster to local files -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -set -o errexit -set -o pipefail - -usage() { - echo -e "Export a VM from a PVC cluster to local files." - echo -e "Usage:" - echo -e " $0 []" - echo -e "" - echo -e "Important information:" - echo -e " * The local user must have valid SSH access to the primary coordinator in the source_cluster." - echo -e " * The user on the cluster primary coordinator must have 'sudo' access." - echo -e " * If the VM is not in 'stop' state, it will be shut down." - echo -e " * Do not switch the cluster primary coordinator while the script is running." - echo -e " * Ensure you have enough space in to store all VM disk images." -} - -fail() { - echo -e "$@" - exit 1 -} - -# Arguments -if [[ -z ${1} || -z ${2} ]]; then - usage - exit 1 -fi -source_vm="${1}" -source_cluster="${2}" -if [[ -n "${3}" ]]; then - destination_directory="${3}" -else - destination_directory="." -fi - -# Verify the cluster is reachable -pvc -c ${source_cluster} status &>/dev/null || fail "Specified source_cluster is not accessible" - -# Determine the connection IP -cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${source_cluster}" | awk '{ print $2 }' )" - -# Attempt to connect to the cluster address -ssh ${cluster_address} which pvc &>/dev/null || fail "Could not SSH to source_cluster primary coordinator host" - -# Verify that the VM exists -pvc -c ${source_cluster} vm info ${source_vm} &>/dev/null || fail "Specified VM is not present on the cluster" - -echo "Verification complete." - -# Shut down the VM -echo -n "Shutting down VM..." -set +o errexit -pvc -c ${source_cluster} vm shutdown ${source_vm} &>/dev/null -shutdown_success=$? -while ! 
pvc -c ${source_cluster} vm info ${source_vm} 2>/dev/null | grep '^State' | grep -q -E 'stop|disable'; do - sleep 1 - echo -n "." -done -set -o errexit -echo " done." - -# Dump the XML file -echo -n "Exporting VM configuration file... " -pvc -c ${source_cluster} vm dump ${source_vm} 1> ${destination_directory}/${source_vm}.xml 2>/dev/null -echo "done". - -# Determine the list of volumes in this VM -volume_list="$( pvc -c ${source_cluster} vm info --long ${source_vm} 2>/dev/null | grep -w 'rbd' | awk '{ print $3 }' )" -for volume in ${volume_list}; do - volume_pool="$( awk -F '/' '{ print $1 }' <<<"${volume}" )" - volume_name="$( awk -F '/' '{ print $2 }' <<<"${volume}" )" - volume_size="$( pvc -c ${source_cluster} storage volume list -p ${volume_pool} ${volume_name} 2>/dev/null | grep "^${volume_name}" | awk '{ print $3 }' )" - echo -n "Exporting disk ${volume_name} (${volume_size})... " - ssh ${cluster_address} sudo rbd map ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume}" - ssh ${cluster_address} sudo dd if="/dev/rbd/${volume_pool}/${volume_name}" bs=1M 2>/dev/null | dd bs=1M of="${destination_directory}/${volume_name}.img" 2>/dev/null - ssh ${cluster_address} sudo rbd unmap ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume}" - echo "done." -done diff --git a/client-cli-old/scripts/force_single_node b/client-cli-old/scripts/force_single_node deleted file mode 100755 index 804a3c1c..00000000 --- a/client-cli-old/scripts/force_single_node +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env bash - -# force_single_node - Manually promote a single coordinator node from a degraded cluster -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -set -o errexit -set -o pipefail - -usage() { - echo -e "Manually promote a single coordinator node from a degraded cluster" - echo -e "" - echo -e "DANGER: This action will cause a permanent split-brain within the cluster" - echo -e " which will have to be corrected manually upon cluster restoration." - echo -e "" - echo -e "This script is primarily designed for small clusters in situations where 2" - echo -e "of the 3 coordinators have become unreachable or shut down. It will promote" - echo -e "the remaining lone_node to act as a standalone coordinator, allowing basic" - echo -e "cluster functionality to continue in a heavily degraded state until the" - echo -e "situation can be rectified. This should only be done in exceptional cases" - echo -e "as a disaster recovery mechanism when the remaining nodes will remain down" - echo -e "for a significant amount of time but some VMs are required to run. In general," - echo -e "use of this script is not advisable." - echo -e "" - echo -e "Usage:" - echo -e " $0 " - echo -e "" - echo -e "Important information:" - echo -e " * The lone_node must be a fully-qualified name that is directly reachable from" - echo -e " the local system via SSH." - echo -e " * The local user must have valid SSH access to the lone_node in the cluster." - echo -e " * The user on the cluster node must have 'sudo' access." 
-} - -fail() { - echo -e "$@" - exit 1 -} - -# Arguments -if [[ -z ${1} || -z ${2} ]]; then - usage - exit 1 -fi -target_cluster="${1}" -lone_node="${2}" -lone_node_shortname="${lone_node%%.*}" - -# Attempt to connect to the node -ssh ${lone_node} which pvc &>/dev/null || fail "Could not SSH to the lone_node host" - -echo "Verification complete." - -echo -n "Allowing Ceph single-node operation... " -temp_monmap="$( ssh ${lone_node} mktemp )" -ssh ${lone_node} "sudo systemctl stop ceph-mon@${lone_node_shortname}" &>/dev/null -ssh ${lone_node} "ceph-mon -i ${lone_node_shortname} --extract-monmap ${temp_monmap}" &>/dev/null -ssh ${lone_node} "sudo cp ${tmp_monmap} /etc/ceph/monmap.orig" &>/dev/null -mon_list="$( ssh ${lone_node} strings ${temp_monmap} | sort | uniq )" -for mon in ${mon_list}; do - if [[ ${mon} == ${lone_node_shortname} ]]; then - continue - fi - ssh ${lone_node} "sudo monmaptool ${temp_monmap} --rm ${mon}" &>/dev/null -done -ssh ${lone_node} "sudo ceph-mon -i ${lone_node_shortname} --inject-monmap ${temp_monmap}" &>/dev/null -ssh ${lone_node} "sudo systemctl start ceph-mon@${lone_node_shortname}" &>/dev/null -sleep 5 -ssh ${lone_node} "sudo ceph osd set noout" &>/dev/null -echo "done." -echo -e "Restoration steps:" -echo -e " sudo systemctl stop ceph-mon@${lone_node_shortname}" -echo -e " sudo ceph-mon -i ${lone_node_shortname} --inject-monmap /etc/ceph/monmap.orig" -echo -e " sudo systemctl start ceph-mon@${lone_node_shortname}" -echo -e " sudo ceph osd unset noout" - -echo -n "Allowing Zookeeper single-node operation... 
" -temp_zoocfg="$( ssh ${lone_node} mktemp )" -ssh ${lone_node} "sudo systemctl stop zookeeper" -ssh ${lone_node} "sudo awk -v lone_node=${lone_node_shortname} '{ -FS="=|:" -if ( $1 ~ /^server/ ){ - if ($2 == lone_node) { - print $0 - } else { - print "#" $0 - } -} else { - print $0 -} -}' /etc/zookeeper/conf/zoo.cfg > ${temp_zoocfg}" -ssh ${lone_node} "sudo mv /etc/zookeeper/conf/zoo.cfg /etc/zookeeper/conf/zoo.cfg.orig" -ssh ${lone_node} "sudo mv ${temp_zoocfg} /etc/zookeeper/conf/zoo.cfg" -ssh ${lone_node} "sudo systemctl start zookeeper" -echo "done." -echo -e "Restoration steps:" -echo -e " sudo systemctl stop zookeeper" -echo -e " sudo mv /etc/zookeeper/conf/zoo.cfg.orig /etc/zookeeper/conf/zoo.cfg" -echo -e " sudo systemctl start zookeeper" -ssh ${lone_node} "sudo systemctl stop ceph-mon@${lone_node_shortname}" - -echo "" -ssh ${lone_node} "sudo pvc status 2>/dev/null" diff --git a/client-cli-old/scripts/import_vm b/client-cli-old/scripts/import_vm deleted file mode 100755 index 446c6012..00000000 --- a/client-cli-old/scripts/import_vm +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env bash - -# import_vm - Imports a VM to a PVC cluster from local files -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
-# -############################################################################### - -set -o errexit -set -o pipefail - -usage() { - echo -e "Import a VM to a PVC cluster from local files." - echo -e "Usage:" - echo -e " $0 [] [...]" - echo -e "" - echo -e "Important information:" - echo -e " * At least one disk must be specified; all disks that are present in vm_configuration_file" - echo -e " should be specified, though this is not strictly requireda." - echo -e " * Do not switch the cluster primary coordinator while the script is running." - echo -e " * Ensure you have enough space on the destination cluster to store all VM disks." -} - -fail() { - echo -e "$@" - exit 1 -} - -# Arguments -if [[ -z ${1} || -z ${2} || -z ${3} || -z ${4} ]]; then - usage - exit 1 -fi -destination_cluster="${1}"; shift -destination_pool="${1}"; shift -vm_config_file="${1}"; shift -vm_disk_files=( ${@} ) - -# Verify the cluster is reachable -pvc -c ${destination_cluster} status &>/dev/null || fail "Specified destination_cluster is not accessible" - -# Determine the connection IP -cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${destination_cluster}" | awk '{ print $2 }' )" - -echo "Verification complete." - -# Determine information about the VM from the config file -parse_xml_field() { - field="${1}" - line="$( grep -F "<${field}>" ${vm_config_file} )" - awk -F '>|<' '{ print $3 }' <<<"${line}" -} -vm_name="$( parse_xml_field name )" -echo "Importing VM ${vm_name}..." -pvc -c ${destination_cluster} vm define ${vm_config_file} 2>/dev/null - -# Create the disks on the cluster -for disk_file in ${vm_disk_files[@]}; do - disk_file_basename="$( basename ${disk_file} )" - disk_file_ext="${disk_file_basename##*.}" - disk_file_name="$( basename ${disk_file_basename} .${disk_file_ext} )" - disk_file_size="$( stat --format="%s" ${disk_file} )" - - echo "Importing disk ${disk_file_name}... 
" - pvc -c ${destination_cluster} storage volume add ${destination_pool} ${disk_file_name} ${disk_file_size}B 2>/dev/null - pvc -c ${destination_cluster} storage volume upload ${destination_pool} ${disk_file_name} ${disk_file} 2>/dev/null -done diff --git a/client-cli-old/scripts/migrate_vm b/client-cli-old/scripts/migrate_vm deleted file mode 100755 index f8d8e73e..00000000 --- a/client-cli-old/scripts/migrate_vm +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env bash - -# migrate_vm - Exports a VM from a PVC cluster to another PVC cluster -# Part of the Parallel Virtual Cluster (PVC) system -# -# Copyright (C) 2018-2022 Joshua M. Boniface -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### - -set -o errexit -set -o pipefail - -usage() { - echo -e "Export a VM from a PVC cluster to another PVC cluster." - echo -e "Usage:" - echo -e " $0 " - echo -e "" - echo -e "Important information:" - echo -e " * The local user must have valid SSH access to the primary coordinator in the source_cluster." - echo -e " * The user on the cluster primary coordinator must have 'sudo' access." - echo -e " * If the VM is not in 'stop' state, it will be shut down." - echo -e " * Do not switch the cluster primary coordinator on either cluster while the script is running." - echo -e " * Ensure you have enough space on the target cluster to store all VM disks." 
-} - -fail() { - echo -e "$@" - exit 1 -} - -# Arguments -if [[ -z ${1} || -z ${2} || -z ${3} || -z ${4} ]]; then - usage - exit 1 -fi -source_vm="${1}" -source_cluster="${2}" -destination_cluster="${3}" -destination_pool="${4}" - -# Verify each cluster is reachable -pvc -c ${source_cluster} status &>/dev/null || fail "Specified source_cluster is not accessible" -pvc -c ${destination_cluster} status &>/dev/null || fail "Specified destination_cluster is not accessible" - -# Determine the connection IPs -source_cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${source_cluster}" | awk '{ print $2 }' )" -destination_cluster_address="$( pvc cluster list 2>/dev/null | grep -i "^${destination_cluster}" | awk '{ print $2 }' )" - -# Attempt to connect to the cluster addresses -ssh ${source_cluster_address} which pvc &>/dev/null || fail "Could not SSH to source_cluster primary coordinator host" -ssh ${destination_cluster_address} which pvc &>/dev/null || fail "Could not SSH to destination_cluster primary coordinator host" - -# Verify that the VM exists -pvc -c ${source_cluster} vm info ${source_vm} &>/dev/null || fail "Specified VM is not present on the source cluster" - -echo "Verification complete." - -# Shut down the VM -echo -n "Shutting down VM..." -set +o errexit -pvc -c ${source_cluster} vm shutdown ${source_vm} &>/dev/null -shutdown_success=$? -while ! pvc -c ${source_cluster} vm info ${source_vm} 2>/dev/null | grep '^State' | grep -q -E 'stop|disable'; do - sleep 1 - echo -n "." -done -set -o errexit -echo " done." - -tempfile="$( mktemp )" - -# Dump the XML file -echo -n "Exporting VM configuration file from source cluster... " -pvc -c ${source_cluster} vm dump ${source_vm} 1> ${tempfile} 2>/dev/null -echo "done." - -# Import the XML file -echo -n "Importing VM configuration file to destination cluster... " -pvc -c ${destination_cluster} vm define ${tempfile} -echo "done." 
- -rm -f ${tempfile} - -# Determine the list of volumes in this VM -volume_list="$( pvc -c ${source_cluster} vm info --long ${source_vm} 2>/dev/null | grep -w 'rbd' | awk '{ print $3 }' )" - -# Parse and migrate each volume -for volume in ${volume_list}; do - volume_pool="$( awk -F '/' '{ print $1 }' <<<"${volume}" )" - volume_name="$( awk -F '/' '{ print $2 }' <<<"${volume}" )" - volume_size="$( pvc -c ${source_cluster} storage volume list -p ${volume_pool} ${volume_name} 2>/dev/null | grep "^${volume_name}" | awk '{ print $3 }' )" - echo "Transferring disk ${volume_name} (${volume_size})... " - pvc -c ${destination_cluster} storage volume add ${destination_pool} ${volume_name} ${volume_size} 2>/dev/null - ssh ${source_cluster_address} sudo rbd map ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume} on source cluster" - ssh ${destination_cluster_address} sudo rbd map ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to map volume ${volume} on destination cluster" - ssh ${source_cluster_address} sudo dd if="/dev/rbd/${volume_pool}/${volume_name}" bs=1M 2>/dev/null | pv | ssh ${destination_cluster_address} sudo dd bs=1M of="/dev/rbd/${destination_pool}/${volume_name}" 2>/dev/null - ssh ${source_cluster_address} sudo rbd unmap ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume} on source cluster" - ssh ${destination_cluster_address} sudo rbd unmap ${volume_pool}/${volume_name} &>/dev/null || fail "Failed to unmap volume ${volume} on destination cluster" -done - -if [[ ${shutdown_success} -eq 0 ]]; then - pvc -c ${destination_cluster} vm start ${source_vm} -fi diff --git a/client-cli-old/setup.py b/client-cli-old/setup.py deleted file mode 100644 index b0af6b58..00000000 --- a/client-cli-old/setup.py +++ /dev/null @@ -1,20 +0,0 @@ -from setuptools import setup - -setup( - name="pvc", - version="0.9.63", - packages=["pvc", "pvc.lib"], - install_requires=[ - "Click", - "PyYAML", - "lxml", - 
"colorama", - "requests", - "requests-toolbelt", - ], - entry_points={ - "console_scripts": [ - "pvc = pvc.pvc:cli", - ], - }, -)