diff --git a/cli-client-new/pvc.py b/cli-client-new/pvc.py new file mode 100755 index 00000000..bb643515 --- /dev/null +++ b/cli-client-new/pvc.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 + +# pvc.py - PVC client command-line interface (stub testing interface) +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +############################################################################### + +from pvc.cli.cli import cli + + +# +# Main entry point +# +def main(): + return cli(obj={}) + + +if __name__ == "__main__": + main() diff --git a/cli-client-new/pvc/__init__.py b/cli-client-new/pvc/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli-client-new/pvc/cli/cli.py b/cli-client-new/pvc/cli/cli.py new file mode 100644 index 00000000..3eb4a4d7 --- /dev/null +++ b/cli-client-new/pvc/cli/cli.py @@ -0,0 +1,609 @@ +#!/usr/bin/env python3 + +# cli.py - PVC Click CLI main library +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2023 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +############################################################################### + +from functools import wraps +from json import dumps as jdumps +from os import environ, makedirs, path +from pkg_resources import get_distribution + +from pvc.cli.helpers import * +from pvc.cli.parsers import * +from pvc.cli.formatters import * + +import click + + +############################################################################### +# Context and completion handler +############################################################################### + + +CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"], max_content_width=120) +IS_COMPLETION = True if environ.get("_PVC_COMPLETE", "") == "complete" else False + +CLI_CONFIG = dict() + +if not IS_COMPLETION: + cli_client_dir = environ.get("PVC_CLIENT_DIR", None) + home_dir = environ.get("HOME", None) + if cli_client_dir: + store_path = cli_client_dir + elif home_dir: + store_path = f"{home_dir}/.config/pvc" + else: + print( + "WARNING: No client or home configuration directory found; using /tmp instead" + ) + store_path = "/tmp/pvc" + + if not path.isdir(store_path): + makedirs(store_path) + + if not path.isfile(f"{store_path}/{DEFAULT_STORE_FILENAME}"): + update_store(store_path, {"local": DEFAULT_STORE_DATA}) + + +############################################################################### +# Local helper functions +############################################################################### + + +def echo(message, newline=True, err=False): + """ + Output a message with click.echo respecting our configuration + """ + + if CLI_CONFIG.get("colour", False): + colour = True + else: + colour = None + + click.echo(message=message, color=colour, nl=newline, err=err) + + +def finish(success=True, data=None, formatter=None): + """ + Output data to the terminal and exit based on code (T/F or integer code) + """ + + if data is not None: + if formatter is not None: + echo(formatter(data)) + else: + echo(data) + + # Allow passing + if isinstance(success, int): + exit(success) + + if success: + exit(0) + else: + exit(1) + + +def version(ctx, param, value): + """ + Show the version of the CLI client + """ + + if not value or ctx.resilient_parsing: + return + + version = get_distribution("pvc").version + echo(f"Parallel Virtual Cluster CLI client version {version}") + ctx.exit() + + +############################################################################### +# Click command decorators +############################################################################### + + +def connection_req(function): + """ + General Decorator: + Wraps a Click command which requires a connection to be set and validates that it is present + """ + + @wraps(function) + def validate_connection(*args, **kwargs): + if CLI_CONFIG.get("badcfg", None): + echo( + 'No connection specified and no local API configuration found. Use "pvc connection" to add a connection.' 
+ ) + exit(1) + + if not CLI_CONFIG.get("quiet", False): + if CLI_CONFIG.get("api_scheme") == "https" and not CLI_CONFIG.get( + "verify_ssl" + ): + ssl_verify_msg = " (unverified)" + else: + ssl_verify_msg = "" + + echo( + f'''Using connection "{CLI_CONFIG.get('connection')}" - Host: "{CLI_CONFIG.get('api_host')}" Scheme: "{CLI_CONFIG.get('api_scheme')}{ssl_verify_msg}" Prefix: "{CLI_CONFIG.get('api_prefix')}"''', + stderr=True, + ) + echo( + "", + stderr=True, + ) + + return function(*args, **kwargs) + + return validate_connection + + +def restart_opt(function): + """ + Click Option Decorator: + Wraps a Click command which requires confirm_flag or unsafe option or asks for VM restart confirmation + """ + + @click.option( + "-r", + "--restart", + "restart_flag", + is_flag=True, + default=False, + help="Immediately restart VM to apply changes.", + ) + @wraps(function) + def confirm_action(*args, **kwargs): + confirm_action = True + if "restart_flag" in kwargs: + if not kwargs.get("restart_flag", False): + if not CLI_CONFIG.get("unsafe", False): + confirm_action = True + else: + confirm_action = False + else: + confirm_action = False + else: + confirm_action = False + + if confirm_action: + try: + click.confirm( + f"Restart VM {kwargs.get('vm')}", prompt_suffix="? ", abort=True + ) + except Exception: + echo("Changes will be applied on next VM start/restart.") + kwargs["restart_flag"] = False + + return function(*args, **kwargs) + + return confirm_action + + +def confirm_opt(message): + """ + Click Option Decorator with argument: + Wraps a Click command which requires confirm_flag or unsafe option or asks for confirmation with message + """ + + def confirm_decorator(function): + @click.option( + "-y", + "--yes", + "confirm_flag", + is_flag=True, + default=False, + help="Pre-confirm any unsafe operations.", + ) + @wraps(function) + def confirm_action(*args, **kwargs): + confirm_action = True + if "confirm_flag" in kwargs: + if not kwargs.get("confirm_flag", False): + if not CLI_CONFIG.get("unsafe", False): + confirm_action = True + else: + confirm_action = False + else: + confirm_action = False + else: + confirm_action = False + + if confirm_action: + try: + click.confirm(message, prompt_suffix="? ", abort=True) + except Exception: + exit(0) + + del kwargs["confirm_flag"] + + return function(*args, **kwargs) + + return confirm_action + + return confirm_decorator + + +def format_opt(formats, default_format="pretty"): + """ + Click Option Decorator with argument: + Wraps a Click command that can output in multiple formats; {formats} defines a dictionary of + formatting functions for the command with keys as valid format types + e.g. { "json": lambda d: json.dumps(d), "pretty": format_function_pretty, ... 
} + """ + + if default_format not in formats.keys(): + echo(f"Fatal code error: {default_format} not in {formats.keys()}") + exit(255) + + def format_decorator(function): + @click.option( + "-f", + "--format", + "output_format", + default=default_format, + show_default=True, + type=click.Choice(formats.keys()), + help="Output information in this format.", + ) + @wraps(function) + def format_action(*args, **kwargs): + kwargs["format_function"] = formats[kwargs["output_format"]] + + del kwargs["output_format"] + + return function(*args, **kwargs) + + return format_action + + return format_decorator + + +# Decorators example +@click.command(name="testing", short_help="Testing") # Click command +@connection_req # Require a connection to be set +@click.argument("vm") # A Click argument +@confirm_opt("Confirm this very dangerous task") # A "--yes" confirmation option +@restart_opt # A "--restart" confirmation option (adds 'restart_flag') +@format_opt( # A "--format" output option (adds 'format_function') + { + "pretty": lambda d: d, # This dictionary is of "type":"callable" entries, where each + "json": lambda d: jdumps( + d + ), # key is the nice name for the user to specify, and the value + "json-pretty": lambda d: jdumps( + d, indent=2 + ), # is a callable that takes in the provided data to format + }, + default_format="json-pretty", # Can also set a default if "pretty" shouldn't be the default +) +# Always in format {arguments}, {options}, {flags}, {format_function} +def testing(vm, restart_flag, format_function): + echo(vm) + echo(restart_flag) + echo(format_function) + + data = { + "athing": "value", + "anotherthing": 1234, + "thelist": ["a", "b", "c"], + } + + finish(True, data, format_function) + + +############################################################################### +# pvc connection +############################################################################### +@click.group( + name="connection", + short_help="Manage PVC cluster connections.", + context_settings=CONTEXT_SETTINGS, +) +def cli_connection(): + """ + Manage the PVC clusters this CLI client can connect to. + """ + pass + + +############################################################################### +# pvc connection add +############################################################################### +@click.command( + name="add", + short_help="Add connections to the client database.", +) +@click.argument("name") +@click.option( + "-d", + "--description", + "description", + required=False, + default="N/A", + help="A text description of the connection.", +) +@click.option( + "-a", + "--address", + "address", + required=True, + help="The IP address/hostname of the connection API.", +) +@click.option( + "-p", + "--port", + "port", + required=False, + default=7370, + show_default=True, + help="The port of the connection API.", +) +@click.option( + "-k", + "--api-key", + "api_key", + required=False, + default=None, + help="An API key to use for authentication, if required.", +) +@click.option( + "-s/-S", + "--ssl/--no-ssl", + "ssl_flag", + is_flag=True, + default=False, + help="Whether or not to use SSL for the API connection. [default: False]", +) +def cli_connection_add(name, description, address, port, api_key, ssl_flag): + """ + Add the PVC connection NAME to the database of the local CLI client. + + Adding a connection with an existing NAME will replace the existing connection. 
+ """ + + # Set the scheme based on {ssl_flag} + scheme = "https" if ssl_flag else "http" + + # Get the store data + connections_config = get_store(store_path) + + # Add (or update) the new connection details + connections_config[name] = { + "description": description, + "host": address, + "port": port, + "scheme": scheme, + "api_key": api_key, + } + + # Update the store data + update_store(store_path, connections_config) + + finish( + True, + f"""Added connection "{name}" ({scheme}://{address}:{port}) to client database""", + ) + + +############################################################################### +# pvc connection remove +############################################################################### +@click.command( + name="remove", + short_help="Remove connections from the client database.", +) +@click.argument("name") +def cli_connection_remove(name): + """ + Remove the PVC connection NAME from the database of the local CLI client. + """ + + # Get the store data + connections_config = get_store(store_path) + + # Remove the entry matching the name + try: + connections_config.pop(name) + except KeyError: + finish(False, f"""No connection found with name "{name}" in local database""") + + # Update the store data + update_store(store_path, connections_config) + + finish(True, f"""Removed connection "{name}" from client database""") + + +############################################################################### +# pvc connection list +############################################################################### +@click.command( + name="list", + short_help="List connections in the client database.", +) +@click.option( + "-k", + "--show-keys", + "show_keys_flag", + is_flag=True, + default=False, + help="Show secure API keys.", +) +@format_opt( + { + "pretty": cli_connection_list_format_pretty, + "raw": lambda d: "\n".join([c["name"] for c in d]), + "json": lambda d: jdumps(d), + "json-pretty": lambda d: jdumps(d, indent=2), + } +) +def cli_connection_list(show_keys_flag, format_function): + """ + List all PVC connections in the database of the local CLI client. + + \b + Format options: + "pretty": Output a nice tabular list of all details. + "raw": Output connection names one per line. + "json": Output in unformatted JSON. + "json-pretty": Output in formatted JSON. + """ + + connections_config = get_store(store_path) + connections_data = cli_connection_list_parser(connections_config, show_keys_flag) + finish(True, connections_data, format_function) + + +############################################################################### +# pvc connection detail +############################################################################### +@click.command( + name="detail", + short_help="List status of all connections in the client database.", +) +@format_opt( + { + "pretty": cli_connection_detail_format_pretty, + "json": lambda d: jdumps(d), + "json-pretty": lambda d: jdumps(d, indent=2), + } +) +def cli_connection_detail(format_function): + """ + List the status and information of all PVC cluster in the database of the local CLI client. + + \b + Format options: + "pretty": Output a nice tabular list of all details. + "json": Output in unformatted JSON. + "json-pretty": Output in formatted JSON. + """ + + echo("Gathering information from all clusters... 
", newline=False, err=True) + connections_config = get_store(store_path) + connections_data = cli_connection_detail_parser(connections_config) + echo("done.", err=True) + echo("", err=True) + finish(True, connections_data, format_function) + + +############################################################################### +# pvc +############################################################################### +@click.group(context_settings=CONTEXT_SETTINGS) +@click.option( + "-c", + "--connection", + "_connection", + envvar="PVC_CONNECTION", + default=None, + help="Cluster to connect to.", +) +@click.option( + "-v", + "--debug", + "_debug", + envvar="PVC_DEBUG", + is_flag=True, + default=False, + help="Additional debug details.", +) +@click.option( + "-q", + "--quiet", + "_quiet", + envvar="PVC_QUIET", + is_flag=True, + default=False, + help="Suppress connection connection information.", +) +@click.option( + "-u", + "--unsafe", + "_unsafe", + envvar="PVC_UNSAFE", + is_flag=True, + default=False, + help='Allow unsafe operations without confirmation/"--yes" argument.', +) +@click.option( + "--colour", + "--color", + "_colour", + envvar="PVC_COLOUR", + is_flag=True, + default=False, + help="Force colourized output.", +) +@click.option( + "--version", + is_flag=True, + callback=version, + expose_value=False, + is_eager=True, + help="Show CLI version and exit.", +) +def cli(_connection, _debug, _quiet, _unsafe, _colour): + """ + Parallel Virtual Cluster CLI management tool + + Environment variables: + + "PVC_CONNECTION": Set the connection to access instead of using --connection/-c + + "PVC_DEBUG": Enable additional debugging details instead of using --debug/-v + + "PVC_QUIET": Suppress stderr connection output from client instead of using --quiet/-q + + "PVC_UNSAFE": Always suppress confirmations instead of needing --unsafe/-u or --yes/-y; USE WITH EXTREME CARE + + "PVC_COLOUR": Force colour on the output even if Click determines it is not a console (e.g. with 'watch') + + If a "-c"/"--connection"/"PVC_CONNECTION" is not specified, the CLI will attempt to read a "local" connection + from the API configuration at "/etc/pvc/pvcapid.yaml". If no such configuration is found, the command will + abort with an error. This applies to all commands except those under "connection". 
+ """ + + global CLI_CONFIG + store_data = get_store(store_path) + CLI_CONFIG = get_config(store_data, _connection) + + # There is only one connection and no local connection, so even if nothing was passed, use it + if len(store_data) == 1 and _connection is None and CLI_CONFIG.get("badcfg", None): + CLI_CONFIG = get_config(store_data, list(store_data.keys())[0]) + + if not CLI_CONFIG.get("badcfg", None): + CLI_CONFIG["debug"] = _debug + CLI_CONFIG["unsafe"] = _unsafe + CLI_CONFIG["colour"] = _colour + CLI_CONFIG["quiet"] = _quiet + + audit() + + +############################################################################### +# Click command tree +############################################################################### + +cli_connection.add_command(cli_connection_add) +cli_connection.add_command(cli_connection_remove) +cli_connection.add_command(cli_connection_list) +cli_connection.add_command(cli_connection_detail) +cli.add_command(cli_connection) +# cli.add_command(testing) diff --git a/cli-client-new/pvc/cli/formatters.py b/cli-client-new/pvc/cli/formatters.py new file mode 100644 index 00000000..77857bb8 --- /dev/null +++ b/cli-client-new/pvc/cli/formatters.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python3 + +# formatters.py - PVC Click CLI output formatters library +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2023 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +############################################################################### + +# import colorama + + +# Define colour values for use in formatters +ansii = { + "red": "\033[91m", + "blue": "\033[94m", + "cyan": "\033[96m", + "green": "\033[92m", + "yellow": "\033[93m", + "purple": "\033[95m", + "bold": "\033[1m", + "end": "\033[0m", +} + + +def cli_connection_list_format_pretty(data): + """ + Pretty format the output of cli_connection_list + """ + + # Set the fields data + fields = { + "name": {"header": "Name", "length": len("Name") + 1}, + "description": {"header": "Description", "length": len("Description") + 1}, + "address": {"header": "Address", "length": len("Address") + 1}, + "port": {"header": "Port", "length": len("Port") + 1}, + "scheme": {"header": "Scheme", "length": len("Scheme") + 1}, + "api_key": {"header": "API Key", "length": len("API Key") + 1}, + } + + # Parse each connection and adjust field lengths + for connection in data: + for field, length in [(f, fields[f]["length"]) for f in fields]: + _length = len(str(connection[field])) + if _length > length: + length = len(str(connection[field])) + 1 + + fields[field]["length"] = length + + # Create the output object and define the line format + output = list() + line = "{bold}{name: <{lname}} {desc: <{ldesc}} {addr: <{laddr}} {port: <{lport}} {schm: <{lschm}} {akey: <{lakey}}{end}" + + # Add the header line + output.append( + line.format( + bold=ansii["bold"], + end=ansii["end"], + name=fields["name"]["header"], + lname=fields["name"]["length"], + desc=fields["description"]["header"], + ldesc=fields["description"]["length"], + addr=fields["address"]["header"], + laddr=fields["address"]["length"], + port=fields["port"]["header"], + lport=fields["port"]["length"], + schm=fields["scheme"]["header"], + lschm=fields["scheme"]["length"], + akey=fields["api_key"]["header"], + lakey=fields["api_key"]["length"], + ) + ) + + # Add a line per connection + for connection in data: + output.append( + line.format( + bold="", + end="", + name=connection["name"], + lname=fields["name"]["length"], + desc=connection["description"], + ldesc=fields["description"]["length"], + addr=connection["address"], + laddr=fields["address"]["length"], + port=connection["port"], + lport=fields["port"]["length"], + schm=connection["scheme"], + lschm=fields["scheme"]["length"], + akey=connection["api_key"], + lakey=fields["api_key"]["length"], + ) + ) + + return "\n".join(output) + + +def cli_connection_detail_format_pretty(data): + """ + Pretty format the output of cli_connection_detail + """ + + # Set the fields data + fields = { + "name": {"header": "Name", "length": len("Name") + 1}, + "description": {"header": "Description", "length": len("Description") + 1}, + "health": {"header": "Health", "length": len("Health") + 1}, + "primary_node": {"header": "Primary", "length": len("Primary") + 1}, + "pvc_version": {"header": "Version", "length": len("Version") + 1}, + "nodes": {"header": "Nodes", "length": len("Nodes") + 1}, + "vms": {"header": "VMs", "length": len("VMs") + 1}, + "networks": {"header": "Networks", "length": len("Networks") + 1}, + "osds": {"header": "OSDs", "length": len("OSDs") + 1}, + "pools": {"header": "Pools", "length": len("Pools") + 1}, + "volumes": {"header": "Volumes", "length": len("Volumes") + 1}, + "snapshots": {"header": "Snapshots", "length": len("Snapshots") + 1}, + } + + # Parse each connection and adjust field lengths + for connection in data: + for field, length in [(f, fields[f]["length"]) for f in fields]: + _length = 
len(str(connection[field])) + if _length > length: + length = len(str(connection[field])) + 1 + + fields[field]["length"] = length + + # Create the output object and define the line format + output = list() + line = "{bold}{name: <{lname}} {desc: <{ldesc}} {chlth}{hlth: <{lhlth}}{endc} {prin: <{lprin}} {vers: <{lvers}} {nods: <{lnods}} {vms: <{lvms}} {nets: <{lnets}} {osds: <{losds}} {pols: <{lpols}} {vols: <{lvols}} {snts: <{lsnts}}{end}" + + # Add the header line + output.append( + line.format( + bold=ansii["bold"], + end=ansii["end"], + chlth="", + endc="", + name=fields["name"]["header"], + lname=fields["name"]["length"], + desc=fields["description"]["header"], + ldesc=fields["description"]["length"], + hlth=fields["health"]["header"], + lhlth=fields["health"]["length"], + prin=fields["primary_node"]["header"], + lprin=fields["primary_node"]["length"], + vers=fields["pvc_version"]["header"], + lvers=fields["pvc_version"]["length"], + nods=fields["nodes"]["header"], + lnods=fields["nodes"]["length"], + vms=fields["vms"]["header"], + lvms=fields["vms"]["length"], + nets=fields["networks"]["header"], + lnets=fields["networks"]["length"], + osds=fields["osds"]["header"], + losds=fields["osds"]["length"], + pols=fields["pools"]["header"], + lpols=fields["pools"]["length"], + vols=fields["volumes"]["header"], + lvols=fields["volumes"]["length"], + snts=fields["snapshots"]["header"], + lsnts=fields["snapshots"]["length"], + ) + ) + + # Add a line per connection + for connection in data: + if connection["health"] == "N/A": + health_value = "N/A" + health_colour = ansii["purple"] + else: + health_value = f"{connection['health']}%" + if connection["maintenance"] == "true": + health_colour = ansii["blue"] + elif connection["health"] > 90: + health_colour = ansii["green"] + elif connection["health"] > 50: + health_colour = ansii["yellow"] + else: + health_colour = ansii["red"] + + output.append( + line.format( + bold="", + end="", + chlth=health_colour, + endc=ansii["end"], + name=connection["name"], + lname=fields["name"]["length"], + desc=connection["description"], + ldesc=fields["description"]["length"], + hlth=health_value, + lhlth=fields["health"]["length"], + prin=connection["primary_node"], + lprin=fields["primary_node"]["length"], + vers=connection["pvc_version"], + lvers=fields["pvc_version"]["length"], + nods=connection["nodes"], + lnods=fields["nodes"]["length"], + vms=connection["vms"], + lvms=fields["vms"]["length"], + nets=connection["networks"], + lnets=fields["networks"]["length"], + osds=connection["osds"], + losds=fields["osds"]["length"], + pols=connection["pools"], + lpols=fields["pools"]["length"], + vols=connection["volumes"], + lvols=fields["volumes"]["length"], + snts=connection["snapshots"], + lsnts=fields["snapshots"]["length"], + ) + ) + + return "\n".join(output) diff --git a/cli-client-new/pvc/cli/helpers.py b/cli-client-new/pvc/cli/helpers.py new file mode 100644 index 00000000..0c4e45d6 --- /dev/null +++ b/cli-client-new/pvc/cli/helpers.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 + +# helpers.py - PVC Click CLI helper function library +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2023 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +############################################################################### + +from click import echo +from distutils.util import strtobool +from json import load as jload +from json import dump as jdump +from os import chmod, environ, getpid, path +from socket import gethostname +from sys import argv +from syslog import syslog, openlog, closelog, LOG_AUTH +from yaml import load as yload +from yaml import BaseLoader + + +DEFAULT_STORE_DATA = {"cfgfile": "/etc/pvc/pvcapid.yaml"} +DEFAULT_STORE_FILENAME = "pvc.json" +DEFAULT_API_PREFIX = "/api/v1" +DEFAULT_NODE_HOSTNAME = gethostname().split(".")[0] + + +def audit(): + """ + Log an audit message to the local syslog AUTH facility + """ + + args = argv + args[0] = "pvc" + pid = getpid() + + openlog(facility=LOG_AUTH, ident=f"{args[0]}[{pid}]") + syslog( + f"""client audit: command "{' '.join(args)}" by user {environ.get('USER', None)}""" + ) + closelog() + + +def read_config_from_yaml(cfgfile): + """ + Read the PVC API configuration from the local API configuration file + """ + + try: + with open(cfgfile) as fh: + api_config = yload(fh, Loader=BaseLoader)["pvc"]["api"] + + host = api_config["listen_address"] + port = api_config["listen_port"] + scheme = "https" if strtobool(api_config["ssl"]["enabled"]) else "http" + api_key = ( + api_config["authentication"]["tokens"][0]["token"] + if strtobool(api_config["authentication"]["enabled"]) + else None + ) + except KeyError: + echo("Invalid API YAML found, ignoring.") + host = None + port = None + scheme = None + api_key = None + + return cfgfile, host, port, scheme, api_key + + +def get_config(store_data, cluster=None): + """ + Load CLI configuration from store data + """ + + if store_data is None: + return {"badcfg": True} + + cluster_details = store_data.get(cluster, None) + + if not cluster_details: + cluster = "local" + cluster_details = DEFAULT_STORE_DATA + + if cluster_details.get("cfgfile", None) is not None: + if path.isfile(cluster_details.get("cfgfile", None)): + description, host, port, scheme, api_key = read_config_from_yaml( + cluster_details.get("cfgfile", None) + ) + if None in [description, host, port, scheme]: + return {"badcfg": True} + else: + return {"badcfg": True} + else: + # This is a static configuration, get the details directly + description = cluster_details["description"] + host = cluster_details["host"] + port = cluster_details["port"] + scheme = cluster_details["scheme"] + api_key = cluster_details["api_key"] + + config = dict() + config["debug"] = False + config["cluster"] = cluster + config["description"] = description + config["api_host"] = f"{host}:{port}" + config["api_scheme"] = scheme + config["api_key"] = api_key + config["api_prefix"] = DEFAULT_API_PREFIX + if cluster == "local": + config["verify_ssl"] = False + else: + config["verify_ssl"] = bool( + strtobool(environ.get("PVC_CLIENT_VERIFY_SSL", "True")) + ) + + return config + + +def get_store(store_path): + """ + Load store information from the store path + """ + + store_file = f"{store_path}/{DEFAULT_STORE_FILENAME}" + + with open(store_file) as fh: + try: + store_data = jload(fh) + return store_data + except Exception: + return dict() + + 
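[Editor's note: a minimal usage sketch of the store helpers above, not part of the patch. It assumes the pvc.cli package from this diff is importable and uses a hypothetical /tmp path; cli.py normally derives store_path from PVC_CLIENT_DIR or ~/.config/pvc and creates the directory itself.]

from os import makedirs
from pvc.cli.helpers import get_store, update_store, DEFAULT_STORE_DATA

store_path = "/tmp/pvc-example"                           # hypothetical path for illustration only
makedirs(store_path, exist_ok=True)                       # cli.py creates this directory on first run
update_store(store_path, {"local": DEFAULT_STORE_DATA})   # seed the JSON store with the default "local" entry
connections = get_store(store_path)                       # read the store back as a plain dict
connections["example"] = {                                # add a static connection, as cli_connection_add does
    "description": "Example cluster",
    "host": "10.0.0.1",
    "port": 7370,
    "scheme": "http",
    "api_key": None,
}
update_store(store_path, connections)                     # persist the updated store to pvc.json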
+def update_store(store_path, store_data): + """ + Update store information to the store path, creating it (with sensible permissions) if needed + """ + + store_file = f"{store_path}/{DEFAULT_STORE_FILENAME}" + + if not path.exists(store_file): + with open(store_file, "w") as fh: + fh.write("") + chmod(store_file, int(environ.get("PVC_CLIENT_DB_PERMS", "600"), 8)) + + with open(store_file, "w") as fh: + jdump(store_data, fh, sort_keys=True, indent=4) diff --git a/cli-client-new/pvc/cli/parsers.py b/cli-client-new/pvc/cli/parsers.py new file mode 100644 index 00000000..7ea16987 --- /dev/null +++ b/cli-client-new/pvc/cli/parsers.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python3 + +# parsers.py - PVC Click CLI data parser function library +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2023 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +############################################################################### + +from os import path +from re import sub + +from pvc.cli.helpers import read_config_from_yaml, get_config + +import pvc.lib.cluster + + +def cli_connection_list_parser(connections_config, show_keys_flag): + """ + Parse connections_config into formatable data for cli_connection_list + """ + + connections_data = list() + + for connection, details in connections_config.items(): + if details.get("cfgfile", None) is not None: + if path.isfile(details.get("cfgfile")): + description, address, port, scheme, api_key = read_config_from_yaml( + details.get("cfgfile") + ) + else: + description, address, port, scheme, api_key = ( + None, + None, + None, + None, + None, + ) + if not show_keys_flag: + api_key = sub(r"[a-z0-9]", "x", api_key) + connections_data.append( + { + "name": connection, + "description": description, + "address": address, + "port": port, + "scheme": scheme, + "api_key": api_key, + } + ) + else: + if not show_keys_flag: + details["api_key"] = sub(r"[a-z0-9]", "x", details["api_key"]) + connections_data.append( + { + "name": connection, + "description": details["description"], + "address": details["host"], + "port": details["port"], + "scheme": details["scheme"], + "api_key": details["api_key"], + } + ) + + return connections_data + + +def cli_connection_detail_parser(connections_config): + """ + Parse connections_config into formatable data for cli_connection_detail + """ + connections_data = list() + for connection, details in connections_config.items(): + cluster_config = get_config(connections_config, cluster=connection) + # Connect to each API and gather cluster status + retcode, retdata = pvc.lib.cluster.get_info(cluster_config) + if retcode == 0: + # Create dummy data of N/A for all fields + connections_data.append( + { + "name": cluster_config["cluster"], + "description": cluster_config["description"], + "health": "N/A", + "maintenance": "N/A", + "primary_node": "N/A", + "pvc_version": "N/A", + "nodes": "N/A", + "vms": "N/A", + "networks": "N/A", + "osds": "N/A", + "pools": "N/A", + "volumes": "N/A", + "snapshots": "N/A", 
+ } + ) + else: + # Normalize data into nice formattable version + connections_data.append( + { + "name": cluster_config["cluster"], + "description": cluster_config["description"], + "health": retdata.get("cluster_health", {}).get("health", "N/A"), + "maintenance": retdata.get("maintenance", "N/A"), + "primary_node": retdata.get("primary_node", "N/A"), + "pvc_version": retdata.get("pvc_version", "N/A"), + "nodes": retdata.get("nodes", {}).get("total", "N/A"), + "vms": retdata.get("vms", {}).get("total", "N/A"), + "networks": retdata.get("networks", "N/A"), + "osds": retdata.get("osds", {}).get("total", "N/A"), + "pools": retdata.get("pools", "N/A"), + "volumes": retdata.get("volumes", "N/A"), + "snapshots": retdata.get("snapshots", "N/A"), + } + ) + + return connections_data diff --git a/cli-client-new/pvc/lib/__init__.py b/cli-client-new/pvc/lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/cli-client-new/pvc/lib/ansiprint.py b/cli-client-new/pvc/lib/ansiprint.py new file mode 100644 index 00000000..3a7ce394 --- /dev/null +++ b/cli-client-new/pvc/lib/ansiprint.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 + +# ansiprint.py - Printing function for formatted messages +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +############################################################################### + +import datetime + + +# ANSII colours for output +def red(): + return "\033[91m" + + +def blue(): + return "\033[94m" + + +def cyan(): + return "\033[96m" + + +def green(): + return "\033[92m" + + +def yellow(): + return "\033[93m" + + +def purple(): + return "\033[95m" + + +def bold(): + return "\033[1m" + + +def end(): + return "\033[0m" + + +# Print function +def echo(message, prefix, state): + # Get the date + date = "{} - ".format(datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S.%f")) + endc = end() + + # Continuation + if state == "c": + date = "" + colour = "" + prompt = " " + # OK + elif state == "o": + colour = green() + prompt = ">>> " + # Error + elif state == "e": + colour = red() + prompt = ">>> " + # Warning + elif state == "w": + colour = yellow() + prompt = ">>> " + # Tick + elif state == "t": + colour = purple() + prompt = ">>> " + # Information + elif state == "i": + colour = blue() + prompt = ">>> " + else: + colour = bold() + prompt = ">>> " + + # Append space to prefix + if prefix != "": + prefix = prefix + " " + + print(colour + prompt + endc + date + prefix + message) diff --git a/cli-client-new/pvc/lib/ceph.py b/cli-client-new/pvc/lib/ceph.py new file mode 100644 index 00000000..7738f3f2 --- /dev/null +++ b/cli-client-new/pvc/lib/ceph.py @@ -0,0 +1,2606 @@ +#!/usr/bin/env python3 + +# ceph.py - PVC CLI client function library, Ceph cluster functions +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. 
Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +############################################################################### + +import math + +from json import dumps, loads +from requests_toolbelt.multipart.encoder import ( + MultipartEncoder, + MultipartEncoderMonitor, +) + +import pvc.lib.ansiprint as ansiprint +from pvc.lib.common import UploadProgressBar, call_api + +# +# Supplemental functions +# + +# Matrix of human-to-byte values +byte_unit_matrix = { + "B": 1, + "K": 1024, + "M": 1024 * 1024, + "G": 1024 * 1024 * 1024, + "T": 1024 * 1024 * 1024 * 1024, + "P": 1024 * 1024 * 1024 * 1024 * 1024, +} + +# Matrix of human-to-metric values +ops_unit_matrix = { + "": 1, + "K": 1000, + "M": 1000 * 1000, + "G": 1000 * 1000 * 1000, + "T": 1000 * 1000 * 1000 * 1000, + "P": 1000 * 1000 * 1000 * 1000 * 1000, +} + + +# Format byte sizes to/from human-readable units +def format_bytes_tohuman(databytes): + datahuman = "" + for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get, reverse=True): + new_bytes = int(math.ceil(databytes / byte_unit_matrix[unit])) + # Round up if 5 or more digits + if new_bytes > 9999: + # We can jump down another level + continue + else: + # We're at the end, display with this size + datahuman = "{}{}".format(new_bytes, unit) + + return datahuman + + +def format_bytes_fromhuman(datahuman): + # Trim off human-readable character + dataunit = datahuman[-1] + datasize = int(datahuman[:-1]) + databytes = datasize * byte_unit_matrix[dataunit] + return "{}B".format(databytes) + + +# Format ops sizes to/from human-readable units +def format_ops_tohuman(dataops): + datahuman = "" + for unit in sorted(ops_unit_matrix, key=ops_unit_matrix.get, reverse=True): + new_ops = int(math.ceil(dataops / ops_unit_matrix[unit])) + # Round up if 6 or more digits + if new_ops > 99999: + # We can jump down another level + continue + else: + # We're at the end, display with this size + datahuman = "{}{}".format(new_ops, unit) + + return datahuman + + +def format_ops_fromhuman(datahuman): + # Trim off human-readable character + dataunit = datahuman[-1] + datasize = int(datahuman[:-1]) + dataops = datasize * ops_unit_matrix[dataunit] + return "{}".format(dataops) + + +def format_pct_tohuman(datapct): + datahuman = "{0:.1f}".format(float(datapct * 100.0)) + return datahuman + + +# +# Status functions +# +def ceph_status(config): + """ + Get status of the Ceph cluster + + API endpoint: GET /api/v1/storage/ceph/status + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/storage/ceph/status") + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_util(config): + """ + Get utilization of the Ceph cluster + + API endpoint: GET /api/v1/storage/ceph/utilization + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/storage/ceph/utilization") + + if response.status_code == 200: + return True, response.json() + else: + return 
False, response.json().get("message", "") + + +def format_raw_output(status_data): + ainformation = list() + ainformation.append( + "{bold}Ceph cluster {stype} (primary node {end}{blue}{primary}{end}{bold}){end}\n".format( + bold=ansiprint.bold(), + end=ansiprint.end(), + blue=ansiprint.blue(), + stype=status_data["type"], + primary=status_data["primary_node"], + ) + ) + ainformation.append(status_data["ceph_data"]) + ainformation.append("") + + return "\n".join(ainformation) + + +# +# OSD DB VG functions +# +def ceph_osd_db_vg_add(config, node, device): + """ + Add new Ceph OSD database volume group + + API endpoint: POST /api/v1/storage/ceph/osddb + API arguments: node={node}, device={device} + API schema: {"message":"{data}"} + """ + params = {"node": node, "device": device} + response = call_api(config, "post", "/storage/ceph/osddb", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +# +# OSD functions +# +def ceph_osd_info(config, osd): + """ + Get information about Ceph OSD + + API endpoint: GET /api/v1/storage/ceph/osd/{osd} + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/storage/ceph/osd/{osd}".format(osd=osd)) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "OSD not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_osd_list(config, limit): + """ + Get list information about Ceph OSDs (limited by {limit}) + + API endpoint: GET /api/v1/storage/ceph/osd + API arguments: limit={limit} + API schema: [{json_data_object},{json_data_object},etc.] 
+ """ + params = dict() + if limit: + params["limit"] = limit + + response = call_api(config, "get", "/storage/ceph/osd", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_osd_add(config, node, device, weight, ext_db_flag, ext_db_ratio): + """ + Add new Ceph OSD + + API endpoint: POST /api/v1/storage/ceph/osd + API arguments: node={node}, device={device}, weight={weight}, ext_db={ext_db_flag}, ext_db_ratio={ext_db_ratio} + API schema: {"message":"{data}"} + """ + params = { + "node": node, + "device": device, + "weight": weight, + "ext_db": ext_db_flag, + "ext_db_ratio": ext_db_ratio, + } + response = call_api(config, "post", "/storage/ceph/osd", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_osd_replace(config, osdid, device, weight): + """ + Replace an existing Ceph OSD with a new device + + API endpoint: POST /api/v1/storage/ceph/osd/{osdid} + API arguments: device={device}, weight={weight} + API schema: {"message":"{data}"} + """ + params = {"device": device, "weight": weight, "yes-i-really-mean-it": "yes"} + response = call_api(config, "post", f"/storage/ceph/osd/{osdid}", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_osd_refresh(config, osdid, device): + """ + Refresh (reimport) an existing Ceph OSD with device {device} + + API endpoint: PUT /api/v1/storage/ceph/osd/{osdid} + API arguments: device={device} + API schema: {"message":"{data}"} + """ + params = { + "device": device, + } + response = call_api(config, "put", f"/storage/ceph/osd/{osdid}", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_osd_remove(config, osdid, force_flag): + """ + Remove Ceph OSD + + API endpoint: DELETE /api/v1/storage/ceph/osd/{osdid} + API arguments: + API schema: {"message":"{data}"} + """ + params = {"force": force_flag, "yes-i-really-mean-it": "yes"} + response = call_api( + config, "delete", "/storage/ceph/osd/{osdid}".format(osdid=osdid), params=params + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_osd_state(config, osdid, state): + """ + Set state of Ceph OSD + + API endpoint: POST /api/v1/storage/ceph/osd/{osdid}/state + API arguments: state={state} + API schema: {"message":"{data}"} + """ + params = {"state": state} + response = call_api( + config, + "post", + "/storage/ceph/osd/{osdid}/state".format(osdid=osdid), + params=params, + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_osd_option(config, option, action): + """ + Set cluster option of Ceph OSDs + + API endpoint: POST /api/v1/storage/ceph/option + API arguments: option={option}, action={action} + API schema: {"message":"{data}"} + """ + params = {"option": option, "action": action} + response = call_api(config, "post", "/storage/ceph/option", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def getOutputColoursOSD(osd_information): + # Set the UP 
status + if osd_information["stats"]["up"] == 1: + osd_up_flag = "Yes" + osd_up_colour = ansiprint.green() + else: + osd_up_flag = "No" + osd_up_colour = ansiprint.red() + + # Set the IN status + if osd_information["stats"]["in"] == 1: + osd_in_flag = "Yes" + osd_in_colour = ansiprint.green() + else: + osd_in_flag = "No" + osd_in_colour = ansiprint.red() + + return osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour + + +def format_list_osd(osd_list): + # Handle empty list + if not osd_list: + osd_list = list() + + osd_list_output = [] + + osd_id_length = 3 + osd_node_length = 5 + osd_device_length = 6 + osd_db_device_length = 9 + osd_up_length = 4 + osd_in_length = 4 + osd_size_length = 5 + osd_weight_length = 3 + osd_reweight_length = 5 + osd_pgs_length = 4 + osd_used_length = 5 + osd_free_length = 6 + osd_util_length = 6 + osd_var_length = 5 + osd_wrops_length = 4 + osd_wrdata_length = 5 + osd_rdops_length = 4 + osd_rddata_length = 5 + + for osd_information in osd_list: + try: + # If this happens, the node hasn't checked in fully yet, so use some dummy data + if osd_information["stats"]["node"] == "|": + for key in osd_information["stats"].keys(): + if ( + osd_information["stats"][key] == "|" + or osd_information["stats"][key] is None + ): + osd_information["stats"][key] = "N/A" + for key in osd_information.keys(): + if osd_information[key] is None: + osd_information[key] = "N/A" + else: + for key in osd_information["stats"].keys(): + if key in ["utilization", "var"] and isinstance( + osd_information["stats"][key], float + ): + osd_information["stats"][key] = round( + osd_information["stats"][key], 2 + ) + except KeyError: + print( + f"Details for OSD {osd_information['id']} missing required keys, skipping." + ) + continue + + # Deal with the size to human readable + osd_information["stats"]["size"] = osd_information["stats"]["kb"] * 1024 + for datatype in "size", "wr_data", "rd_data": + databytes = osd_information["stats"][datatype] + if isinstance(databytes, int): + databytes_formatted = format_bytes_tohuman(databytes) + else: + databytes_formatted = databytes + osd_information["stats"][datatype] = databytes_formatted + for datatype in "wr_ops", "rd_ops": + dataops = osd_information["stats"][datatype] + if isinstance(dataops, int): + dataops_formatted = format_ops_tohuman(dataops) + else: + dataops_formatted = dataops + osd_information["stats"][datatype] = dataops_formatted + + # Set the OSD ID length + _osd_id_length = len(osd_information["id"]) + 1 + if _osd_id_length > osd_id_length: + osd_id_length = _osd_id_length + + # Set the OSD node length + _osd_node_length = len(osd_information["node"]) + 1 + if _osd_node_length > osd_node_length: + osd_node_length = _osd_node_length + + # Set the OSD device length + _osd_device_length = len(osd_information["device"]) + 1 + if _osd_device_length > osd_device_length: + osd_device_length = _osd_device_length + + # Set the OSD db_device length + _osd_db_device_length = len(osd_information["db_device"]) + 1 + if _osd_db_device_length > osd_db_device_length: + osd_db_device_length = _osd_db_device_length + + # Set the size and length + _osd_size_length = len(str(osd_information["stats"]["size"])) + 1 + if _osd_size_length > osd_size_length: + osd_size_length = _osd_size_length + + # Set the weight and length + _osd_weight_length = len(str(osd_information["stats"]["weight"])) + 1 + if _osd_weight_length > osd_weight_length: + osd_weight_length = _osd_weight_length + + # Set the reweight and length + _osd_reweight_length = 
len(str(osd_information["stats"]["reweight"])) + 1 + if _osd_reweight_length > osd_reweight_length: + osd_reweight_length = _osd_reweight_length + + # Set the pgs and length + _osd_pgs_length = len(str(osd_information["stats"]["pgs"])) + 1 + if _osd_pgs_length > osd_pgs_length: + osd_pgs_length = _osd_pgs_length + + # Set the used/available/utlization%/variance and lengths + _osd_used_length = len(osd_information["stats"]["used"]) + 1 + if _osd_used_length > osd_used_length: + osd_used_length = _osd_used_length + + _osd_free_length = len(osd_information["stats"]["avail"]) + 1 + if _osd_free_length > osd_free_length: + osd_free_length = _osd_free_length + + _osd_util_length = len(str(osd_information["stats"]["utilization"])) + 1 + if _osd_util_length > osd_util_length: + osd_util_length = _osd_util_length + + _osd_var_length = len(str(osd_information["stats"]["var"])) + 1 + if _osd_var_length > osd_var_length: + osd_var_length = _osd_var_length + + # Set the read/write IOPS/data and length + _osd_wrops_length = len(osd_information["stats"]["wr_ops"]) + 1 + if _osd_wrops_length > osd_wrops_length: + osd_wrops_length = _osd_wrops_length + + _osd_wrdata_length = len(osd_information["stats"]["wr_data"]) + 1 + if _osd_wrdata_length > osd_wrdata_length: + osd_wrdata_length = _osd_wrdata_length + + _osd_rdops_length = len(osd_information["stats"]["rd_ops"]) + 1 + if _osd_rdops_length > osd_rdops_length: + osd_rdops_length = _osd_rdops_length + + _osd_rddata_length = len(osd_information["stats"]["rd_data"]) + 1 + if _osd_rddata_length > osd_rddata_length: + osd_rddata_length = _osd_rddata_length + + # Format the output header + osd_list_output.append( + "{bold}{osd_header: <{osd_header_length}} {state_header: <{state_header_length}} {details_header: <{details_header_length}} {read_header: <{read_header_length}} {write_header: <{write_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + osd_header_length=osd_id_length + + osd_node_length + + osd_device_length + + osd_db_device_length + + 3, + state_header_length=osd_up_length + osd_in_length + 1, + details_header_length=osd_size_length + + osd_pgs_length + + osd_weight_length + + osd_reweight_length + + osd_used_length + + osd_free_length + + osd_util_length + + osd_var_length + + 7, + read_header_length=osd_rdops_length + osd_rddata_length + 1, + write_header_length=osd_wrops_length + osd_wrdata_length + 1, + osd_header="OSDs " + + "".join( + [ + "-" + for _ in range( + 5, + osd_id_length + + osd_node_length + + osd_device_length + + osd_db_device_length + + 2, + ) + ] + ), + state_header="State " + + "".join(["-" for _ in range(6, osd_up_length + osd_in_length)]), + details_header="Details " + + "".join( + [ + "-" + for _ in range( + 8, + osd_size_length + + osd_pgs_length + + osd_weight_length + + osd_reweight_length + + osd_used_length + + osd_free_length + + osd_util_length + + osd_var_length + + 6, + ) + ] + ), + read_header="Read " + + "".join(["-" for _ in range(5, osd_rdops_length + osd_rddata_length)]), + write_header="Write " + + "".join(["-" for _ in range(6, osd_wrops_length + osd_wrdata_length)]), + ) + ) + + osd_list_output.append( + "{bold}\ +{osd_id: <{osd_id_length}} \ +{osd_node: <{osd_node_length}} \ +{osd_device: <{osd_device_length}} \ +{osd_db_device: <{osd_db_device_length}} \ +{osd_up: <{osd_up_length}} \ +{osd_in: <{osd_in_length}} \ +{osd_size: <{osd_size_length}} \ +{osd_pgs: <{osd_pgs_length}} \ +{osd_weight: <{osd_weight_length}} \ +{osd_reweight: <{osd_reweight_length}} \ 
+{osd_used: <{osd_used_length}} \ +{osd_free: <{osd_free_length}} \ +{osd_util: <{osd_util_length}} \ +{osd_var: <{osd_var_length}} \ +{osd_rdops: <{osd_rdops_length}} \ +{osd_rddata: <{osd_rddata_length}} \ +{osd_wrops: <{osd_wrops_length}} \ +{osd_wrdata: <{osd_wrdata_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + osd_id_length=osd_id_length, + osd_node_length=osd_node_length, + osd_device_length=osd_device_length, + osd_db_device_length=osd_db_device_length, + osd_up_length=osd_up_length, + osd_in_length=osd_in_length, + osd_size_length=osd_size_length, + osd_pgs_length=osd_pgs_length, + osd_weight_length=osd_weight_length, + osd_reweight_length=osd_reweight_length, + osd_used_length=osd_used_length, + osd_free_length=osd_free_length, + osd_util_length=osd_util_length, + osd_var_length=osd_var_length, + osd_wrops_length=osd_wrops_length, + osd_wrdata_length=osd_wrdata_length, + osd_rdops_length=osd_rdops_length, + osd_rddata_length=osd_rddata_length, + osd_id="ID", + osd_node="Node", + osd_device="Block", + osd_db_device="DB Block", + osd_up="Up", + osd_in="In", + osd_size="Size", + osd_pgs="PGs", + osd_weight="Wt", + osd_reweight="ReWt", + osd_used="Used", + osd_free="Free", + osd_util="Util%", + osd_var="Var", + osd_wrops="OPS", + osd_wrdata="Data", + osd_rdops="OPS", + osd_rddata="Data", + ) + ) + + for osd_information in sorted(osd_list, key=lambda x: int(x["id"])): + osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour = getOutputColoursOSD( + osd_information + ) + + osd_db_device = osd_information["db_device"] + if not osd_db_device: + osd_db_device = "N/A" + + # Format the output header + osd_list_output.append( + "{bold}\ +{osd_id: <{osd_id_length}} \ +{osd_node: <{osd_node_length}} \ +{osd_device: <{osd_device_length}} \ +{osd_db_device: <{osd_db_device_length}} \ +{osd_up_colour}{osd_up: <{osd_up_length}}{end_colour} \ +{osd_in_colour}{osd_in: <{osd_in_length}}{end_colour} \ +{osd_size: <{osd_size_length}} \ +{osd_pgs: <{osd_pgs_length}} \ +{osd_weight: <{osd_weight_length}} \ +{osd_reweight: <{osd_reweight_length}} \ +{osd_used: <{osd_used_length}} \ +{osd_free: <{osd_free_length}} \ +{osd_util: <{osd_util_length}} \ +{osd_var: <{osd_var_length}} \ +{osd_rdops: <{osd_rdops_length}} \ +{osd_rddata: <{osd_rddata_length}} \ +{osd_wrops: <{osd_wrops_length}} \ +{osd_wrdata: <{osd_wrdata_length}} \ +{end_bold}".format( + bold="", + end_bold="", + end_colour=ansiprint.end(), + osd_id_length=osd_id_length, + osd_node_length=osd_node_length, + osd_device_length=osd_device_length, + osd_db_device_length=osd_db_device_length, + osd_up_length=osd_up_length, + osd_in_length=osd_in_length, + osd_size_length=osd_size_length, + osd_pgs_length=osd_pgs_length, + osd_weight_length=osd_weight_length, + osd_reweight_length=osd_reweight_length, + osd_used_length=osd_used_length, + osd_free_length=osd_free_length, + osd_util_length=osd_util_length, + osd_var_length=osd_var_length, + osd_wrops_length=osd_wrops_length, + osd_wrdata_length=osd_wrdata_length, + osd_rdops_length=osd_rdops_length, + osd_rddata_length=osd_rddata_length, + osd_id=osd_information["id"], + osd_node=osd_information["node"], + osd_device=osd_information["device"], + osd_db_device=osd_db_device, + osd_up_colour=osd_up_colour, + osd_up=osd_up_flag, + osd_in_colour=osd_in_colour, + osd_in=osd_in_flag, + osd_size=osd_information["stats"]["size"], + osd_pgs=osd_information["stats"]["pgs"], + osd_weight=osd_information["stats"]["weight"], + osd_reweight=osd_information["stats"]["reweight"], 
+ osd_used=osd_information["stats"]["used"], + osd_free=osd_information["stats"]["avail"], + osd_util=osd_information["stats"]["utilization"], + osd_var=osd_information["stats"]["var"], + osd_wrops=osd_information["stats"]["wr_ops"], + osd_wrdata=osd_information["stats"]["wr_data"], + osd_rdops=osd_information["stats"]["rd_ops"], + osd_rddata=osd_information["stats"]["rd_data"], + ) + ) + + return "\n".join(osd_list_output) + + +# +# Pool functions +# +def ceph_pool_info(config, pool): + """ + Get information about Ceph OSD + + API endpoint: GET /api/v1/storage/ceph/pool/{pool} + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/storage/ceph/pool/{pool}".format(pool=pool)) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "Pool not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_pool_list(config, limit): + """ + Get list information about Ceph pools (limited by {limit}) + + API endpoint: GET /api/v1/storage/ceph/pool + API arguments: limit={limit} + API schema: [{json_data_object},{json_data_object},etc.] + """ + params = dict() + if limit: + params["limit"] = limit + + response = call_api(config, "get", "/storage/ceph/pool", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_pool_add(config, pool, pgs, replcfg, tier): + """ + Add new Ceph pool + + API endpoint: POST /api/v1/storage/ceph/pool + API arguments: pool={pool}, pgs={pgs}, replcfg={replcfg}, tier={tier} + API schema: {"message":"{data}"} + """ + params = {"pool": pool, "pgs": pgs, "replcfg": replcfg, "tier": tier} + response = call_api(config, "post", "/storage/ceph/pool", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_pool_remove(config, pool): + """ + Remove Ceph pool + + API endpoint: DELETE /api/v1/storage/ceph/pool/{pool} + API arguments: + API schema: {"message":"{data}"} + """ + params = {"yes-i-really-mean-it": "yes"} + response = call_api( + config, "delete", "/storage/ceph/pool/{pool}".format(pool=pool), params=params + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_pool_set_pgs(config, pool, pgs): + """ + Set the PGs of a Ceph pool + + API endpoint: PUT /api/v1/storage/ceph/pool/{pool} + API arguments: {"pgs": "{pgs}"} + API schema: {"message":"{data}"} + """ + params = {"pgs": pgs} + response = call_api( + config, "put", "/storage/ceph/pool/{pool}".format(pool=pool), params=params + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def format_list_pool(pool_list): + # Handle empty list + if not pool_list: + pool_list = list() + + pool_list_output = [] + + pool_name_length = 5 + pool_id_length = 3 + pool_tier_length = 5 + pool_pgs_length = 4 + pool_used_length = 5 + pool_usedpct_length = 6 + pool_free_length = 5 + pool_num_objects_length = 6 + pool_num_clones_length = 7 + 
pool_num_copies_length = 7 + pool_num_degraded_length = 9 + pool_read_ops_length = 4 + pool_read_data_length = 5 + pool_write_ops_length = 4 + pool_write_data_length = 5 + + for pool_information in pool_list: + # Deal with the size to human readable + for datatype in ["free_bytes", "used_bytes", "write_bytes", "read_bytes"]: + databytes = pool_information["stats"][datatype] + databytes_formatted = format_bytes_tohuman(int(databytes)) + pool_information["stats"][datatype] = databytes_formatted + for datatype in ["write_ops", "read_ops"]: + dataops = pool_information["stats"][datatype] + dataops_formatted = format_ops_tohuman(int(dataops)) + pool_information["stats"][datatype] = dataops_formatted + for datatype in ["used_percent"]: + datapct = pool_information["stats"][datatype] + datapct_formatted = format_pct_tohuman(float(datapct)) + pool_information["stats"][datatype] = datapct_formatted + + # Set the Pool name length + _pool_name_length = len(pool_information["name"]) + 1 + if _pool_name_length > pool_name_length: + pool_name_length = _pool_name_length + + # Set the id and length + _pool_id_length = len(str(pool_information["stats"]["id"])) + 1 + if _pool_id_length > pool_id_length: + pool_id_length = _pool_id_length + + # Set the tier and length + _pool_tier_length = len(str(pool_information["tier"])) + 1 + if _pool_tier_length > pool_tier_length: + pool_tier_length = _pool_tier_length + + # Set the pgs and length + _pool_pgs_length = len(str(pool_information["pgs"])) + 1 + if _pool_pgs_length > pool_pgs_length: + pool_pgs_length = _pool_pgs_length + + # Set the used and length + _pool_used_length = len(str(pool_information["stats"]["used_bytes"])) + 1 + if _pool_used_length > pool_used_length: + pool_used_length = _pool_used_length + + # Set the usedpct and length + _pool_usedpct_length = len(str(pool_information["stats"]["used_percent"])) + 1 + if _pool_usedpct_length > pool_usedpct_length: + pool_usedpct_length = _pool_usedpct_length + + # Set the free and length + _pool_free_length = len(str(pool_information["stats"]["free_bytes"])) + 1 + if _pool_free_length > pool_free_length: + pool_free_length = _pool_free_length + + # Set the num_objects and length + _pool_num_objects_length = ( + len(str(pool_information["stats"]["num_objects"])) + 1 + ) + if _pool_num_objects_length > pool_num_objects_length: + pool_num_objects_length = _pool_num_objects_length + + # Set the num_clones and length + _pool_num_clones_length = ( + len(str(pool_information["stats"]["num_object_clones"])) + 1 + ) + if _pool_num_clones_length > pool_num_clones_length: + pool_num_clones_length = _pool_num_clones_length + + # Set the num_copies and length + _pool_num_copies_length = ( + len(str(pool_information["stats"]["num_object_copies"])) + 1 + ) + if _pool_num_copies_length > pool_num_copies_length: + pool_num_copies_length = _pool_num_copies_length + + # Set the num_degraded and length + _pool_num_degraded_length = ( + len(str(pool_information["stats"]["num_objects_degraded"])) + 1 + ) + if _pool_num_degraded_length > pool_num_degraded_length: + pool_num_degraded_length = _pool_num_degraded_length + + # Set the read/write IOPS/data and length + _pool_write_ops_length = len(str(pool_information["stats"]["write_ops"])) + 1 + if _pool_write_ops_length > pool_write_ops_length: + pool_write_ops_length = _pool_write_ops_length + + _pool_write_data_length = len(pool_information["stats"]["write_bytes"]) + 1 + if _pool_write_data_length > pool_write_data_length: + pool_write_data_length = _pool_write_data_length + + 
_pool_read_ops_length = len(str(pool_information["stats"]["read_ops"])) + 1 + if _pool_read_ops_length > pool_read_ops_length: + pool_read_ops_length = _pool_read_ops_length + + _pool_read_data_length = len(pool_information["stats"]["read_bytes"]) + 1 + if _pool_read_data_length > pool_read_data_length: + pool_read_data_length = _pool_read_data_length + + # Format the output header + pool_list_output.append( + "{bold}{pool_header: <{pool_header_length}} {objects_header: <{objects_header_length}} {read_header: <{read_header_length}} {write_header: <{write_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + pool_header_length=pool_id_length + + pool_name_length + + pool_tier_length + + pool_pgs_length + + pool_used_length + + pool_usedpct_length + + pool_free_length + + 6, + objects_header_length=pool_num_objects_length + + pool_num_clones_length + + pool_num_copies_length + + pool_num_degraded_length + + 3, + read_header_length=pool_read_ops_length + pool_read_data_length + 1, + write_header_length=pool_write_ops_length + pool_write_data_length + 1, + pool_header="Pools " + + "".join( + [ + "-" + for _ in range( + 6, + pool_id_length + + pool_name_length + + pool_tier_length + + pool_pgs_length + + pool_used_length + + pool_usedpct_length + + pool_free_length + + 5, + ) + ] + ), + objects_header="Objects " + + "".join( + [ + "-" + for _ in range( + 8, + pool_num_objects_length + + pool_num_clones_length + + pool_num_copies_length + + pool_num_degraded_length + + 2, + ) + ] + ), + read_header="Read " + + "".join( + ["-" for _ in range(5, pool_read_ops_length + pool_read_data_length)] + ), + write_header="Write " + + "".join( + ["-" for _ in range(6, pool_write_ops_length + pool_write_data_length)] + ), + ) + ) + + pool_list_output.append( + "{bold}\ +{pool_id: <{pool_id_length}} \ +{pool_name: <{pool_name_length}} \ +{pool_tier: <{pool_tier_length}} \ +{pool_pgs: <{pool_pgs_length}} \ +{pool_used: <{pool_used_length}} \ +{pool_usedpct: <{pool_usedpct_length}} \ +{pool_free: <{pool_free_length}} \ +{pool_objects: <{pool_objects_length}} \ +{pool_clones: <{pool_clones_length}} \ +{pool_copies: <{pool_copies_length}} \ +{pool_degraded: <{pool_degraded_length}} \ +{pool_read_ops: <{pool_read_ops_length}} \ +{pool_read_data: <{pool_read_data_length}} \ +{pool_write_ops: <{pool_write_ops_length}} \ +{pool_write_data: <{pool_write_data_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + pool_id_length=pool_id_length, + pool_name_length=pool_name_length, + pool_tier_length=pool_tier_length, + pool_pgs_length=pool_pgs_length, + pool_used_length=pool_used_length, + pool_usedpct_length=pool_usedpct_length, + pool_free_length=pool_free_length, + pool_objects_length=pool_num_objects_length, + pool_clones_length=pool_num_clones_length, + pool_copies_length=pool_num_copies_length, + pool_degraded_length=pool_num_degraded_length, + pool_write_ops_length=pool_write_ops_length, + pool_write_data_length=pool_write_data_length, + pool_read_ops_length=pool_read_ops_length, + pool_read_data_length=pool_read_data_length, + pool_id="ID", + pool_name="Name", + pool_tier="Tier", + pool_pgs="PGs", + pool_used="Used", + pool_usedpct="Used%", + pool_free="Free", + pool_objects="Count", + pool_clones="Clones", + pool_copies="Copies", + pool_degraded="Degraded", + pool_write_ops="OPS", + pool_write_data="Data", + pool_read_ops="OPS", + pool_read_data="Data", + ) + ) + + for pool_information in sorted(pool_list, key=lambda x: int(x["stats"]["id"])): + 
# Format the output header + pool_list_output.append( + "{bold}\ +{pool_id: <{pool_id_length}} \ +{pool_name: <{pool_name_length}} \ +{pool_tier: <{pool_tier_length}} \ +{pool_pgs: <{pool_pgs_length}} \ +{pool_used: <{pool_used_length}} \ +{pool_usedpct: <{pool_usedpct_length}} \ +{pool_free: <{pool_free_length}} \ +{pool_objects: <{pool_objects_length}} \ +{pool_clones: <{pool_clones_length}} \ +{pool_copies: <{pool_copies_length}} \ +{pool_degraded: <{pool_degraded_length}} \ +{pool_read_ops: <{pool_read_ops_length}} \ +{pool_read_data: <{pool_read_data_length}} \ +{pool_write_ops: <{pool_write_ops_length}} \ +{pool_write_data: <{pool_write_data_length}} \ +{end_bold}".format( + bold="", + end_bold="", + pool_id_length=pool_id_length, + pool_name_length=pool_name_length, + pool_tier_length=pool_tier_length, + pool_pgs_length=pool_pgs_length, + pool_used_length=pool_used_length, + pool_usedpct_length=pool_usedpct_length, + pool_free_length=pool_free_length, + pool_objects_length=pool_num_objects_length, + pool_clones_length=pool_num_clones_length, + pool_copies_length=pool_num_copies_length, + pool_degraded_length=pool_num_degraded_length, + pool_write_ops_length=pool_write_ops_length, + pool_write_data_length=pool_write_data_length, + pool_read_ops_length=pool_read_ops_length, + pool_read_data_length=pool_read_data_length, + pool_id=pool_information["stats"]["id"], + pool_name=pool_information["name"], + pool_tier=pool_information["tier"], + pool_pgs=pool_information["pgs"], + pool_used=pool_information["stats"]["used_bytes"], + pool_usedpct=pool_information["stats"]["used_percent"], + pool_free=pool_information["stats"]["free_bytes"], + pool_objects=pool_information["stats"]["num_objects"], + pool_clones=pool_information["stats"]["num_object_clones"], + pool_copies=pool_information["stats"]["num_object_copies"], + pool_degraded=pool_information["stats"]["num_objects_degraded"], + pool_write_ops=pool_information["stats"]["write_ops"], + pool_write_data=pool_information["stats"]["write_bytes"], + pool_read_ops=pool_information["stats"]["read_ops"], + pool_read_data=pool_information["stats"]["read_bytes"], + ) + ) + + return "\n".join(pool_list_output) + + +# +# Volume functions +# +def ceph_volume_info(config, pool, volume): + """ + Get information about Ceph volume + + API endpoint: GET /api/v1/storage/ceph/volume/{pool}/{volume} + API arguments: + API schema: {json_data_object} + """ + response = call_api( + config, + "get", + "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool), + ) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "Volume not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_volume_list(config, limit, pool): + """ + Get list information about Ceph volumes (limited by {limit} and by {pool}) + + API endpoint: GET /api/v1/storage/ceph/volume + API arguments: limit={limit}, pool={pool} + API schema: [{json_data_object},{json_data_object},etc.] 
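+
+    Illustrative usage (the pool name below is hypothetical):
+        retcode, retdata = ceph_volume_list(config, limit=None, pool="vms")
+    On success this returns (True, [volume dicts]); on failure it returns
+    (False, "error message").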
+ """ + params = dict() + if limit: + params["limit"] = limit + if pool: + params["pool"] = pool + + response = call_api(config, "get", "/storage/ceph/volume", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_volume_add(config, pool, volume, size): + """ + Add new Ceph volume + + API endpoint: POST /api/v1/storage/ceph/volume + API arguments: volume={volume}, pool={pool}, size={size} + API schema: {"message":"{data}"} + """ + params = {"volume": volume, "pool": pool, "size": size} + response = call_api(config, "post", "/storage/ceph/volume", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_volume_upload(config, pool, volume, image_format, image_file): + """ + Upload a disk image to a Ceph volume + + API endpoint: POST /api/v1/storage/ceph/volume/{pool}/{volume}/upload + API arguments: image_format={image_format} + API schema: {"message":"{data}"} + """ + import click + + bar = UploadProgressBar( + image_file, end_message="Parsing file on remote side...", end_nl=False + ) + upload_data = MultipartEncoder( + fields={ + "file": ("filename", open(image_file, "rb"), "application/octet-stream") + } + ) + upload_monitor = MultipartEncoderMonitor(upload_data, bar.update) + + headers = {"Content-Type": upload_monitor.content_type} + params = {"image_format": image_format} + + response = call_api( + config, + "post", + "/storage/ceph/volume/{}/{}/upload".format(pool, volume), + headers=headers, + params=params, + data=upload_monitor, + ) + + click.echo("done.") + click.echo() + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_volume_remove(config, pool, volume): + """ + Remove Ceph volume + + API endpoint: DELETE /api/v1/storage/ceph/volume/{pool}/{volume} + API arguments: + API schema: {"message":"{data}"} + """ + response = call_api( + config, + "delete", + "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool), + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_volume_modify(config, pool, volume, new_name=None, new_size=None): + """ + Modify Ceph volume + + API endpoint: PUT /api/v1/storage/ceph/volume/{pool}/{volume} + API arguments: + API schema: {"message":"{data}"} + """ + + params = dict() + if new_name: + params["new_name"] = new_name + if new_size: + params["new_size"] = new_size + + response = call_api( + config, + "put", + "/storage/ceph/volume/{pool}/{volume}".format(volume=volume, pool=pool), + params=params, + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_volume_clone(config, pool, volume, new_volume): + """ + Clone Ceph volume + + API endpoint: POST /api/v1/storage/ceph/volume/{pool}/{volume} + API arguments: new_volume={new_volume + API schema: {"message":"{data}"} + """ + params = {"new_volume": new_volume} + response = call_api( + config, + "post", + "/storage/ceph/volume/{pool}/{volume}/clone".format(volume=volume, pool=pool), + params=params, + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def format_list_volume(volume_list): + 
# Handle empty list + if not volume_list: + volume_list = list() + + volume_list_output = [] + + volume_name_length = 5 + volume_pool_length = 5 + volume_size_length = 5 + volume_objects_length = 8 + volume_order_length = 6 + volume_format_length = 7 + volume_features_length = 10 + + for volume_information in volume_list: + # Set the Volume name length + _volume_name_length = len(volume_information["name"]) + 1 + if _volume_name_length > volume_name_length: + volume_name_length = _volume_name_length + + # Set the Volume pool length + _volume_pool_length = len(volume_information["pool"]) + 1 + if _volume_pool_length > volume_pool_length: + volume_pool_length = _volume_pool_length + + # Set the size and length + _volume_size_length = len(str(volume_information["stats"]["size"])) + 1 + if _volume_size_length > volume_size_length: + volume_size_length = _volume_size_length + + # Set the num_objects and length + _volume_objects_length = len(str(volume_information["stats"]["objects"])) + 1 + if _volume_objects_length > volume_objects_length: + volume_objects_length = _volume_objects_length + + # Set the order and length + _volume_order_length = len(str(volume_information["stats"]["order"])) + 1 + if _volume_order_length > volume_order_length: + volume_order_length = _volume_order_length + + # Set the format and length + _volume_format_length = len(str(volume_information["stats"]["format"])) + 1 + if _volume_format_length > volume_format_length: + volume_format_length = _volume_format_length + + # Set the features and length + _volume_features_length = ( + len(str(",".join(volume_information["stats"]["features"]))) + 1 + ) + if _volume_features_length > volume_features_length: + volume_features_length = _volume_features_length + + # Format the output header + volume_list_output.append( + "{bold}{volume_header: <{volume_header_length}} {details_header: <{details_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + volume_header_length=volume_name_length + volume_pool_length + 1, + details_header_length=volume_size_length + + volume_objects_length + + volume_order_length + + volume_format_length + + volume_features_length + + 4, + volume_header="Volumes " + + "".join(["-" for _ in range(8, volume_name_length + volume_pool_length)]), + details_header="Details " + + "".join( + [ + "-" + for _ in range( + 8, + volume_size_length + + volume_objects_length + + volume_order_length + + volume_format_length + + volume_features_length + + 3, + ) + ] + ), + ) + ) + + volume_list_output.append( + "{bold}\ +{volume_name: <{volume_name_length}} \ +{volume_pool: <{volume_pool_length}} \ +{volume_size: <{volume_size_length}} \ +{volume_objects: <{volume_objects_length}} \ +{volume_order: <{volume_order_length}} \ +{volume_format: <{volume_format_length}} \ +{volume_features: <{volume_features_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + volume_name_length=volume_name_length, + volume_pool_length=volume_pool_length, + volume_size_length=volume_size_length, + volume_objects_length=volume_objects_length, + volume_order_length=volume_order_length, + volume_format_length=volume_format_length, + volume_features_length=volume_features_length, + volume_name="Name", + volume_pool="Pool", + volume_size="Size", + volume_objects="Objects", + volume_order="Order", + volume_format="Format", + volume_features="Features", + ) + ) + + for volume_information in sorted(volume_list, key=lambda v: v["pool"] + v["name"]): + volume_list_output.append( + 
"{bold}\ +{volume_name: <{volume_name_length}} \ +{volume_pool: <{volume_pool_length}} \ +{volume_size: <{volume_size_length}} \ +{volume_objects: <{volume_objects_length}} \ +{volume_order: <{volume_order_length}} \ +{volume_format: <{volume_format_length}} \ +{volume_features: <{volume_features_length}} \ +{end_bold}".format( + bold="", + end_bold="", + volume_name_length=volume_name_length, + volume_pool_length=volume_pool_length, + volume_size_length=volume_size_length, + volume_objects_length=volume_objects_length, + volume_order_length=volume_order_length, + volume_format_length=volume_format_length, + volume_features_length=volume_features_length, + volume_name=volume_information["name"], + volume_pool=volume_information["pool"], + volume_size=volume_information["stats"]["size"], + volume_objects=volume_information["stats"]["objects"], + volume_order=volume_information["stats"]["order"], + volume_format=volume_information["stats"]["format"], + volume_features=",".join(volume_information["stats"]["features"]), + ) + ) + + return "\n".join(volume_list_output) + + +# +# Snapshot functions +# +def ceph_snapshot_info(config, pool, volume, snapshot): + """ + Get information about Ceph snapshot + + API endpoint: GET /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot} + API arguments: + API schema: {json_data_object} + """ + response = call_api( + config, + "get", + "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format( + snapshot=snapshot, volume=volume, pool=pool + ), + ) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "Snapshot not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_snapshot_list(config, limit, volume, pool): + """ + Get list information about Ceph snapshots (limited by {limit}, by {pool}, or by {volume}) + + API endpoint: GET /api/v1/storage/ceph/snapshot + API arguments: limit={limit}, volume={volume}, pool={pool} + API schema: [{json_data_object},{json_data_object},etc.] 
+ """ + params = dict() + if limit: + params["limit"] = limit + if volume: + params["volume"] = volume + if pool: + params["pool"] = pool + + response = call_api(config, "get", "/storage/ceph/snapshot", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ceph_snapshot_add(config, pool, volume, snapshot): + """ + Add new Ceph snapshot + + API endpoint: POST /api/v1/storage/ceph/snapshot + API arguments: snapshot={snapshot}, volume={volume}, pool={pool} + API schema: {"message":"{data}"} + """ + params = {"snapshot": snapshot, "volume": volume, "pool": pool} + response = call_api(config, "post", "/storage/ceph/snapshot", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_snapshot_remove(config, pool, volume, snapshot): + """ + Remove Ceph snapshot + + API endpoint: DELETE /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot} + API arguments: + API schema: {"message":"{data}"} + """ + response = call_api( + config, + "delete", + "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format( + snapshot=snapshot, volume=volume, pool=pool + ), + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None): + """ + Modify Ceph snapshot + + API endpoint: PUT /api/v1/storage/ceph/snapshot/{pool}/{volume}/{snapshot} + API arguments: + API schema: {"message":"{data}"} + """ + + params = dict() + if new_name: + params["new_name"] = new_name + + response = call_api( + config, + "put", + "/storage/ceph/snapshot/{pool}/{volume}/{snapshot}".format( + snapshot=snapshot, volume=volume, pool=pool + ), + params=params, + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def format_list_snapshot(snapshot_list): + # Handle empty list + if not snapshot_list: + snapshot_list = list() + + snapshot_list_output = [] + + snapshot_name_length = 5 + snapshot_volume_length = 7 + snapshot_pool_length = 5 + + for snapshot_information in snapshot_list: + snapshot_name = snapshot_information["snapshot"] + snapshot_volume = snapshot_information["volume"] + snapshot_pool = snapshot_information["pool"] + + # Set the Snapshot name length + _snapshot_name_length = len(snapshot_name) + 1 + if _snapshot_name_length > snapshot_name_length: + snapshot_name_length = _snapshot_name_length + + # Set the Snapshot volume length + _snapshot_volume_length = len(snapshot_volume) + 1 + if _snapshot_volume_length > snapshot_volume_length: + snapshot_volume_length = _snapshot_volume_length + + # Set the Snapshot pool length + _snapshot_pool_length = len(snapshot_pool) + 1 + if _snapshot_pool_length > snapshot_pool_length: + snapshot_pool_length = _snapshot_pool_length + + # Format the output header + snapshot_list_output.append( + "{bold}{snapshot_header: <{snapshot_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + snapshot_header_length=snapshot_name_length + + snapshot_volume_length + + snapshot_pool_length + + 2, + snapshot_header="Snapshots " + + "".join( + [ + "-" + for _ in range( + 10, + snapshot_name_length + + snapshot_volume_length + + snapshot_pool_length + + 1, + ) + ] + ), + ) + ) + + snapshot_list_output.append( + "{bold}\ 
+{snapshot_name: <{snapshot_name_length}} \ +{snapshot_volume: <{snapshot_volume_length}} \ +{snapshot_pool: <{snapshot_pool_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + snapshot_name_length=snapshot_name_length, + snapshot_volume_length=snapshot_volume_length, + snapshot_pool_length=snapshot_pool_length, + snapshot_name="Name", + snapshot_volume="Volume", + snapshot_pool="Pool", + ) + ) + + for snapshot_information in sorted( + snapshot_list, key=lambda s: s["pool"] + s["volume"] + s["snapshot"] + ): + snapshot_name = snapshot_information["snapshot"] + snapshot_volume = snapshot_information["volume"] + snapshot_pool = snapshot_information["pool"] + snapshot_list_output.append( + "{bold}\ +{snapshot_name: <{snapshot_name_length}} \ +{snapshot_volume: <{snapshot_volume_length}} \ +{snapshot_pool: <{snapshot_pool_length}} \ +{end_bold}".format( + bold="", + end_bold="", + snapshot_name_length=snapshot_name_length, + snapshot_volume_length=snapshot_volume_length, + snapshot_pool_length=snapshot_pool_length, + snapshot_name=snapshot_name, + snapshot_volume=snapshot_volume, + snapshot_pool=snapshot_pool, + ) + ) + + return "\n".join(snapshot_list_output) + + +# +# Benchmark functions +# +def ceph_benchmark_run(config, pool): + """ + Run a storage benchmark against {pool} + + API endpoint: POST /api/v1/storage/ceph/benchmark + API arguments: pool={pool} + API schema: {message} + """ + params = {"pool": pool} + response = call_api(config, "post", "/storage/ceph/benchmark", params=params) + + if response.status_code == 202: + retvalue = True + retdata = "Task ID: {}".format(response.json()["task_id"]) + else: + retvalue = False + retdata = response.json().get("message", "") + + return retvalue, retdata + + +def ceph_benchmark_list(config, job): + """ + View results of one or more previous benchmark runs + + API endpoint: GET /api/v1/storage/ceph/benchmark + API arguments: job={job} + API schema: {results} + """ + if job is not None: + params = {"job": job} + else: + params = {} + + response = call_api(config, "get", "/storage/ceph/benchmark", params=params) + + if response.status_code == 200: + retvalue = True + retdata = response.json() + else: + retvalue = False + retdata = response.json().get("message", "") + + return retvalue, retdata + + +def get_benchmark_list_results_legacy(benchmark_data): + if isinstance(benchmark_data, str): + benchmark_data = loads(benchmark_data) + benchmark_bandwidth = dict() + benchmark_iops = dict() + for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]: + benchmark_bandwidth[test] = format_bytes_tohuman( + int(benchmark_data[test]["overall"]["bandwidth"]) * 1024 + ) + benchmark_iops[test] = format_ops_tohuman( + int(benchmark_data[test]["overall"]["iops"]) + ) + + return benchmark_bandwidth, benchmark_iops + + +def get_benchmark_list_results_json(benchmark_data): + benchmark_bandwidth = dict() + benchmark_iops = dict() + for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]: + benchmark_test_data = benchmark_data[test] + active_class = None + for io_class in ["read", "write"]: + if benchmark_test_data["jobs"][0][io_class]["io_bytes"] > 0: + active_class = io_class + if active_class is not None: + benchmark_bandwidth[test] = format_bytes_tohuman( + int(benchmark_test_data["jobs"][0][active_class]["bw_bytes"]) + ) + benchmark_iops[test] = format_ops_tohuman( + int(benchmark_test_data["jobs"][0][active_class]["iops"]) + ) + + return benchmark_bandwidth, benchmark_iops + + +def 
get_benchmark_list_results(benchmark_format, benchmark_data): + if benchmark_format == 0: + benchmark_bandwidth, benchmark_iops = get_benchmark_list_results_legacy( + benchmark_data + ) + elif benchmark_format == 1: + benchmark_bandwidth, benchmark_iops = get_benchmark_list_results_json( + benchmark_data + ) + + seq_benchmark_bandwidth = "{} / {}".format( + benchmark_bandwidth["seq_read"], benchmark_bandwidth["seq_write"] + ) + seq_benchmark_iops = "{} / {}".format( + benchmark_iops["seq_read"], benchmark_iops["seq_write"] + ) + rand_benchmark_bandwidth = "{} / {}".format( + benchmark_bandwidth["rand_read_4K"], benchmark_bandwidth["rand_write_4K"] + ) + rand_benchmark_iops = "{} / {}".format( + benchmark_iops["rand_read_4K"], benchmark_iops["rand_write_4K"] + ) + + return ( + seq_benchmark_bandwidth, + seq_benchmark_iops, + rand_benchmark_bandwidth, + rand_benchmark_iops, + ) + + +def format_list_benchmark(config, benchmark_information): + benchmark_list_output = [] + + benchmark_job_length = 20 + benchmark_format_length = 6 + benchmark_bandwidth_length = dict() + benchmark_iops_length = dict() + + # For this output, we're only showing the Sequential (seq_read and seq_write) and 4k Random (rand_read_4K and rand_write_4K) results since we're showing them for each test result. + for test in ["seq_read", "seq_write", "rand_read_4K", "rand_write_4K"]: + benchmark_bandwidth_length[test] = 7 + benchmark_iops_length[test] = 6 + + benchmark_seq_bw_length = 15 + benchmark_seq_iops_length = 10 + benchmark_rand_bw_length = 15 + benchmark_rand_iops_length = 10 + + for benchmark in benchmark_information: + benchmark_job = benchmark["job"] + benchmark_format = benchmark.get("test_format", 0) # noqa: F841 + + _benchmark_job_length = len(benchmark_job) + if _benchmark_job_length > benchmark_job_length: + benchmark_job_length = _benchmark_job_length + + if benchmark["benchmark_result"] == "Running": + continue + + benchmark_data = benchmark["benchmark_result"] + ( + seq_benchmark_bandwidth, + seq_benchmark_iops, + rand_benchmark_bandwidth, + rand_benchmark_iops, + ) = get_benchmark_list_results(benchmark_format, benchmark_data) + + _benchmark_seq_bw_length = len(seq_benchmark_bandwidth) + 1 + if _benchmark_seq_bw_length > benchmark_seq_bw_length: + benchmark_seq_bw_length = _benchmark_seq_bw_length + + _benchmark_seq_iops_length = len(seq_benchmark_iops) + 1 + if _benchmark_seq_iops_length > benchmark_seq_iops_length: + benchmark_seq_iops_length = _benchmark_seq_iops_length + + _benchmark_rand_bw_length = len(rand_benchmark_bandwidth) + 1 + if _benchmark_rand_bw_length > benchmark_rand_bw_length: + benchmark_rand_bw_length = _benchmark_rand_bw_length + + _benchmark_rand_iops_length = len(rand_benchmark_iops) + 1 + if _benchmark_rand_iops_length > benchmark_rand_iops_length: + benchmark_rand_iops_length = _benchmark_rand_iops_length + + # Format the output header line 1 + benchmark_list_output.append( + "{bold}\ +{benchmark_job: <{benchmark_job_length}} \ +{seq_header: <{seq_header_length}} \ +{rand_header: <{rand_header_length}}\ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + benchmark_job_length=benchmark_job_length + benchmark_format_length + 1, + seq_header_length=benchmark_seq_bw_length + benchmark_seq_iops_length + 1, + rand_header_length=benchmark_rand_bw_length + + benchmark_rand_iops_length + + 1, + benchmark_job="Benchmarks " + + "".join( + [ + "-" + for _ in range( + 11, benchmark_job_length + benchmark_format_length + 2 + ) + ] + ), + seq_header="Sequential (4M 
blocks) " + + "".join( + [ + "-" + for _ in range( + 23, benchmark_seq_bw_length + benchmark_seq_iops_length + ) + ] + ), + rand_header="Random (4K blocks) " + + "".join( + [ + "-" + for _ in range( + 19, benchmark_rand_bw_length + benchmark_rand_iops_length + ) + ] + ), + ) + ) + + benchmark_list_output.append( + "{bold}\ +{benchmark_job: <{benchmark_job_length}} \ +{benchmark_format: <{benchmark_format_length}} \ +{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \ +{seq_benchmark_iops: <{seq_benchmark_iops_length}} \ +{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \ +{rand_benchmark_iops: <{rand_benchmark_iops_length}}\ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + benchmark_job_length=benchmark_job_length, + benchmark_format_length=benchmark_format_length, + seq_benchmark_bandwidth_length=benchmark_seq_bw_length, + seq_benchmark_iops_length=benchmark_seq_iops_length, + rand_benchmark_bandwidth_length=benchmark_rand_bw_length, + rand_benchmark_iops_length=benchmark_rand_iops_length, + benchmark_job="Job", + benchmark_format="Format", + seq_benchmark_bandwidth="R/W Bandwith/s", + seq_benchmark_iops="R/W IOPS", + rand_benchmark_bandwidth="R/W Bandwith/s", + rand_benchmark_iops="R/W IOPS", + ) + ) + + for benchmark in benchmark_information: + benchmark_job = benchmark["job"] + benchmark_format = benchmark.get("test_format", 0) # noqa: F841 + + if benchmark["benchmark_result"] == "Running": + seq_benchmark_bandwidth = "Running" + seq_benchmark_iops = "Running" + rand_benchmark_bandwidth = "Running" + rand_benchmark_iops = "Running" + else: + benchmark_data = benchmark["benchmark_result"] + ( + seq_benchmark_bandwidth, + seq_benchmark_iops, + rand_benchmark_bandwidth, + rand_benchmark_iops, + ) = get_benchmark_list_results(benchmark_format, benchmark_data) + + benchmark_list_output.append( + "{bold}\ +{benchmark_job: <{benchmark_job_length}} \ +{benchmark_format: <{benchmark_format_length}} \ +{seq_benchmark_bandwidth: <{seq_benchmark_bandwidth_length}} \ +{seq_benchmark_iops: <{seq_benchmark_iops_length}} \ +{rand_benchmark_bandwidth: <{rand_benchmark_bandwidth_length}} \ +{rand_benchmark_iops: <{rand_benchmark_iops_length}}\ +{end_bold}".format( + bold="", + end_bold="", + benchmark_job_length=benchmark_job_length, + benchmark_format_length=benchmark_format_length, + seq_benchmark_bandwidth_length=benchmark_seq_bw_length, + seq_benchmark_iops_length=benchmark_seq_iops_length, + rand_benchmark_bandwidth_length=benchmark_rand_bw_length, + rand_benchmark_iops_length=benchmark_rand_iops_length, + benchmark_job=benchmark_job, + benchmark_format=benchmark_format, + seq_benchmark_bandwidth=seq_benchmark_bandwidth, + seq_benchmark_iops=seq_benchmark_iops, + rand_benchmark_bandwidth=rand_benchmark_bandwidth, + rand_benchmark_iops=rand_benchmark_iops, + ) + ) + + return "\n".join(benchmark_list_output) + + +def format_info_benchmark(config, oformat, benchmark_information): + # This matrix is a list of the possible format functions for a benchmark result + # It is extensable in the future should newer formats be required. 
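+    # Concretely, format 0 selects the legacy formatter and format 1 selects
+    # the JSON-based formatter, dispatched via the benchmark_matrix below.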
+ benchmark_matrix = { + 0: format_info_benchmark_legacy, + 1: format_info_benchmark_json, + } + + benchmark_version = benchmark_information[0]["test_format"] + + if oformat == "json-pretty": + return dumps(benchmark_information, indent=4) + elif oformat == "json": + return dumps(benchmark_information) + else: + return benchmark_matrix[benchmark_version](config, benchmark_information[0]) + + +def format_info_benchmark_legacy(config, benchmark_information): + if benchmark_information["benchmark_result"] == "Running": + return "Benchmark test is still running." + + benchmark_details = benchmark_information["benchmark_result"] + + # Format a nice output; do this line-by-line then concat the elements at the end + ainformation = [] + ainformation.append( + "{}Storage Benchmark details:{}".format(ansiprint.bold(), ansiprint.end()) + ) + + nice_test_name_map = { + "seq_read": "Sequential Read (4M blocks)", + "seq_write": "Sequential Write (4M blocks)", + "rand_read_4M": "Random Read (4M blocks)", + "rand_write_4M": "Random Write (4M blocks)", + "rand_read_4K": "Random Read (4K blocks)", + "rand_write_4K": "Random Write (4K blocks)", + "rand_read_4K_lowdepth": "Random Read (4K blocks, single-queue)", + "rand_write_4K_lowdepth": "Random Write (4K blocks, single-queue)", + } + + test_name_length = 30 + overall_label_length = 12 + overall_column_length = 8 + bandwidth_label_length = 9 + bandwidth_column_length = 10 + iops_column_length = 6 + latency_column_length = 8 + cpuutil_label_length = 11 + cpuutil_column_length = 9 + + # Work around old results that did not have these tests + if "rand_read_4K_lowdepth" not in benchmark_details: + del nice_test_name_map["rand_read_4K_lowdepth"] + del nice_test_name_map["rand_write_4K_lowdepth"] + + for test in benchmark_details: + # Work around old results that had these obsolete tests + if test == "rand_read_256K" or test == "rand_write_256K": + continue + + _test_name_length = len(nice_test_name_map[test]) + if _test_name_length > test_name_length: + test_name_length = _test_name_length + + for element in benchmark_details[test]["overall"]: + _element_length = len(benchmark_details[test]["overall"][element]) + if _element_length > overall_column_length: + overall_column_length = _element_length + + for element in benchmark_details[test]["bandwidth"]: + try: + _element_length = len( + format_bytes_tohuman( + int(float(benchmark_details[test]["bandwidth"][element])) + ) + ) + except Exception: + _element_length = len(benchmark_details[test]["bandwidth"][element]) + if _element_length > bandwidth_column_length: + bandwidth_column_length = _element_length + + for element in benchmark_details[test]["iops"]: + try: + _element_length = len( + format_ops_tohuman( + int(float(benchmark_details[test]["iops"][element])) + ) + ) + except Exception: + _element_length = len(benchmark_details[test]["iops"][element]) + if _element_length > iops_column_length: + iops_column_length = _element_length + + for element in benchmark_details[test]["latency"]: + _element_length = len(benchmark_details[test]["latency"][element]) + if _element_length > latency_column_length: + latency_column_length = _element_length + + for element in benchmark_details[test]["cpu"]: + _element_length = len(benchmark_details[test]["cpu"][element]) + if _element_length > cpuutil_column_length: + cpuutil_column_length = _element_length + + for test in benchmark_details: + # Work around old results that had these obsolete tests + if test == "rand_read_256K" or test == "rand_write_256K": + continue + + 
ainformation.append("") + + test_details = benchmark_details[test] + + # Top row (Headers) + ainformation.append( + "{bold}\ +{test_name: <{test_name_length}} \ +{overall_label: <{overall_label_length}} \ +{overall: <{overall_length}} \ +{bandwidth_label: <{bandwidth_label_length}} \ +{bandwidth: <{bandwidth_length}} \ +{iops: <{iops_length}} \ +{latency: <{latency_length}} \ +{cpuutil_label: <{cpuutil_label_length}} \ +{cpuutil: <{cpuutil_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + test_name="Test:", + test_name_length=test_name_length, + overall_label="", + overall_label_length=overall_label_length, + overall="General", + overall_length=overall_column_length, + bandwidth_label="", + bandwidth_label_length=bandwidth_label_length, + bandwidth="Bandwidth", + bandwidth_length=bandwidth_column_length, + iops="IOPS", + iops_length=iops_column_length, + latency="Latency (μs)", + latency_length=latency_column_length, + cpuutil_label="", + cpuutil_label_length=cpuutil_label_length, + cpuutil="CPU Util", + cpuutil_length=cpuutil_column_length, + ) + ) + # Second row (Test, Size, Min, User)) + ainformation.append( + "{bold}\ +{test_name: <{test_name_length}} \ +{overall_label: >{overall_label_length}} \ +{overall: <{overall_length}} \ +{bandwidth_label: >{bandwidth_label_length}} \ +{bandwidth: <{bandwidth_length}} \ +{iops: <{iops_length}} \ +{latency: <{latency_length}} \ +{cpuutil_label: >{cpuutil_label_length}} \ +{cpuutil: <{cpuutil_length}} \ +{end_bold}".format( + bold="", + end_bold="", + test_name=nice_test_name_map[test], + test_name_length=test_name_length, + overall_label="Test Size:", + overall_label_length=overall_label_length, + overall=format_bytes_tohuman( + int(test_details["overall"]["iosize"]) * 1024 + ), + overall_length=overall_column_length, + bandwidth_label="Min:", + bandwidth_label_length=bandwidth_label_length, + bandwidth=format_bytes_tohuman( + int(test_details["bandwidth"]["min"]) * 1024 + ), + bandwidth_length=bandwidth_column_length, + iops=format_ops_tohuman(int(test_details["iops"]["min"])), + iops_length=iops_column_length, + latency=test_details["latency"]["min"], + latency_length=latency_column_length, + cpuutil_label="User:", + cpuutil_label_length=cpuutil_label_length, + cpuutil=test_details["cpu"]["user"], + cpuutil_length=cpuutil_column_length, + ) + ) + # Third row (blank, BW/s, Max, System)) + ainformation.append( + "{bold}\ +{test_name: <{test_name_length}} \ +{overall_label: >{overall_label_length}} \ +{overall: <{overall_length}} \ +{bandwidth_label: >{bandwidth_label_length}} \ +{bandwidth: <{bandwidth_length}} \ +{iops: <{iops_length}} \ +{latency: <{latency_length}} \ +{cpuutil_label: >{cpuutil_label_length}} \ +{cpuutil: <{cpuutil_length}} \ +{end_bold}".format( + bold="", + end_bold="", + test_name="", + test_name_length=test_name_length, + overall_label="Bandwidth/s:", + overall_label_length=overall_label_length, + overall=format_bytes_tohuman( + int(test_details["overall"]["bandwidth"]) * 1024 + ), + overall_length=overall_column_length, + bandwidth_label="Max:", + bandwidth_label_length=bandwidth_label_length, + bandwidth=format_bytes_tohuman( + int(test_details["bandwidth"]["max"]) * 1024 + ), + bandwidth_length=bandwidth_column_length, + iops=format_ops_tohuman(int(test_details["iops"]["max"])), + iops_length=iops_column_length, + latency=test_details["latency"]["max"], + latency_length=latency_column_length, + cpuutil_label="System:", + cpuutil_label_length=cpuutil_label_length, + 
cpuutil=test_details["cpu"]["system"], + cpuutil_length=cpuutil_column_length, + ) + ) + # Fourth row (blank, IOPS, Mean, CtxSq)) + ainformation.append( + "{bold}\ +{test_name: <{test_name_length}} \ +{overall_label: >{overall_label_length}} \ +{overall: <{overall_length}} \ +{bandwidth_label: >{bandwidth_label_length}} \ +{bandwidth: <{bandwidth_length}} \ +{iops: <{iops_length}} \ +{latency: <{latency_length}} \ +{cpuutil_label: >{cpuutil_label_length}} \ +{cpuutil: <{cpuutil_length}} \ +{end_bold}".format( + bold="", + end_bold="", + test_name="", + test_name_length=test_name_length, + overall_label="IOPS:", + overall_label_length=overall_label_length, + overall=format_ops_tohuman(int(test_details["overall"]["iops"])), + overall_length=overall_column_length, + bandwidth_label="Mean:", + bandwidth_label_length=bandwidth_label_length, + bandwidth=format_bytes_tohuman( + int(float(test_details["bandwidth"]["mean"])) * 1024 + ), + bandwidth_length=bandwidth_column_length, + iops=format_ops_tohuman(int(float(test_details["iops"]["mean"]))), + iops_length=iops_column_length, + latency=test_details["latency"]["mean"], + latency_length=latency_column_length, + cpuutil_label="CtxSw:", + cpuutil_label_length=cpuutil_label_length, + cpuutil=test_details["cpu"]["ctxsw"], + cpuutil_length=cpuutil_column_length, + ) + ) + # Fifth row (blank, Runtime, StdDev, MajFault)) + ainformation.append( + "{bold}\ +{test_name: <{test_name_length}} \ +{overall_label: >{overall_label_length}} \ +{overall: <{overall_length}} \ +{bandwidth_label: >{bandwidth_label_length}} \ +{bandwidth: <{bandwidth_length}} \ +{iops: <{iops_length}} \ +{latency: <{latency_length}} \ +{cpuutil_label: >{cpuutil_label_length}} \ +{cpuutil: <{cpuutil_length}} \ +{end_bold}".format( + bold="", + end_bold="", + test_name="", + test_name_length=test_name_length, + overall_label="Runtime (s):", + overall_label_length=overall_label_length, + overall=int(test_details["overall"]["runtime"]) / 1000.0, + overall_length=overall_column_length, + bandwidth_label="StdDev:", + bandwidth_label_length=bandwidth_label_length, + bandwidth=format_bytes_tohuman( + int(float(test_details["bandwidth"]["stdev"])) * 1024 + ), + bandwidth_length=bandwidth_column_length, + iops=format_ops_tohuman(int(float(test_details["iops"]["stdev"]))), + iops_length=iops_column_length, + latency=test_details["latency"]["stdev"], + latency_length=latency_column_length, + cpuutil_label="MajFault:", + cpuutil_label_length=cpuutil_label_length, + cpuutil=test_details["cpu"]["majfault"], + cpuutil_length=cpuutil_column_length, + ) + ) + # Sixth row (blank, blank, Samples, MinFault)) + ainformation.append( + "{bold}\ +{test_name: <{test_name_length}} \ +{overall_label: >{overall_label_length}} \ +{overall: <{overall_length}} \ +{bandwidth_label: >{bandwidth_label_length}} \ +{bandwidth: <{bandwidth_length}} \ +{iops: <{iops_length}} \ +{latency: <{latency_length}} \ +{cpuutil_label: >{cpuutil_label_length}} \ +{cpuutil: <{cpuutil_length}} \ +{end_bold}".format( + bold="", + end_bold="", + test_name="", + test_name_length=test_name_length, + overall_label="", + overall_label_length=overall_label_length, + overall="", + overall_length=overall_column_length, + bandwidth_label="Samples:", + bandwidth_label_length=bandwidth_label_length, + bandwidth=test_details["bandwidth"]["numsamples"], + bandwidth_length=bandwidth_column_length, + iops=test_details["iops"]["numsamples"], + iops_length=iops_column_length, + latency="", + latency_length=latency_column_length, + 
cpuutil_label="MinFault:", + cpuutil_label_length=cpuutil_label_length, + cpuutil=test_details["cpu"]["minfault"], + cpuutil_length=cpuutil_column_length, + ) + ) + + ainformation.append("") + + return "\n".join(ainformation) + + +def format_info_benchmark_json(config, benchmark_information): + if benchmark_information["benchmark_result"] == "Running": + return "Benchmark test is still running." + + benchmark_details = benchmark_information["benchmark_result"] + + # Format a nice output; do this line-by-line then concat the elements at the end + ainformation = [] + ainformation.append( + "{}Storage Benchmark details:{}".format(ansiprint.bold(), ansiprint.end()) + ) + + nice_test_name_map = { + "seq_read": "Sequential Read (4M blocks, queue depth 64)", + "seq_write": "Sequential Write (4M blocks, queue depth 64)", + "rand_read_4M": "Random Read (4M blocks, queue depth 64)", + "rand_write_4M": "Random Write (4M blocks queue depth 64)", + "rand_read_4K": "Random Read (4K blocks, queue depth 64)", + "rand_write_4K": "Random Write (4K blocks, queue depth 64)", + "rand_read_4K_lowdepth": "Random Read (4K blocks, queue depth 1)", + "rand_write_4K_lowdepth": "Random Write (4K blocks, queue depth 1)", + } + + for test in benchmark_details: + ainformation.append("") + + io_class = None + for _io_class in ["read", "write"]: + if benchmark_details[test]["jobs"][0][_io_class]["io_bytes"] > 0: + io_class = _io_class + if io_class is None: + continue + + job_details = benchmark_details[test]["jobs"][0] + + # Calculate the unified latency categories (in us) + latency_tree = list() + for field in job_details["latency_ns"]: + bucket = str(int(field) / 1000) + latency_tree.append((bucket, job_details["latency_ns"][field])) + for field in job_details["latency_us"]: + bucket = field + latency_tree.append((bucket, job_details["latency_us"][field])) + for field in job_details["latency_ms"]: + # That one annoying one + if field == ">=2000": + bucket = ">=2000000" + else: + bucket = str(int(field) * 1000) + latency_tree.append((bucket, job_details["latency_ms"][field])) + + # Find the minimum entry without a zero + useful_latency_tree = list() + for element in latency_tree: + if element[1] != 0: + useful_latency_tree.append(element) + + max_rows = 9 + if len(useful_latency_tree) > 9: + max_rows = len(useful_latency_tree) + elif len(useful_latency_tree) < 9: + while len(useful_latency_tree) < 9: + useful_latency_tree.append(("", "")) + + # Format the static data + overall_label = [ + "Overall BW/s:", + "Overall IOPS:", + "Total I/O:", + "Runtime (s):", + "User CPU %:", + "System CPU %:", + "Ctx Switches:", + "Major Faults:", + "Minor Faults:", + ] + while len(overall_label) < max_rows: + overall_label.append("") + + overall_data = [ + format_bytes_tohuman(int(job_details[io_class]["bw_bytes"])), + format_ops_tohuman(int(job_details[io_class]["iops"])), + format_bytes_tohuman(int(job_details[io_class]["io_bytes"])), + job_details["job_runtime"] / 1000, + job_details["usr_cpu"], + job_details["sys_cpu"], + job_details["ctx"], + job_details["majf"], + job_details["minf"], + ] + while len(overall_data) < max_rows: + overall_data.append("") + + bandwidth_label = [ + "Min:", + "Max:", + "Mean:", + "StdDev:", + "Samples:", + "", + "", + "", + "", + ] + while len(bandwidth_label) < max_rows: + bandwidth_label.append("") + + bandwidth_data = [ + format_bytes_tohuman(int(job_details[io_class]["bw_min"]) * 1024), + format_bytes_tohuman(int(job_details[io_class]["bw_max"]) * 1024), + 
format_bytes_tohuman(int(job_details[io_class]["bw_mean"]) * 1024), + format_bytes_tohuman(int(job_details[io_class]["bw_dev"]) * 1024), + job_details[io_class]["bw_samples"], + "", + "", + "", + "", + ] + while len(bandwidth_data) < max_rows: + bandwidth_data.append("") + + iops_data = [ + format_ops_tohuman(int(job_details[io_class]["iops_min"])), + format_ops_tohuman(int(job_details[io_class]["iops_max"])), + format_ops_tohuman(int(job_details[io_class]["iops_mean"])), + format_ops_tohuman(int(job_details[io_class]["iops_stddev"])), + job_details[io_class]["iops_samples"], + "", + "", + "", + "", + ] + while len(iops_data) < max_rows: + iops_data.append("") + + lat_data = [ + int(job_details[io_class]["lat_ns"]["min"]) / 1000, + int(job_details[io_class]["lat_ns"]["max"]) / 1000, + int(job_details[io_class]["lat_ns"]["mean"]) / 1000, + int(job_details[io_class]["lat_ns"]["stddev"]) / 1000, + "", + "", + "", + "", + "", + ] + while len(lat_data) < max_rows: + lat_data.append("") + + # Format the dynamic buckets + lat_bucket_label = list() + lat_bucket_data = list() + for element in useful_latency_tree: + lat_bucket_label.append(element[0]) + lat_bucket_data.append(element[1]) + + # Column default widths + overall_label_length = 0 + overall_column_length = 0 + bandwidth_label_length = 0 + bandwidth_column_length = 11 + iops_column_length = 4 + latency_column_length = 12 + latency_bucket_label_length = 0 + + # Column layout: + # General Bandwidth IOPS Latency Percentiles + # --------- ---------- -------- -------- --------------- + # Size Min Min Min A + # BW Max Max Max B + # IOPS Mean Mean Mean ... + # Runtime StdDev StdDev StdDev Z + # UsrCPU Samples Samples + # SysCPU + # CtxSw + # MajFault + # MinFault + + # Set column widths + for item in overall_label: + _item_length = len(str(item)) + if _item_length > overall_label_length: + overall_label_length = _item_length + + for item in overall_data: + _item_length = len(str(item)) + if _item_length > overall_column_length: + overall_column_length = _item_length + + test_name_length = len(nice_test_name_map[test]) + if test_name_length > overall_label_length + overall_column_length: + _diff = test_name_length - (overall_label_length + overall_column_length) + overall_column_length += _diff + + for item in bandwidth_label: + _item_length = len(str(item)) + if _item_length > bandwidth_label_length: + bandwidth_label_length = _item_length + + for item in bandwidth_data: + _item_length = len(str(item)) + if _item_length > bandwidth_column_length: + bandwidth_column_length = _item_length + + for item in iops_data: + _item_length = len(str(item)) + if _item_length > iops_column_length: + iops_column_length = _item_length + + for item in lat_data: + _item_length = len(str(item)) + if _item_length > latency_column_length: + latency_column_length = _item_length + + for item in lat_bucket_label: + _item_length = len(str(item)) + if _item_length > latency_bucket_label_length: + latency_bucket_label_length = _item_length + + # Top row (Headers) + ainformation.append( + "{bold}\ +{overall_label: <{overall_label_length}} \ +{bandwidth_label: <{bandwidth_label_length}} \ +{bandwidth: <{bandwidth_length}} \ +{iops: <{iops_length}} \ +{latency: <{latency_length}} \ +{latency_bucket_label: <{latency_bucket_label_length}} \ +{latency_bucket} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + overall_label=nice_test_name_map[test], + overall_label_length=overall_label_length, + bandwidth_label="", + 
bandwidth_label_length=bandwidth_label_length, + bandwidth="Bandwidth/s", + bandwidth_length=bandwidth_column_length, + iops="IOPS", + iops_length=iops_column_length, + latency="Latency (μs)", + latency_length=latency_column_length, + latency_bucket_label="Latency Buckets (μs/%)", + latency_bucket_label_length=latency_bucket_label_length, + latency_bucket="", + ) + ) + + for idx in range(0, max_rows): + # Top row (Headers) + ainformation.append( + "{bold}\ +{overall_label: >{overall_label_length}} \ +{overall: <{overall_length}} \ +{bandwidth_label: >{bandwidth_label_length}} \ +{bandwidth: <{bandwidth_length}} \ +{iops: <{iops_length}} \ +{latency: <{latency_length}} \ +{latency_bucket_label: >{latency_bucket_label_length}} \ +{latency_bucket} \ +{end_bold}".format( + bold="", + end_bold="", + overall_label=overall_label[idx], + overall_label_length=overall_label_length, + overall=overall_data[idx], + overall_length=overall_column_length, + bandwidth_label=bandwidth_label[idx], + bandwidth_label_length=bandwidth_label_length, + bandwidth=bandwidth_data[idx], + bandwidth_length=bandwidth_column_length, + iops=iops_data[idx], + iops_length=iops_column_length, + latency=lat_data[idx], + latency_length=latency_column_length, + latency_bucket_label=lat_bucket_label[idx], + latency_bucket_label_length=latency_bucket_label_length, + latency_bucket=lat_bucket_data[idx], + ) + ) + + return "\n".join(ainformation) diff --git a/cli-client-new/pvc/lib/cluster.py b/cli-client-new/pvc/lib/cluster.py new file mode 100644 index 00000000..c93fdb2d --- /dev/null +++ b/cli-client-new/pvc/lib/cluster.py @@ -0,0 +1,313 @@ +#!/usr/bin/env python3 + +# cluster.py - PVC CLI client function library, cluster management +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +############################################################################### + +import json + +import pvc.lib.ansiprint as ansiprint +from pvc.lib.common import call_api + + +def initialize(config, overwrite=False): + """ + Initialize the PVC cluster + + API endpoint: GET /api/v1/initialize + API arguments: overwrite, yes-i-really-mean-it + API schema: {json_data_object} + """ + params = {"yes-i-really-mean-it": "yes", "overwrite": overwrite} + response = call_api(config, "post", "/initialize", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def backup(config): + """ + Get a JSON backup of the cluster + + API endpoint: GET /api/v1/backup + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/backup") + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def restore(config, cluster_data): + """ + Restore a JSON backup to the cluster + + API endpoint: POST /api/v1/restore + API arguments: yes-i-really-mean-it + API schema: {json_data_object} + """ + cluster_data_json = json.dumps(cluster_data) + + params = {"yes-i-really-mean-it": "yes"} + data = {"cluster_data": cluster_data_json} + response = call_api(config, "post", "/restore", params=params, data=data) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def maintenance_mode(config, state): + """ + Enable or disable PVC cluster maintenance mode + + API endpoint: POST /api/v1/status + API arguments: {state}={state} + API schema: {json_data_object} + """ + params = {"state": state} + response = call_api(config, "post", "/status", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def get_info(config): + """ + Get status of the PVC cluster + + API endpoint: GET /api/v1/status + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/status") + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def format_info(cluster_information, oformat): + if oformat == "json": + return json.dumps(cluster_information) + + if oformat == "json-pretty": + return json.dumps(cluster_information, indent=4) + + # Plain formatting, i.e. 
human-readable + if ( + cluster_information.get("maintenance") == "true" + or cluster_information.get("cluster_health", {}).get("health", "N/A") == "N/A" + ): + health_colour = ansiprint.blue() + elif cluster_information.get("cluster_health", {}).get("health", 100) > 90: + health_colour = ansiprint.green() + elif cluster_information.get("cluster_health", {}).get("health", 100) > 50: + health_colour = ansiprint.yellow() + else: + health_colour = ansiprint.red() + + ainformation = [] + + ainformation.append( + "{}PVC cluster status:{}".format(ansiprint.bold(), ansiprint.end()) + ) + ainformation.append("") + + health_text = ( + f"{cluster_information.get('cluster_health', {}).get('health', 'N/A')}" + ) + if health_text != "N/A": + health_text += "%" + if cluster_information.get("maintenance") == "true": + health_text += " (maintenance on)" + + ainformation.append( + "{}Cluster health:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + health_colour, + health_text, + ansiprint.end(), + ) + ) + if cluster_information.get("cluster_health", {}).get("messages"): + health_messages = "\n > ".join( + sorted(cluster_information["cluster_health"]["messages"]) + ) + ainformation.append( + "{}Health messages:{} > {}".format( + ansiprint.purple(), + ansiprint.end(), + health_messages, + ) + ) + else: + ainformation.append( + "{}Health messages:{} N/A".format( + ansiprint.purple(), + ansiprint.end(), + ) + ) + + if oformat == "short": + return "\n".join(ainformation) + + ainformation.append("") + ainformation.append( + "{}Primary node:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["primary_node"] + ) + ) + ainformation.append( + "{}PVC version:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + cluster_information.get("pvc_version", "N/A"), + ) + ) + ainformation.append( + "{}Cluster upstream IP:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["upstream_ip"] + ) + ) + ainformation.append("") + ainformation.append( + "{}Total nodes:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["nodes"]["total"] + ) + ) + ainformation.append( + "{}Total VMs:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["vms"]["total"] + ) + ) + ainformation.append( + "{}Total networks:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["networks"] + ) + ) + ainformation.append( + "{}Total OSDs:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["osds"]["total"] + ) + ) + ainformation.append( + "{}Total pools:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["pools"] + ) + ) + ainformation.append( + "{}Total volumes:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["volumes"] + ) + ) + ainformation.append( + "{}Total snapshots:{} {}".format( + ansiprint.purple(), ansiprint.end(), cluster_information["snapshots"] + ) + ) + + nodes_string = "{}Nodes:{} {}/{} {}ready,run{}".format( + ansiprint.purple(), + ansiprint.end(), + cluster_information["nodes"].get("run,ready", 0), + cluster_information["nodes"].get("total", 0), + ansiprint.green(), + ansiprint.end(), + ) + for state, count in cluster_information["nodes"].items(): + if state == "total" or state == "run,ready": + continue + + nodes_string += " {}/{} {}{}{}".format( + count, + cluster_information["nodes"]["total"], + ansiprint.yellow(), + state, + ansiprint.end(), + ) + + ainformation.append("") + ainformation.append(nodes_string) + + vms_string = "{}VMs:{} {}/{} 
{}start{}".format( + ansiprint.purple(), + ansiprint.end(), + cluster_information["vms"].get("start", 0), + cluster_information["vms"].get("total", 0), + ansiprint.green(), + ansiprint.end(), + ) + for state, count in cluster_information["vms"].items(): + if state == "total" or state == "start": + continue + + if state in ["disable", "migrate", "unmigrate", "provision"]: + colour = ansiprint.blue() + else: + colour = ansiprint.yellow() + + vms_string += " {}/{} {}{}{}".format( + count, cluster_information["vms"]["total"], colour, state, ansiprint.end() + ) + + ainformation.append("") + ainformation.append(vms_string) + + if cluster_information["osds"]["total"] > 0: + osds_string = "{}Ceph OSDs:{} {}/{} {}up,in{}".format( + ansiprint.purple(), + ansiprint.end(), + cluster_information["osds"].get("up,in", 0), + cluster_information["osds"].get("total", 0), + ansiprint.green(), + ansiprint.end(), + ) + for state, count in cluster_information["osds"].items(): + if state == "total" or state == "up,in": + continue + + osds_string += " {}/{} {}{}{}".format( + count, + cluster_information["osds"]["total"], + ansiprint.yellow(), + state, + ansiprint.end(), + ) + + ainformation.append("") + ainformation.append(osds_string) + + ainformation.append("") + return "\n".join(ainformation) diff --git a/cli-client-new/pvc/lib/common.py b/cli-client-new/pvc/lib/common.py new file mode 100644 index 00000000..8071884c --- /dev/null +++ b/cli-client-new/pvc/lib/common.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 + +# common.py - PVC CLI client function library, Common functions +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +############################################################################### + +import os +import math +import time +import requests +import click +from urllib3 import disable_warnings + + +def format_bytes(size_bytes): + byte_unit_matrix = { + "B": 1, + "K": 1024, + "M": 1024 * 1024, + "G": 1024 * 1024 * 1024, + "T": 1024 * 1024 * 1024 * 1024, + "P": 1024 * 1024 * 1024 * 1024 * 1024, + } + human_bytes = "0B" + for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get): + formatted_bytes = int(math.ceil(size_bytes / byte_unit_matrix[unit])) + if formatted_bytes < 10000: + human_bytes = "{}{}".format(formatted_bytes, unit) + break + return human_bytes + + +def format_metric(integer): + integer_unit_matrix = { + "": 1, + "K": 1000, + "M": 1000 * 1000, + "B": 1000 * 1000 * 1000, + "T": 1000 * 1000 * 1000 * 1000, + "Q": 1000 * 1000 * 1000 * 1000 * 1000, + } + human_integer = "0" + for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get): + formatted_integer = int(math.ceil(integer / integer_unit_matrix[unit])) + if formatted_integer < 10000: + human_integer = "{}{}".format(formatted_integer, unit) + break + return human_integer + + +class UploadProgressBar(object): + def __init__(self, filename, end_message="", end_nl=True): + file_size = os.path.getsize(filename) + file_size_human = format_bytes(file_size) + click.echo("Uploading file (total size {})...".format(file_size_human)) + + self.length = file_size + self.time_last = int(round(time.time() * 1000)) - 1000 + self.bytes_last = 0 + self.bytes_diff = 0 + self.is_end = False + + self.end_message = end_message + self.end_nl = end_nl + if not self.end_nl: + self.end_suffix = " " + else: + self.end_suffix = "" + + self.bar = click.progressbar(length=self.length, show_eta=True) + + def update(self, monitor): + bytes_cur = monitor.bytes_read + self.bytes_diff += bytes_cur - self.bytes_last + if self.bytes_last == bytes_cur: + self.is_end = True + self.bytes_last = bytes_cur + + time_cur = int(round(time.time() * 1000)) + if (time_cur - 1000) > self.time_last: + self.time_last = time_cur + self.bar.update(self.bytes_diff) + self.bytes_diff = 0 + + if self.is_end: + self.bar.update(self.bytes_diff) + self.bytes_diff = 0 + click.echo() + click.echo() + if self.end_message: + click.echo(self.end_message + self.end_suffix, nl=self.end_nl) + + +class ErrorResponse(requests.Response): + def __init__(self, json_data, status_code): + self.json_data = json_data + self.status_code = status_code + + def json(self): + return self.json_data + + +def call_api( + config, + operation, + request_uri, + headers={}, + params=None, + data=None, + files=None, +): + # Set the connect timeout to 2 seconds but extremely long (48 hour) data timeout + timeout = (2.05, 172800) + + # Craft the URI + uri = "{}://{}{}{}".format( + config["api_scheme"], config["api_host"], config["api_prefix"], request_uri + ) + + # Craft the authentication header if required + if config["api_key"]: + headers["X-Api-Key"] = config["api_key"] + + # Determine the request type and hit the API + disable_warnings() + try: + if operation == "get": + response = requests.get( + uri, + timeout=timeout, + headers=headers, + params=params, + data=data, + verify=config["verify_ssl"], + ) + if operation == "post": + response = requests.post( + uri, + timeout=timeout, + headers=headers, + params=params, + data=data, + files=files, + verify=config["verify_ssl"], + ) + if operation == "put": + response = requests.put( + uri, + timeout=timeout, + headers=headers, + params=params, + 
data=data,
+                files=files,
+                verify=config["verify_ssl"],
+            )
+        if operation == "patch":
+            response = requests.patch(
+                uri,
+                timeout=timeout,
+                headers=headers,
+                params=params,
+                data=data,
+                verify=config["verify_ssl"],
+            )
+        if operation == "delete":
+            response = requests.delete(
+                uri,
+                timeout=timeout,
+                headers=headers,
+                params=params,
+                data=data,
+                verify=config["verify_ssl"],
+            )
+    except Exception as e:
+        message = "Failed to connect to the API: {}".format(e)
+        response = ErrorResponse({"message": message}, 500)
+
+    # Display debug output
+    if config["debug"]:
+        click.echo("API endpoint: {}".format(uri), err=True)
+        click.echo("Response code: {}".format(response.status_code), err=True)
+        click.echo("Response headers: {}".format(response.headers), err=True)
+        click.echo(err=True)
+
+    # Return the response object
+    return response
diff --git a/cli-client-new/pvc/lib/network.py b/cli-client-new/pvc/lib/network.py
new file mode 100644
index 00000000..8b07960f
--- /dev/null
+++ b/cli-client-new/pvc/lib/network.py
@@ -0,0 +1,1487 @@
+#!/usr/bin/env python3
+
+# network.py - PVC CLI client function library, Network functions
+# Part of the Parallel Virtual Cluster (PVC) system
+#
+# Copyright (C) 2018-2022 Joshua M. Boniface
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+###############################################################################
+
+import re
+import pvc.lib.ansiprint as ansiprint
+from pvc.lib.common import call_api
+
+
+def isValidMAC(macaddr):
+    allowed = re.compile(
+        r"""
+        (
+            ^([0-9A-F]{2}[:]){5}([0-9A-F]{2})$
+        )
+        """,
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    if allowed.match(macaddr):
+        return True
+    else:
+        return False
+
+
+def isValidIP(ipaddr):
+    ip4_blocks = str(ipaddr).split(".")
+    if len(ip4_blocks) == 4:
+        for block in ip4_blocks:
+            # Ensure the block is numeric; non-digit blocks are rejected here
+            if not block.isdigit():
+                return False
+            tmp = int(block)
+            # Reject octets outside the valid 0-255 range
+            if tmp < 0 or tmp > 255:
+                return False
+        return True
+    return False
+
+
+#
+# Primary functions
+#
+def net_info(config, net):
+    """
+    Get information about network
+
+    API endpoint: GET /api/v1/network/{net}
+    API arguments:
+    API schema: {json_data_object}
+    """
+    response = call_api(config, "get", "/network/{net}".format(net=net))
+
+    if response.status_code == 200:
+        if isinstance(response.json(), list) and len(response.json()) != 1:
+            # No exact match; return not found
+            return False, "Network not found."
+        else:
+            # Return a single instance if the response is a list
+            if isinstance(response.json(), list):
+                return True, response.json()[0]
+            # This shouldn't happen, but is here just in case
+            else:
+                return True, response.json()
+    else:
+        return False, response.json().get("message", "")
+
+
+def net_list(config, limit):
+    """
+    Get list information about networks (limited by {limit})
+
+    API endpoint: GET /api/v1/network
+    API arguments: limit={limit}
+    API schema: [{json_data_object},{json_data_object},etc.]
+ """ + params = dict() + if limit: + params["limit"] = limit + + response = call_api(config, "get", "/network", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def net_add( + config, + vni, + description, + nettype, + mtu, + domain, + name_servers, + ip4_network, + ip4_gateway, + ip6_network, + ip6_gateway, + dhcp4_flag, + dhcp4_start, + dhcp4_end, +): + """ + Add new network + + API endpoint: POST /api/v1/network + API arguments: lots + API schema: {"message":"{data}"} + """ + params = { + "vni": vni, + "description": description, + "nettype": nettype, + "mtu": mtu, + "domain": domain, + "name_servers": name_servers, + "ip4_network": ip4_network, + "ip4_gateway": ip4_gateway, + "ip6_network": ip6_network, + "ip6_gateway": ip6_gateway, + "dhcp4": dhcp4_flag, + "dhcp4_start": dhcp4_start, + "dhcp4_end": dhcp4_end, + } + response = call_api(config, "post", "/network", params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def net_modify( + config, + net, + description, + mtu, + domain, + name_servers, + ip4_network, + ip4_gateway, + ip6_network, + ip6_gateway, + dhcp4_flag, + dhcp4_start, + dhcp4_end, +): + """ + Modify a network + + API endpoint: POST /api/v1/network/{net} + API arguments: lots + API schema: {"message":"{data}"} + """ + params = dict() + if description is not None: + params["description"] = description + if mtu is not None: + params["mtu"] = mtu + if domain is not None: + params["domain"] = domain + if name_servers is not None: + params["name_servers"] = name_servers + if ip4_network is not None: + params["ip4_network"] = ip4_network + if ip4_gateway is not None: + params["ip4_gateway"] = ip4_gateway + if ip6_network is not None: + params["ip6_network"] = ip6_network + if ip6_gateway is not None: + params["ip6_gateway"] = ip6_gateway + if dhcp4_flag is not None: + params["dhcp4"] = dhcp4_flag + if dhcp4_start is not None: + params["dhcp4_start"] = dhcp4_start + if dhcp4_end is not None: + params["dhcp4_end"] = dhcp4_end + + response = call_api(config, "put", "/network/{net}".format(net=net), params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def net_remove(config, net): + """ + Remove a network + + API endpoint: DELETE /api/v1/network/{net} + API arguments: + API schema: {"message":"{data}"} + """ + response = call_api(config, "delete", "/network/{net}".format(net=net)) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +# +# DHCP lease functions +# +def net_dhcp_info(config, net, mac): + """A + Get information about network DHCP lease + + API endpoint: GET /api/v1/network/{net}/lease/{mac} + API arguments: + API schema: {json_data_object} + """ + response = call_api( + config, "get", "/network/{net}/lease/{mac}".format(net=net, mac=mac) + ) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "Lease not found." 
+ else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def net_dhcp_list(config, net, limit, only_static=False): + """ + Get list information about leases (limited by {limit}) + + API endpoint: GET /api/v1/network/{net}/lease + API arguments: limit={limit}, static={only_static} + API schema: [{json_data_object},{json_data_object},etc.] + """ + params = dict() + if limit: + params["limit"] = limit + + if only_static: + params["static"] = True + else: + params["static"] = False + + response = call_api( + config, "get", "/network/{net}/lease".format(net=net), params=params + ) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def net_dhcp_add(config, net, ipaddr, macaddr, hostname): + """ + Add new network DHCP lease + + API endpoint: POST /api/v1/network/{net}/lease + API arguments: macaddress=macaddr, ipaddress=ipaddr, hostname=hostname + API schema: {"message":"{data}"} + """ + params = {"macaddress": macaddr, "ipaddress": ipaddr, "hostname": hostname} + response = call_api( + config, "post", "/network/{net}/lease".format(net=net), params=params + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def net_dhcp_remove(config, net, mac): + """ + Remove a network DHCP lease + + API endpoint: DELETE /api/v1/network/{vni}/lease/{mac} + API arguments: + API schema: {"message":"{data}"} + """ + response = call_api( + config, "delete", "/network/{net}/lease/{mac}".format(net=net, mac=mac) + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +# +# ACL functions +# +def net_acl_info(config, net, description): + """ + Get information about network ACL + + API endpoint: GET /api/v1/network/{net}/acl/{description} + API arguments: + API schema: {json_data_object} + """ + response = call_api( + config, + "get", + "/network/{net}/acl/{description}".format(net=net, description=description), + ) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "ACL not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def net_acl_list(config, net, limit, direction): + """ + Get list information about ACLs (limited by {limit}) + + API endpoint: GET /api/v1/network/{net}/acl + API arguments: limit={limit}, direction={direction} + API schema: [{json_data_object},{json_data_object},etc.] 
+ """ + params = dict() + if limit: + params["limit"] = limit + if direction is not None: + params["direction"] = direction + + response = call_api( + config, "get", "/network/{net}/acl".format(net=net), params=params + ) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def net_acl_add(config, net, direction, description, rule, order): + """ + Add new network acl + + API endpoint: POST /api/v1/network/{net}/acl + API arguments: description=description, direction=direction, order=order, rule=rule + API schema: {"message":"{data}"} + """ + params = dict() + params["description"] = description + params["direction"] = direction + params["rule"] = rule + if order is not None: + params["order"] = order + + response = call_api( + config, "post", "/network/{net}/acl".format(net=net), params=params + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def net_acl_remove(config, net, description): + """ + Remove a network ACL + + API endpoint: DELETE /api/v1/network/{vni}/acl/{description} + API arguments: + API schema: {"message":"{data}"} + """ + response = call_api( + config, + "delete", + "/network/{net}/acl/{description}".format(net=net, description=description), + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +# +# SR-IOV functions +# +def net_sriov_pf_list(config, node): + """ + List all PFs on NODE + + API endpoint: GET /api/v1/sriov/pf/ + API arguments: node={node} + API schema: [{json_data_object},{json_data_object},etc.] + """ + response = call_api(config, "get", "/sriov/pf/{}".format(node)) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def net_sriov_vf_set( + config, + node, + vf, + vlan_id, + vlan_qos, + tx_rate_min, + tx_rate_max, + link_state, + spoof_check, + trust, + query_rss, +): + """ + Mdoify configuration of a SR-IOV VF + + API endpoint: PUT /api/v1/sriov/vf// + API arguments: vlan_id={vlan_id}, vlan_qos={vlan_qos}, tx_rate_min={tx_rate_min}, tx_rate_max={tx_rate_max}, + link_state={link_state}, spoof_check={spoof_check}, trust={trust}, query_rss={query_rss} + API schema: {"message": "{data}"} + """ + params = dict() + + # Update any params that we've sent + if vlan_id is not None: + params["vlan_id"] = vlan_id + + if vlan_qos is not None: + params["vlan_qos"] = vlan_qos + + if tx_rate_min is not None: + params["tx_rate_min"] = tx_rate_min + + if tx_rate_max is not None: + params["tx_rate_max"] = tx_rate_max + + if link_state is not None: + params["link_state"] = link_state + + if spoof_check is not None: + params["spoof_check"] = spoof_check + + if trust is not None: + params["trust"] = trust + + if query_rss is not None: + params["query_rss"] = query_rss + + # Write the new configuration to the API + response = call_api( + config, "put", "/sriov/vf/{node}/{vf}".format(node=node, vf=vf), params=params + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def net_sriov_vf_list(config, node, pf=None): + """ + List all VFs on NODE, optionally limited by PF + + API endpoint: GET /api/v1/sriov/vf/ + API arguments: node={node}, pf={pf} + API schema: [{json_data_object},{json_data_object},etc.] 
+ """ + params = dict() + params["pf"] = pf + + response = call_api(config, "get", "/sriov/vf/{}".format(node), params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def net_sriov_vf_info(config, node, vf): + """ + Get info about VF on NODE + + API endpoint: GET /api/v1/sriov/vf// + API arguments: + API schema: [{json_data_object}] + """ + response = call_api(config, "get", "/sriov/vf/{}/{}".format(node, vf)) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "VF not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +# +# Output display functions +# +def getColour(value): + if value in ["False", "None"]: + return ansiprint.blue() + else: + return ansiprint.green() + + +def getOutputColours(network_information): + v6_flag_colour = getColour(network_information["ip6"]["network"]) + v4_flag_colour = getColour(network_information["ip4"]["network"]) + dhcp6_flag_colour = getColour(network_information["ip6"]["dhcp_flag"]) + dhcp4_flag_colour = getColour(network_information["ip4"]["dhcp_flag"]) + + return v6_flag_colour, v4_flag_colour, dhcp6_flag_colour, dhcp4_flag_colour + + +def format_info(config, network_information, long_output): + if not network_information: + return "No network found" + + ( + v6_flag_colour, + v4_flag_colour, + dhcp6_flag_colour, + dhcp4_flag_colour, + ) = getOutputColours(network_information) + + # Format a nice output: do this line-by-line then concat the elements at the end + ainformation = [] + ainformation.append( + "{}Virtual network information:{}".format(ansiprint.bold(), ansiprint.end()) + ) + ainformation.append("") + # Basic information + ainformation.append( + "{}VNI:{} {}".format( + ansiprint.purple(), ansiprint.end(), network_information["vni"] + ) + ) + ainformation.append( + "{}Type:{} {}".format( + ansiprint.purple(), ansiprint.end(), network_information["type"] + ) + ) + ainformation.append( + "{}MTU:{} {}".format( + ansiprint.purple(), ansiprint.end(), network_information["mtu"] + ) + ) + ainformation.append( + "{}Description:{} {}".format( + ansiprint.purple(), ansiprint.end(), network_information["description"] + ) + ) + if network_information["type"] == "managed": + ainformation.append( + "{}Domain:{} {}".format( + ansiprint.purple(), ansiprint.end(), network_information["domain"] + ) + ) + ainformation.append( + "{}DNS Servers:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + ", ".join(network_information["name_servers"]), + ) + ) + if network_information["ip6"]["network"] != "None": + ainformation.append("") + ainformation.append( + "{}IPv6 network:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + network_information["ip6"]["network"], + ) + ) + ainformation.append( + "{}IPv6 gateway:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + network_information["ip6"]["gateway"], + ) + ) + ainformation.append( + "{}DHCPv6 enabled:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + dhcp6_flag_colour, + network_information["ip6"]["dhcp_flag"], + ansiprint.end(), + ) + ) + if network_information["ip4"]["network"] != "None": + ainformation.append("") + ainformation.append( + "{}IPv4 
network:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + network_information["ip4"]["network"], + ) + ) + ainformation.append( + "{}IPv4 gateway:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + network_information["ip4"]["gateway"], + ) + ) + ainformation.append( + "{}DHCPv4 enabled:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + dhcp4_flag_colour, + network_information["ip4"]["dhcp_flag"], + ansiprint.end(), + ) + ) + if network_information["ip4"]["dhcp_flag"] == "True": + ainformation.append( + "{}DHCPv4 range:{} {} - {}".format( + ansiprint.purple(), + ansiprint.end(), + network_information["ip4"]["dhcp_start"], + network_information["ip4"]["dhcp_end"], + ) + ) + + if long_output: + retcode, dhcp4_reservations_list = net_dhcp_list( + config, network_information["vni"], None + ) + if dhcp4_reservations_list: + ainformation.append("") + ainformation.append( + "{}Client DHCPv4 reservations:{}".format( + ansiprint.bold(), ansiprint.end() + ) + ) + ainformation.append("") + if retcode: + dhcp4_reservations_string = format_list_dhcp( + dhcp4_reservations_list + ) + for line in dhcp4_reservations_string.split("\n"): + ainformation.append(line) + else: + ainformation.append("No leases found") + + retcode, firewall_rules_list = net_acl_list( + config, network_information["vni"], None, None + ) + if firewall_rules_list: + ainformation.append("") + ainformation.append( + "{}Network firewall rules:{}".format( + ansiprint.bold(), ansiprint.end() + ) + ) + ainformation.append("") + if retcode: + firewall_rules_string = format_list_acl(firewall_rules_list) + for line in firewall_rules_string.split("\n"): + ainformation.append(line) + else: + ainformation.append("No ACLs found") + + # Join it all together + return "\n".join(ainformation) + + +def format_list(config, network_list): + if not network_list: + return "No network found" + + network_list_output = [] + + # Determine optimal column widths + net_vni_length = 5 + net_description_length = 12 + net_nettype_length = 8 + net_mtu_length = 4 + net_domain_length = 6 + net_v6_flag_length = 6 + net_dhcp6_flag_length = 7 + net_v4_flag_length = 6 + net_dhcp4_flag_length = 7 + for network_information in network_list: + # vni column + _net_vni_length = len(str(network_information["vni"])) + 1 + if _net_vni_length > net_vni_length: + net_vni_length = _net_vni_length + # description column + _net_description_length = len(network_information["description"]) + 1 + if _net_description_length > net_description_length: + net_description_length = _net_description_length + # mtu column + _net_mtu_length = len(str(network_information["mtu"])) + 1 + if _net_mtu_length > net_mtu_length: + net_mtu_length = _net_mtu_length + # domain column + _net_domain_length = len(network_information["domain"]) + 1 + if _net_domain_length > net_domain_length: + net_domain_length = _net_domain_length + + # Format the string (header) + network_list_output.append( + "{bold}{networks_header: <{networks_header_length}} {config_header: <{config_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + networks_header_length=net_vni_length + net_description_length + 1, + config_header_length=net_nettype_length + + net_mtu_length + + net_domain_length + + net_v6_flag_length + + net_dhcp6_flag_length + + net_v4_flag_length + + net_dhcp4_flag_length + + 7, + networks_header="Networks " + + "".join(["-" for _ in range(9, net_vni_length + net_description_length)]), + config_header="Config " + + "".join( + [ + "-" + for _ in range( + 7, 
+ net_nettype_length + + net_mtu_length + + net_domain_length + + net_v6_flag_length + + net_dhcp6_flag_length + + net_v4_flag_length + + net_dhcp4_flag_length + + 6, + ) + ] + ), + ) + ) + network_list_output.append( + "{bold}\ +{net_vni: <{net_vni_length}} \ +{net_description: <{net_description_length}} \ +{net_nettype: <{net_nettype_length}} \ +{net_mtu: <{net_mtu_length}} \ +{net_domain: <{net_domain_length}} \ +{net_v6_flag: <{net_v6_flag_length}} \ +{net_dhcp6_flag: <{net_dhcp6_flag_length}} \ +{net_v4_flag: <{net_v4_flag_length}} \ +{net_dhcp4_flag: <{net_dhcp4_flag_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + net_vni_length=net_vni_length, + net_description_length=net_description_length, + net_nettype_length=net_nettype_length, + net_mtu_length=net_mtu_length, + net_domain_length=net_domain_length, + net_v6_flag_length=net_v6_flag_length, + net_dhcp6_flag_length=net_dhcp6_flag_length, + net_v4_flag_length=net_v4_flag_length, + net_dhcp4_flag_length=net_dhcp4_flag_length, + net_vni="VNI", + net_description="Description", + net_nettype="Type", + net_mtu="MTU", + net_domain="Domain", + net_v6_flag="IPv6", + net_dhcp6_flag="DHCPv6", + net_v4_flag="IPv4", + net_dhcp4_flag="DHCPv4", + ) + ) + + for network_information in sorted(network_list, key=lambda n: int(n["vni"])): + ( + v6_flag_colour, + v4_flag_colour, + dhcp6_flag_colour, + dhcp4_flag_colour, + ) = getOutputColours(network_information) + if network_information["ip4"]["network"] != "None": + v4_flag = "True" + else: + v4_flag = "False" + + if network_information["ip6"]["network"] != "None": + v6_flag = "True" + else: + v6_flag = "False" + + network_list_output.append( + "{bold}\ +{net_vni: <{net_vni_length}} \ +{net_description: <{net_description_length}} \ +{net_nettype: <{net_nettype_length}} \ +{net_mtu: <{net_mtu_length}} \ +{net_domain: <{net_domain_length}} \ +{v6_flag_colour}{net_v6_flag: <{net_v6_flag_length}}{colour_off} \ +{dhcp6_flag_colour}{net_dhcp6_flag: <{net_dhcp6_flag_length}}{colour_off} \ +{v4_flag_colour}{net_v4_flag: <{net_v4_flag_length}}{colour_off} \ +{dhcp4_flag_colour}{net_dhcp4_flag: <{net_dhcp4_flag_length}}{colour_off} \ +{end_bold}".format( + bold="", + end_bold="", + net_vni_length=net_vni_length, + net_description_length=net_description_length, + net_nettype_length=net_nettype_length, + net_mtu_length=net_mtu_length, + net_domain_length=net_domain_length, + net_v6_flag_length=net_v6_flag_length, + net_dhcp6_flag_length=net_dhcp6_flag_length, + net_v4_flag_length=net_v4_flag_length, + net_dhcp4_flag_length=net_dhcp4_flag_length, + net_vni=network_information["vni"], + net_description=network_information["description"], + net_nettype=network_information["type"], + net_mtu=network_information["mtu"], + net_domain=network_information["domain"], + net_v6_flag=v6_flag, + v6_flag_colour=v6_flag_colour, + net_dhcp6_flag=network_information["ip6"]["dhcp_flag"], + dhcp6_flag_colour=dhcp6_flag_colour, + net_v4_flag=v4_flag, + v4_flag_colour=v4_flag_colour, + net_dhcp4_flag=network_information["ip4"]["dhcp_flag"], + dhcp4_flag_colour=dhcp4_flag_colour, + colour_off=ansiprint.end(), + ) + ) + + return "\n".join(network_list_output) + + +def format_list_dhcp(dhcp_lease_list): + dhcp_lease_list_output = [] + + # Determine optimal column widths + lease_hostname_length = 9 + lease_ip4_address_length = 11 + lease_mac_address_length = 13 + lease_timestamp_length = 10 + for dhcp_lease_information in dhcp_lease_list: + # hostname column + _lease_hostname_length = 
len(str(dhcp_lease_information["hostname"])) + 1 + if _lease_hostname_length > lease_hostname_length: + lease_hostname_length = _lease_hostname_length + # ip4_address column + _lease_ip4_address_length = len(str(dhcp_lease_information["ip4_address"])) + 1 + if _lease_ip4_address_length > lease_ip4_address_length: + lease_ip4_address_length = _lease_ip4_address_length + # mac_address column + _lease_mac_address_length = len(str(dhcp_lease_information["mac_address"])) + 1 + if _lease_mac_address_length > lease_mac_address_length: + lease_mac_address_length = _lease_mac_address_length + # timestamp column + _lease_timestamp_length = len(str(dhcp_lease_information["timestamp"])) + 1 + if _lease_timestamp_length > lease_timestamp_length: + lease_timestamp_length = _lease_timestamp_length + + # Format the string (header) + dhcp_lease_list_output.append( + "{bold}{lease_header: <{lease_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + lease_header_length=lease_hostname_length + + lease_ip4_address_length + + lease_mac_address_length + + lease_timestamp_length + + 3, + lease_header="Leases " + + "".join( + [ + "-" + for _ in range( + 7, + lease_hostname_length + + lease_ip4_address_length + + lease_mac_address_length + + lease_timestamp_length + + 2, + ) + ] + ), + ) + ) + + dhcp_lease_list_output.append( + "{bold}\ +{lease_hostname: <{lease_hostname_length}} \ +{lease_ip4_address: <{lease_ip4_address_length}} \ +{lease_mac_address: <{lease_mac_address_length}} \ +{lease_timestamp: <{lease_timestamp_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + lease_hostname_length=lease_hostname_length, + lease_ip4_address_length=lease_ip4_address_length, + lease_mac_address_length=lease_mac_address_length, + lease_timestamp_length=lease_timestamp_length, + lease_hostname="Hostname", + lease_ip4_address="IP Address", + lease_mac_address="MAC Address", + lease_timestamp="Timestamp", + ) + ) + + for dhcp_lease_information in sorted( + dhcp_lease_list, key=lambda lease: lease["hostname"] + ): + dhcp_lease_list_output.append( + "{bold}\ +{lease_hostname: <{lease_hostname_length}} \ +{lease_ip4_address: <{lease_ip4_address_length}} \ +{lease_mac_address: <{lease_mac_address_length}} \ +{lease_timestamp: <{lease_timestamp_length}} \ +{end_bold}".format( + bold="", + end_bold="", + lease_hostname_length=lease_hostname_length, + lease_ip4_address_length=lease_ip4_address_length, + lease_mac_address_length=lease_mac_address_length, + lease_timestamp_length=12, + lease_hostname=str(dhcp_lease_information["hostname"]), + lease_ip4_address=str(dhcp_lease_information["ip4_address"]), + lease_mac_address=str(dhcp_lease_information["mac_address"]), + lease_timestamp=str(dhcp_lease_information["timestamp"]), + ) + ) + + return "\n".join(dhcp_lease_list_output) + + +def format_list_acl(acl_list): + # Handle when we get an empty entry + if not acl_list: + acl_list = list() + + acl_list_output = [] + + # Determine optimal column widths + acl_direction_length = 10 + acl_order_length = 6 + acl_description_length = 12 + acl_rule_length = 5 + for acl_information in acl_list: + # order column + _acl_order_length = len(str(acl_information["order"])) + 1 + if _acl_order_length > acl_order_length: + acl_order_length = _acl_order_length + # description column + _acl_description_length = len(acl_information["description"]) + 1 + if _acl_description_length > acl_description_length: + acl_description_length = _acl_description_length + # rule column + 
_acl_rule_length = len(acl_information["rule"]) + 1 + if _acl_rule_length > acl_rule_length: + acl_rule_length = _acl_rule_length + + # Format the string (header) + acl_list_output.append( + "{bold}{acl_header: <{acl_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + acl_header_length=acl_direction_length + + acl_order_length + + acl_description_length + + acl_rule_length + + 3, + acl_header="ACLs " + + "".join( + [ + "-" + for _ in range( + 5, + acl_direction_length + + acl_order_length + + acl_description_length + + acl_rule_length + + 2, + ) + ] + ), + ) + ) + + acl_list_output.append( + "{bold}\ +{acl_direction: <{acl_direction_length}} \ +{acl_order: <{acl_order_length}} \ +{acl_description: <{acl_description_length}} \ +{acl_rule: <{acl_rule_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + acl_direction_length=acl_direction_length, + acl_order_length=acl_order_length, + acl_description_length=acl_description_length, + acl_rule_length=acl_rule_length, + acl_direction="Direction", + acl_order="Order", + acl_description="Description", + acl_rule="Rule", + ) + ) + + for acl_information in sorted( + acl_list, key=lambda acl: acl["direction"] + str(acl["order"]) + ): + acl_list_output.append( + "{bold}\ +{acl_direction: <{acl_direction_length}} \ +{acl_order: <{acl_order_length}} \ +{acl_description: <{acl_description_length}} \ +{acl_rule: <{acl_rule_length}} \ +{end_bold}".format( + bold="", + end_bold="", + acl_direction_length=acl_direction_length, + acl_order_length=acl_order_length, + acl_description_length=acl_description_length, + acl_rule_length=acl_rule_length, + acl_direction=acl_information["direction"], + acl_order=acl_information["order"], + acl_description=acl_information["description"], + acl_rule=acl_information["rule"], + ) + ) + + return "\n".join(acl_list_output) + + +def format_list_sriov_pf(pf_list): + # The maximum column width of the VFs column + max_vfs_length = 70 + + # Handle when we get an empty entry + if not pf_list: + pf_list = list() + + pf_list_output = [] + + # Determine optimal column widths + pf_phy_length = 6 + pf_mtu_length = 4 + pf_vfs_length = 4 + + for pf_information in pf_list: + # phy column + _pf_phy_length = len(str(pf_information["phy"])) + 1 + if _pf_phy_length > pf_phy_length: + pf_phy_length = _pf_phy_length + # mtu column + _pf_mtu_length = len(str(pf_information["mtu"])) + 1 + if _pf_mtu_length > pf_mtu_length: + pf_mtu_length = _pf_mtu_length + # vfs column + _pf_vfs_length = len(str(", ".join(pf_information["vfs"]))) + 1 + if _pf_vfs_length > pf_vfs_length: + pf_vfs_length = _pf_vfs_length + + # We handle columnizing very long lists later + if pf_vfs_length > max_vfs_length: + pf_vfs_length = max_vfs_length + + # Format the string (header) + pf_list_output.append( + "{bold}{pf_header: <{pf_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + pf_header_length=pf_phy_length + pf_mtu_length + pf_vfs_length + 2, + pf_header="PFs " + + "".join( + [ + "-" + for _ in range(4, pf_phy_length + pf_mtu_length + pf_vfs_length + 1) + ] + ), + ) + ) + + pf_list_output.append( + "{bold}\ +{pf_phy: <{pf_phy_length}} \ +{pf_mtu: <{pf_mtu_length}} \ +{pf_vfs: <{pf_vfs_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + pf_phy_length=pf_phy_length, + pf_mtu_length=pf_mtu_length, + pf_vfs_length=pf_vfs_length, + pf_phy="Device", + pf_mtu="MTU", + pf_vfs="VFs", + ) + ) + + for pf_information in sorted(pf_list, 
key=lambda p: p["phy"]): + # Figure out how to nicely columnize our list + nice_vfs_list = [list()] + vfs_lines = 0 + cur_vfs_length = 0 + for vfs in pf_information["vfs"]: + vfs_len = len(vfs) + cur_vfs_length += vfs_len + 2 # for the comma and space + if cur_vfs_length > max_vfs_length: + cur_vfs_length = 0 + vfs_lines += 1 + nice_vfs_list.append(list()) + nice_vfs_list[vfs_lines].append(vfs) + + # Append the lines + pf_list_output.append( + "{bold}\ +{pf_phy: <{pf_phy_length}} \ +{pf_mtu: <{pf_mtu_length}} \ +{pf_vfs: <{pf_vfs_length}} \ +{end_bold}".format( + bold="", + end_bold="", + pf_phy_length=pf_phy_length, + pf_mtu_length=pf_mtu_length, + pf_vfs_length=pf_vfs_length, + pf_phy=pf_information["phy"], + pf_mtu=pf_information["mtu"], + pf_vfs=", ".join(nice_vfs_list[0]), + ) + ) + + if len(nice_vfs_list) > 1: + for idx in range(1, len(nice_vfs_list)): + pf_list_output.append( + "{bold}\ +{pf_phy: <{pf_phy_length}} \ +{pf_mtu: <{pf_mtu_length}} \ +{pf_vfs: <{pf_vfs_length}} \ +{end_bold}".format( + bold="", + end_bold="", + pf_phy_length=pf_phy_length, + pf_mtu_length=pf_mtu_length, + pf_vfs_length=pf_vfs_length, + pf_phy="", + pf_mtu="", + pf_vfs=", ".join(nice_vfs_list[idx]), + ) + ) + + return "\n".join(pf_list_output) + + +def format_list_sriov_vf(vf_list): + # Handle when we get an empty entry + if not vf_list: + vf_list = list() + + vf_list_output = [] + + # Determine optimal column widths + vf_phy_length = 4 + vf_pf_length = 3 + vf_mtu_length = 4 + vf_mac_length = 11 + vf_used_length = 5 + vf_domain_length = 5 + + for vf_information in vf_list: + # phy column + _vf_phy_length = len(str(vf_information["phy"])) + 1 + if _vf_phy_length > vf_phy_length: + vf_phy_length = _vf_phy_length + # pf column + _vf_pf_length = len(str(vf_information["pf"])) + 1 + if _vf_pf_length > vf_pf_length: + vf_pf_length = _vf_pf_length + # mtu column + _vf_mtu_length = len(str(vf_information["mtu"])) + 1 + if _vf_mtu_length > vf_mtu_length: + vf_mtu_length = _vf_mtu_length + # mac column + _vf_mac_length = len(str(vf_information["mac"])) + 1 + if _vf_mac_length > vf_mac_length: + vf_mac_length = _vf_mac_length + # used column + _vf_used_length = len(str(vf_information["usage"]["used"])) + 1 + if _vf_used_length > vf_used_length: + vf_used_length = _vf_used_length + # domain column + _vf_domain_length = len(str(vf_information["usage"]["domain"])) + 1 + if _vf_domain_length > vf_domain_length: + vf_domain_length = _vf_domain_length + + # Format the string (header) + vf_list_output.append( + "{bold}{vf_header: <{vf_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + vf_header_length=vf_phy_length + + vf_pf_length + + vf_mtu_length + + vf_mac_length + + vf_used_length + + vf_domain_length + + 5, + vf_header="VFs " + + "".join( + [ + "-" + for _ in range( + 4, + vf_phy_length + + vf_pf_length + + vf_mtu_length + + vf_mac_length + + vf_used_length + + vf_domain_length + + 4, + ) + ] + ), + ) + ) + + vf_list_output.append( + "{bold}\ +{vf_phy: <{vf_phy_length}} \ +{vf_pf: <{vf_pf_length}} \ +{vf_mtu: <{vf_mtu_length}} \ +{vf_mac: <{vf_mac_length}} \ +{vf_used: <{vf_used_length}} \ +{vf_domain: <{vf_domain_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + vf_phy_length=vf_phy_length, + vf_pf_length=vf_pf_length, + vf_mtu_length=vf_mtu_length, + vf_mac_length=vf_mac_length, + vf_used_length=vf_used_length, + vf_domain_length=vf_domain_length, + vf_phy="Device", + vf_pf="PF", + vf_mtu="MTU", + vf_mac="MAC Address", + vf_used="Used", + 
vf_domain="Domain", + ) + ) + + for vf_information in sorted(vf_list, key=lambda v: v["phy"]): + vf_domain = vf_information["usage"]["domain"] + if not vf_domain: + vf_domain = "N/A" + + vf_list_output.append( + "{bold}\ +{vf_phy: <{vf_phy_length}} \ +{vf_pf: <{vf_pf_length}} \ +{vf_mtu: <{vf_mtu_length}} \ +{vf_mac: <{vf_mac_length}} \ +{vf_used: <{vf_used_length}} \ +{vf_domain: <{vf_domain_length}} \ +{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + vf_phy_length=vf_phy_length, + vf_pf_length=vf_pf_length, + vf_mtu_length=vf_mtu_length, + vf_mac_length=vf_mac_length, + vf_used_length=vf_used_length, + vf_domain_length=vf_domain_length, + vf_phy=vf_information["phy"], + vf_pf=vf_information["pf"], + vf_mtu=vf_information["mtu"], + vf_mac=vf_information["mac"], + vf_used=vf_information["usage"]["used"], + vf_domain=vf_domain, + ) + ) + + return "\n".join(vf_list_output) + + +def format_info_sriov_vf(config, vf_information, node): + if not vf_information: + return "No VF found" + + # Get information on the using VM if applicable + if vf_information["usage"]["used"] == "True" and vf_information["usage"]["domain"]: + vm_information = call_api( + config, "get", "/vm/{vm}".format(vm=vf_information["usage"]["domain"]) + ).json() + if isinstance(vm_information, list) and len(vm_information) > 0: + vm_information = vm_information[0] + else: + vm_information = None + + # Format a nice output: do this line-by-line then concat the elements at the end + ainformation = [] + ainformation.append( + "{}SR-IOV VF information:{}".format(ansiprint.bold(), ansiprint.end()) + ) + ainformation.append("") + # Basic information + ainformation.append( + "{}PHY:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["phy"] + ) + ) + ainformation.append( + "{}PF:{} {} @ {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["pf"], node + ) + ) + ainformation.append( + "{}MTU:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["mtu"] + ) + ) + ainformation.append( + "{}MAC Address:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["mac"] + ) + ) + ainformation.append("") + # Configuration information + ainformation.append( + "{}vLAN ID:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["config"]["vlan_id"] + ) + ) + ainformation.append( + "{}vLAN QOS priority:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["config"]["vlan_qos"] + ) + ) + ainformation.append( + "{}Minimum TX Rate:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["config"]["tx_rate_min"] + ) + ) + ainformation.append( + "{}Maximum TX Rate:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["config"]["tx_rate_max"] + ) + ) + ainformation.append( + "{}Link State:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["config"]["link_state"] + ) + ) + ainformation.append( + "{}Spoof Checking:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + getColour(vf_information["config"]["spoof_check"]), + vf_information["config"]["spoof_check"], + ansiprint.end(), + ) + ) + ainformation.append( + "{}VF User Trust:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + getColour(vf_information["config"]["trust"]), + vf_information["config"]["trust"], + ansiprint.end(), + ) + ) + ainformation.append( + "{}Query RSS Config:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + getColour(vf_information["config"]["query_rss"]), + vf_information["config"]["query_rss"], + 
ansiprint.end(), + ) + ) + ainformation.append("") + # PCIe bus information + ainformation.append( + "{}PCIe domain:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["pci"]["domain"] + ) + ) + ainformation.append( + "{}PCIe bus:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["pci"]["bus"] + ) + ) + ainformation.append( + "{}PCIe slot:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["pci"]["slot"] + ) + ) + ainformation.append( + "{}PCIe function:{} {}".format( + ansiprint.purple(), ansiprint.end(), vf_information["pci"]["function"] + ) + ) + ainformation.append("") + # Usage information + ainformation.append( + "{}VF Used:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + getColour(vf_information["usage"]["used"]), + vf_information["usage"]["used"], + ansiprint.end(), + ) + ) + if vf_information["usage"]["used"] == "True" and vm_information is not None: + ainformation.append( + "{}Using Domain:{} {} ({}) ({}{}{})".format( + ansiprint.purple(), + ansiprint.end(), + vf_information["usage"]["domain"], + vm_information["name"], + getColour(vm_information["state"]), + vm_information["state"], + ansiprint.end(), + ) + ) + else: + ainformation.append( + "{}Using Domain:{} N/A".format(ansiprint.purple(), ansiprint.end()) + ) + + # Join it all together + return "\n".join(ainformation) diff --git a/cli-client-new/pvc/lib/node.py b/cli-client-new/pvc/lib/node.py new file mode 100644 index 00000000..2390319a --- /dev/null +++ b/cli-client-new/pvc/lib/node.py @@ -0,0 +1,709 @@ +#!/usr/bin/env python3 + +# node.py - PVC CLI client function library, node management +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +############################################################################### + +import time + +import pvc.lib.ansiprint as ansiprint +from pvc.lib.common import call_api + + +# +# Primary functions +# +def node_coordinator_state(config, node, action): + """ + Set node coordinator state state (primary/secondary) + + API endpoint: POST /api/v1/node/{node}/coordinator-state + API arguments: action={action} + API schema: {"message": "{data}"} + """ + params = {"state": action} + response = call_api( + config, + "post", + "/node/{node}/coordinator-state".format(node=node), + params=params, + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def node_domain_state(config, node, action, wait): + """ + Set node domain state state (flush/ready) + + API endpoint: POST /api/v1/node/{node}/domain-state + API arguments: action={action}, wait={wait} + API schema: {"message": "{data}"} + """ + params = {"state": action, "wait": str(wait).lower()} + response = call_api( + config, "post", "/node/{node}/domain-state".format(node=node), params=params + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def view_node_log(config, node, lines=100): + """ + Return node log lines from the API (and display them in a pager in the main CLI) + + API endpoint: GET /node/{node}/log + API arguments: lines={lines} + API schema: {"name":"{node}","data":"{node_log}"} + """ + params = {"lines": lines} + response = call_api( + config, "get", "/node/{node}/log".format(node=node), params=params + ) + + if response.status_code != 200: + return False, response.json().get("message", "") + + node_log = response.json()["data"] + + # Shrink the log buffer to length lines + shrunk_log = node_log.split("\n")[-lines:] + loglines = "\n".join(shrunk_log) + + return True, loglines + + +def follow_node_log(config, node, lines=10): + """ + Return and follow node log lines from the API + + API endpoint: GET /node/{node}/log + API arguments: lines={lines} + API schema: {"name":"{nodename}","data":"{node_log}"} + """ + # We always grab 200 to match the follow call, but only _show_ `lines` number + params = {"lines": 200} + response = call_api( + config, "get", "/node/{node}/log".format(node=node), params=params + ) + + if response.status_code != 200: + return False, response.json().get("message", "") + + # Shrink the log buffer to length lines + node_log = response.json()["data"] + shrunk_log = node_log.split("\n")[-int(lines) :] + loglines = "\n".join(shrunk_log) + + # Print the initial data and begin following + print(loglines, end="") + print("\n", end="") + + while True: + # Grab the next line set (200 is a reasonable number of lines per half-second; any more are skipped) + try: + params = {"lines": 200} + response = call_api( + config, "get", "/node/{node}/log".format(node=node), params=params + ) + new_node_log = response.json()["data"] + except Exception: + break + # Split the new and old log strings into constitutent lines + old_node_loglines = node_log.split("\n") + new_node_loglines = new_node_log.split("\n") + + # Set the node log to the new log value for the next iteration + node_log = new_node_log + + # Get the difference between the two sets of lines + old_node_loglines_set = set(old_node_loglines) + diff_node_loglines = [ + x for x in new_node_loglines if x not in old_node_loglines_set + ] + + # If there's a difference, print it out 
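+        # Note: the comparison above is set-based, so a new line identical to one
+        # already seen in the previous buffer will not be printed again.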
+ if len(diff_node_loglines) > 0: + print("\n".join(diff_node_loglines), end="") + print("\n", end="") + + # Wait half a second + time.sleep(0.5) + + return True, "" + + +def node_info(config, node): + """ + Get information about node + + API endpoint: GET /api/v1/node/{node} + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/node/{node}".format(node=node)) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match, return not found + return False, "Node not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def node_list( + config, limit, target_daemon_state, target_coordinator_state, target_domain_state +): + """ + Get list information about nodes (limited by {limit}) + + API endpoint: GET /api/v1/node + API arguments: limit={limit} + API schema: [{json_data_object},{json_data_object},etc.] + """ + params = dict() + if limit: + params["limit"] = limit + if target_daemon_state: + params["daemon_state"] = target_daemon_state + if target_coordinator_state: + params["coordinator_state"] = target_coordinator_state + if target_domain_state: + params["domain_state"] = target_domain_state + + response = call_api(config, "get", "/node", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +# +# Output display functions +# +def getOutputColours(node_information): + node_health = node_information.get("health", "N/A") + if isinstance(node_health, int): + if node_health <= 50: + health_colour = ansiprint.red() + elif node_health <= 90: + health_colour = ansiprint.yellow() + elif node_health <= 100: + health_colour = ansiprint.green() + else: + health_colour = ansiprint.blue() + else: + health_colour = ansiprint.blue() + + if node_information["daemon_state"] == "run": + daemon_state_colour = ansiprint.green() + elif node_information["daemon_state"] == "stop": + daemon_state_colour = ansiprint.red() + elif node_information["daemon_state"] == "shutdown": + daemon_state_colour = ansiprint.yellow() + elif node_information["daemon_state"] == "init": + daemon_state_colour = ansiprint.yellow() + elif node_information["daemon_state"] == "dead": + daemon_state_colour = ansiprint.red() + ansiprint.bold() + else: + daemon_state_colour = ansiprint.blue() + + if node_information["coordinator_state"] == "primary": + coordinator_state_colour = ansiprint.green() + elif node_information["coordinator_state"] == "secondary": + coordinator_state_colour = ansiprint.blue() + else: + coordinator_state_colour = ansiprint.cyan() + + if node_information["domain_state"] == "ready": + domain_state_colour = ansiprint.green() + else: + domain_state_colour = ansiprint.blue() + + if node_information["memory"]["allocated"] > node_information["memory"]["total"]: + mem_allocated_colour = ansiprint.yellow() + else: + mem_allocated_colour = "" + + if node_information["memory"]["provisioned"] > node_information["memory"]["total"]: + mem_provisioned_colour = ansiprint.yellow() + else: + mem_provisioned_colour = "" + + return ( + health_colour, + daemon_state_colour, + coordinator_state_colour, + domain_state_colour, + mem_allocated_colour, + mem_provisioned_colour, + ) + + +def 
format_info(node_information, long_output): + ( + health_colour, + daemon_state_colour, + coordinator_state_colour, + domain_state_colour, + mem_allocated_colour, + mem_provisioned_colour, + ) = getOutputColours(node_information) + + # Format a nice output; do this line-by-line then concat the elements at the end + ainformation = [] + # Basic information + ainformation.append( + "{}Name:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + node_information["name"], + ) + ) + ainformation.append( + "{}PVC Version:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + node_information["pvc_version"], + ) + ) + + node_health = node_information.get("health", "N/A") + if isinstance(node_health, int): + node_health_text = f"{node_health}%" + else: + node_health_text = node_health + ainformation.append( + "{}Health:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + health_colour, + node_health_text, + ansiprint.end(), + ) + ) + + node_health_details = node_information.get("health_details", []) + if long_output: + node_health_messages = "\n ".join( + [f"{plugin['name']}: {plugin['message']}" for plugin in node_health_details] + ) + else: + node_health_messages = "\n ".join( + [ + f"{plugin['name']}: {plugin['message']}" + for plugin in node_health_details + if int(plugin.get("health_delta", 0)) > 0 + ] + ) + + if len(node_health_messages) > 0: + ainformation.append( + "{}Health Plugin Details:{} {}".format( + ansiprint.purple(), ansiprint.end(), node_health_messages + ) + ) + ainformation.append("") + + ainformation.append( + "{}Daemon State:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + daemon_state_colour, + node_information["daemon_state"], + ansiprint.end(), + ) + ) + ainformation.append( + "{}Coordinator State:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + coordinator_state_colour, + node_information["coordinator_state"], + ansiprint.end(), + ) + ) + ainformation.append( + "{}Domain State:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + domain_state_colour, + node_information["domain_state"], + ansiprint.end(), + ) + ) + if long_output: + ainformation.append("") + ainformation.append( + "{}Architecture:{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["arch"] + ) + ) + ainformation.append( + "{}Operating System:{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["os"] + ) + ) + ainformation.append( + "{}Kernel Version:{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["kernel"] + ) + ) + ainformation.append("") + ainformation.append( + "{}Active VM Count:{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["domains_count"] + ) + ) + ainformation.append( + "{}Host CPUs:{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["vcpu"]["total"] + ) + ) + ainformation.append( + "{}vCPUs:{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["vcpu"]["allocated"] + ) + ) + ainformation.append( + "{}Load:{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["load"] + ) + ) + ainformation.append( + "{}Total RAM (MiB):{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["memory"]["total"] + ) + ) + ainformation.append( + "{}Used RAM (MiB):{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["memory"]["used"] + ) + ) + ainformation.append( + "{}Free RAM (MiB):{} {}".format( + ansiprint.purple(), ansiprint.end(), node_information["memory"]["free"] + ) + ) + ainformation.append( + 
"{}Allocated RAM (MiB):{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + mem_allocated_colour, + node_information["memory"]["allocated"], + ansiprint.end(), + ) + ) + ainformation.append( + "{}Provisioned RAM (MiB):{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + mem_provisioned_colour, + node_information["memory"]["provisioned"], + ansiprint.end(), + ) + ) + + # Join it all together + ainformation.append("") + return "\n".join(ainformation) + + +def format_list(node_list, raw): + if raw: + ainformation = list() + for node in sorted(item["name"] for item in node_list): + ainformation.append(node) + return "\n".join(ainformation) + + node_list_output = [] + + # Determine optimal column widths + node_name_length = 5 + pvc_version_length = 8 + health_length = 7 + daemon_state_length = 7 + coordinator_state_length = 12 + domain_state_length = 7 + domains_count_length = 4 + cpu_count_length = 6 + load_length = 5 + mem_total_length = 6 + mem_used_length = 5 + mem_free_length = 5 + mem_alloc_length = 6 + mem_prov_length = 5 + for node_information in node_list: + # node_name column + _node_name_length = len(node_information["name"]) + 1 + if _node_name_length > node_name_length: + node_name_length = _node_name_length + # node_pvc_version column + _pvc_version_length = len(node_information.get("pvc_version", "N/A")) + 1 + if _pvc_version_length > pvc_version_length: + pvc_version_length = _pvc_version_length + # node_health column + node_health = node_information.get("health", "N/A") + if isinstance(node_health, int): + node_health_text = f"{node_health}%" + else: + node_health_text = node_health + _health_length = len(node_health_text) + 1 + if _health_length > health_length: + health_length = _health_length + # daemon_state column + _daemon_state_length = len(node_information["daemon_state"]) + 1 + if _daemon_state_length > daemon_state_length: + daemon_state_length = _daemon_state_length + # coordinator_state column + _coordinator_state_length = len(node_information["coordinator_state"]) + 1 + if _coordinator_state_length > coordinator_state_length: + coordinator_state_length = _coordinator_state_length + # domain_state column + _domain_state_length = len(node_information["domain_state"]) + 1 + if _domain_state_length > domain_state_length: + domain_state_length = _domain_state_length + # domains_count column + _domains_count_length = len(str(node_information["domains_count"])) + 1 + if _domains_count_length > domains_count_length: + domains_count_length = _domains_count_length + # cpu_count column + _cpu_count_length = len(str(node_information["cpu_count"])) + 1 + if _cpu_count_length > cpu_count_length: + cpu_count_length = _cpu_count_length + # load column + _load_length = len(str(node_information["load"])) + 1 + if _load_length > load_length: + load_length = _load_length + # mem_total column + _mem_total_length = len(str(node_information["memory"]["total"])) + 1 + if _mem_total_length > mem_total_length: + mem_total_length = _mem_total_length + # mem_used column + _mem_used_length = len(str(node_information["memory"]["used"])) + 1 + if _mem_used_length > mem_used_length: + mem_used_length = _mem_used_length + # mem_free column + _mem_free_length = len(str(node_information["memory"]["free"])) + 1 + if _mem_free_length > mem_free_length: + mem_free_length = _mem_free_length + # mem_alloc column + _mem_alloc_length = len(str(node_information["memory"]["allocated"])) + 1 + if _mem_alloc_length > mem_alloc_length: + mem_alloc_length = _mem_alloc_length + + # mem_prov 
column + _mem_prov_length = len(str(node_information["memory"]["provisioned"])) + 1 + if _mem_prov_length > mem_prov_length: + mem_prov_length = _mem_prov_length + + # Format the string (header) + node_list_output.append( + "{bold}{node_header: <{node_header_length}} {state_header: <{state_header_length}} {resource_header: <{resource_header_length}} {memory_header: <{memory_header_length}}{end_bold}".format( + node_header_length=node_name_length + + pvc_version_length + + health_length + + 2, + state_header_length=daemon_state_length + + coordinator_state_length + + domain_state_length + + 2, + resource_header_length=domains_count_length + + cpu_count_length + + load_length + + 2, + memory_header_length=mem_total_length + + mem_used_length + + mem_free_length + + mem_alloc_length + + mem_prov_length + + 4, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + node_header="Nodes " + + "".join( + [ + "-" + for _ in range( + 6, node_name_length + pvc_version_length + health_length + 1 + ) + ] + ), + state_header="States " + + "".join( + [ + "-" + for _ in range( + 7, + daemon_state_length + + coordinator_state_length + + domain_state_length + + 1, + ) + ] + ), + resource_header="Resources " + + "".join( + [ + "-" + for _ in range( + 10, domains_count_length + cpu_count_length + load_length + 1 + ) + ] + ), + memory_header="Memory (M) " + + "".join( + [ + "-" + for _ in range( + 11, + mem_total_length + + mem_used_length + + mem_free_length + + mem_alloc_length + + mem_prov_length + + 3, + ) + ] + ), + ) + ) + + node_list_output.append( + "{bold}{node_name: <{node_name_length}} {node_pvc_version: <{pvc_version_length}} {node_health: <{health_length}} \ +{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \ +{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \ +{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {node_mem_allocated: <{mem_alloc_length}} {node_mem_provisioned: <{mem_prov_length}}{end_bold}".format( + node_name_length=node_name_length, + pvc_version_length=pvc_version_length, + health_length=health_length, + daemon_state_length=daemon_state_length, + coordinator_state_length=coordinator_state_length, + domain_state_length=domain_state_length, + domains_count_length=domains_count_length, + cpu_count_length=cpu_count_length, + load_length=load_length, + mem_total_length=mem_total_length, + mem_used_length=mem_used_length, + mem_free_length=mem_free_length, + mem_alloc_length=mem_alloc_length, + mem_prov_length=mem_prov_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + daemon_state_colour="", + coordinator_state_colour="", + domain_state_colour="", + end_colour="", + node_name="Name", + node_pvc_version="Version", + node_health="Health", + node_daemon_state="Daemon", + node_coordinator_state="Coordinator", + node_domain_state="Domain", + node_domains_count="VMs", + node_cpu_count="vCPUs", + node_load="Load", + node_mem_total="Total", + node_mem_used="Used", + node_mem_free="Free", + node_mem_allocated="Alloc", + node_mem_provisioned="Prov", + ) + ) + + # Format the string (elements) + for node_information in sorted(node_list, key=lambda n: n["name"]): + ( + health_colour, + daemon_state_colour, + coordinator_state_colour, + domain_state_colour, + 
mem_allocated_colour, + mem_provisioned_colour, + ) = getOutputColours(node_information) + + node_health = node_information.get("health", "N/A") + if isinstance(node_health, int): + node_health_text = f"{node_health}%" + else: + node_health_text = node_health + + node_list_output.append( + "{bold}{node_name: <{node_name_length}} {node_pvc_version: <{pvc_version_length}} {health_colour}{node_health: <{health_length}}{end_colour} \ +{daemon_state_colour}{node_daemon_state: <{daemon_state_length}}{end_colour} {coordinator_state_colour}{node_coordinator_state: <{coordinator_state_length}}{end_colour} {domain_state_colour}{node_domain_state: <{domain_state_length}}{end_colour} \ +{node_domains_count: <{domains_count_length}} {node_cpu_count: <{cpu_count_length}} {node_load: <{load_length}} \ +{node_mem_total: <{mem_total_length}} {node_mem_used: <{mem_used_length}} {node_mem_free: <{mem_free_length}} {mem_allocated_colour}{node_mem_allocated: <{mem_alloc_length}}{end_colour} {mem_provisioned_colour}{node_mem_provisioned: <{mem_prov_length}}{end_colour}{end_bold}".format( + node_name_length=node_name_length, + pvc_version_length=pvc_version_length, + health_length=health_length, + daemon_state_length=daemon_state_length, + coordinator_state_length=coordinator_state_length, + domain_state_length=domain_state_length, + domains_count_length=domains_count_length, + cpu_count_length=cpu_count_length, + load_length=load_length, + mem_total_length=mem_total_length, + mem_used_length=mem_used_length, + mem_free_length=mem_free_length, + mem_alloc_length=mem_alloc_length, + mem_prov_length=mem_prov_length, + bold="", + end_bold="", + health_colour=health_colour, + daemon_state_colour=daemon_state_colour, + coordinator_state_colour=coordinator_state_colour, + domain_state_colour=domain_state_colour, + mem_allocated_colour=mem_allocated_colour, + mem_provisioned_colour=mem_provisioned_colour, + end_colour=ansiprint.end(), + node_name=node_information["name"], + node_pvc_version=node_information.get("pvc_version", "N/A"), + node_health=node_health_text, + node_daemon_state=node_information["daemon_state"], + node_coordinator_state=node_information["coordinator_state"], + node_domain_state=node_information["domain_state"], + node_domains_count=node_information["domains_count"], + node_cpu_count=node_information["vcpu"]["allocated"], + node_load=node_information["load"], + node_mem_total=node_information["memory"]["total"], + node_mem_used=node_information["memory"]["used"], + node_mem_free=node_information["memory"]["free"], + node_mem_allocated=node_information["memory"]["allocated"], + node_mem_provisioned=node_information["memory"]["provisioned"], + ) + ) + + return "\n".join(node_list_output) diff --git a/cli-client-new/pvc/lib/provisioner.py b/cli-client-new/pvc/lib/provisioner.py new file mode 100644 index 00000000..f533621d --- /dev/null +++ b/cli-client-new/pvc/lib/provisioner.py @@ -0,0 +1,2006 @@ +#!/usr/bin/env python3 + +# provisioner.py - PVC CLI client function library, Provisioner functions +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +############################################################################### + +from requests_toolbelt.multipart.encoder import ( + MultipartEncoder, + MultipartEncoderMonitor, +) + +import pvc.lib.ansiprint as ansiprint +from pvc.lib.common import UploadProgressBar, call_api +from ast import literal_eval + + +# +# Primary functions +# +def template_info(config, template, template_type): + """ + Get information about template + + API endpoint: GET /api/v1/provisioner/template/{template_type}/{template} + API arguments: + API schema: {json_template_object} + """ + response = call_api( + config, + "get", + "/provisioner/template/{template_type}/{template}".format( + template_type=template_type, template=template + ), + ) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "Template not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def template_list(config, limit, template_type=None): + """ + Get list information about templates (limited by {limit}) + + API endpoint: GET /api/v1/provisioner/template/{template_type} + API arguments: limit={limit} + API schema: [{json_template_object},{json_template_object},etc.] + """ + params = dict() + if limit: + params["limit"] = limit + + if template_type is not None: + response = call_api( + config, + "get", + "/provisioner/template/{template_type}".format(template_type=template_type), + params=params, + ) + else: + response = call_api(config, "get", "/provisioner/template", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def template_add(config, params, template_type=None): + """ + Add a new template of {template_type} with {params} + + API endpoint: POST /api/v1/provisioner/template/{template_type} + API_arguments: args + API schema: {message} + """ + response = call_api( + config, + "post", + "/provisioner/template/{template_type}".format(template_type=template_type), + params=params, + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def template_modify(config, params, name, template_type): + """ + Modify an existing template of {template_type} with {params} + + API endpoint: PUT /api/v1/provisioner/template/{template_type}/{name} + API_arguments: args + API schema: {message} + """ + response = call_api( + config, + "put", + "/provisioner/template/{template_type}/{name}".format( + template_type=template_type, name=name + ), + params=params, + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def template_remove(config, name, template_type): + """ + Remove template {name} of {template_type} + + API endpoint: DELETE /api/v1/provisioner/template/{template_type}/{name} + API_arguments: + API schema: {message} + """ + response = call_api( + config, + "delete", + "/provisioner/template/{template_type}/{name}".format( + template_type=template_type, 
name=name + ), + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def template_element_add( + config, name, element_id, params, element_type=None, template_type=None +): + """ + Add a new template element of {element_type} with {params} to template {name} of {template_type} + + API endpoint: POST /api/v1/provisioner/template/{template_type}/{name}/{element_type}/{element_id} + API_arguments: args + API schema: {message} + """ + response = call_api( + config, + "post", + "/provisioner/template/{template_type}/{name}/{element_type}/{element_id}".format( + template_type=template_type, + name=name, + element_type=element_type, + element_id=element_id, + ), + params=params, + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def template_element_remove( + config, name, element_id, element_type=None, template_type=None +): + """ + Remove template element {element_id} of {element_type} from template {name} of {template_type} + + API endpoint: DELETE /api/v1/provisioner/template/{template_type}/{name}/{element_type}/{element_id} + API_arguments: + API schema: {message} + """ + response = call_api( + config, + "delete", + "/provisioner/template/{template_type}/{name}/{element_type}/{element_id}".format( + template_type=template_type, + name=name, + element_type=element_type, + element_id=element_id, + ), + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def userdata_info(config, userdata): + """ + Get information about userdata + + API endpoint: GET /api/v1/provisioner/userdata/{userdata} + API arguments: + API schema: {json_data_object} + """ + response = call_api( + config, "get", "/provisioner/userdata/{userdata}".format(userdata=userdata) + ) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "Userdata not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def userdata_list(config, limit): + """ + Get list information about userdatas (limited by {limit}) + + API endpoint: GET /api/v1/provisioner/userdata + API arguments: limit={limit} + API schema: [{json_data_object},{json_data_object},etc.] + """ + params = dict() + if limit: + params["limit"] = limit + + response = call_api(config, "get", "/provisioner/userdata", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def userdata_show(config, name): + """ + Get information about userdata name + + API endpoint: GET /api/v1/provisioner/userdata/{name} + API arguments: + API schema: [{json_data_object},{json_data_object},etc.] 
+ """ + response = call_api(config, "get", "/provisioner/userdata/{}".format(name)) + + if response.status_code == 200: + return True, response.json()[0]["userdata"] + else: + return False, response.json().get("message", "") + + +def userdata_add(config, params): + """ + Add a new userdata with {params} + + API endpoint: POST /api/v1/provisioner/userdata + API_arguments: args + API schema: {message} + """ + name = params.get("name") + userdata_data = params.get("data") + + params = {"name": name} + data = {"data": userdata_data} + response = call_api( + config, "post", "/provisioner/userdata", params=params, data=data + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def userdata_modify(config, name, params): + """ + Modify userdata {name} with {params} + + API endpoint: PUT /api/v1/provisioner/userdata/{name} + API_arguments: args + API schema: {message} + """ + userdata_data = params.get("data") + + params = {"name": name} + data = {"data": userdata_data} + response = call_api( + config, + "put", + "/provisioner/userdata/{name}".format(name=name), + params=params, + data=data, + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def userdata_remove(config, name): + """ + Remove userdata {name} + + API endpoint: DELETE /api/v1/provisioner/userdata/{name} + API_arguments: + API schema: {message} + """ + response = call_api( + config, "delete", "/provisioner/userdata/{name}".format(name=name) + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def script_info(config, script): + """ + Get information about script + + API endpoint: GET /api/v1/provisioner/script/{script} + API arguments: + API schema: {json_data_object} + """ + response = call_api( + config, "get", "/provisioner/script/{script}".format(script=script) + ) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "Script not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def script_list(config, limit): + """ + Get list information about scripts (limited by {limit}) + + API endpoint: GET /api/v1/provisioner/script + API arguments: limit={limit} + API schema: [{json_data_object},{json_data_object},etc.] + """ + params = dict() + if limit: + params["limit"] = limit + + response = call_api(config, "get", "/provisioner/script", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def script_show(config, name): + """ + Get information about script name + + API endpoint: GET /api/v1/provisioner/script/{name} + API arguments: + API schema: [{json_data_object},{json_data_object},etc.] 
+ """ + response = call_api(config, "get", "/provisioner/script/{}".format(name)) + + if response.status_code == 200: + return True, response.json()[0]["script"] + else: + return False, response.json().get("message", "") + + +def script_add(config, params): + """ + Add a new script with {params} + + API endpoint: POST /api/v1/provisioner/script + API_arguments: args + API schema: {message} + """ + name = params.get("name") + script_data = params.get("data") + + params = {"name": name} + data = {"data": script_data} + response = call_api(config, "post", "/provisioner/script", params=params, data=data) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def script_modify(config, name, params): + """ + Modify script {name} with {params} + + API endpoint: PUT /api/v1/provisioner/script/{name} + API_arguments: args + API schema: {message} + """ + script_data = params.get("data") + + params = {"name": name} + data = {"data": script_data} + response = call_api( + config, + "put", + "/provisioner/script/{name}".format(name=name), + params=params, + data=data, + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def script_remove(config, name): + """ + Remove script {name} + + API endpoint: DELETE /api/v1/provisioner/script/{name} + API_arguments: + API schema: {message} + """ + response = call_api( + config, "delete", "/provisioner/script/{name}".format(name=name) + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def ova_info(config, name): + """ + Get information about OVA image {name} + + API endpoint: GET /api/v1/provisioner/ova/{name} + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/provisioner/ova/{name}".format(name=name)) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "OVA not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ova_list(config, limit): + """ + Get list information about OVA images (limited by {limit}) + + API endpoint: GET /api/v1/provisioner/ova + API arguments: limit={limit} + API schema: [{json_data_object},{json_data_object},etc.] 
+ """ + params = dict() + if limit: + params["limit"] = limit + + response = call_api(config, "get", "/provisioner/ova", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def ova_upload(config, name, ova_file, params): + """ + Upload an OVA image to the cluster + + API endpoint: POST /api/v1/provisioner/ova/{name} + API arguments: pool={pool}, ova_size={ova_size} + API schema: {"message":"{data}"} + """ + import click + + bar = UploadProgressBar( + ova_file, end_message="Parsing file on remote side...", end_nl=False + ) + upload_data = MultipartEncoder( + fields={"file": ("filename", open(ova_file, "rb"), "application/octet-stream")} + ) + upload_monitor = MultipartEncoderMonitor(upload_data, bar.update) + + headers = {"Content-Type": upload_monitor.content_type} + + response = call_api( + config, + "post", + "/provisioner/ova/{}".format(name), + headers=headers, + params=params, + data=upload_monitor, + ) + + click.echo("done.") + click.echo() + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def ova_remove(config, name): + """ + Remove OVA image {name} + + API endpoint: DELETE /api/v1/provisioner/ova/{name} + API_arguments: + API schema: {message} + """ + response = call_api(config, "delete", "/provisioner/ova/{name}".format(name=name)) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def profile_info(config, profile): + """ + Get information about profile + + API endpoint: GET /api/v1/provisioner/profile/{profile} + API arguments: + API schema: {json_data_object} + """ + response = call_api( + config, "get", "/provisioner/profile/{profile}".format(profile=profile) + ) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "Profile not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def profile_list(config, limit): + """ + Get list information about profiles (limited by {limit}) + + API endpoint: GET /api/v1/provisioner/profile/{profile_type} + API arguments: limit={limit} + API schema: [{json_data_object},{json_data_object},etc.] 
+ """ + params = dict() + if limit: + params["limit"] = limit + + response = call_api(config, "get", "/provisioner/profile", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def profile_add(config, params): + """ + Add a new profile with {params} + + API endpoint: POST /api/v1/provisioner/profile + API_arguments: args + API schema: {message} + """ + response = call_api(config, "post", "/provisioner/profile", params=params) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def profile_modify(config, name, params): + """ + Modify profile {name} with {params} + + API endpoint: PUT /api/v1/provisioner/profile/{name} + API_arguments: args + API schema: {message} + """ + response = call_api( + config, "put", "/provisioner/profile/{name}".format(name=name), params=params + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def profile_remove(config, name): + """ + Remove profile {name} + + API endpoint: DELETE /api/v1/provisioner/profile/{name} + API_arguments: + API schema: {message} + """ + response = call_api( + config, "delete", "/provisioner/profile/{name}".format(name=name) + ) + + if response.status_code == 200: + retvalue = True + else: + retvalue = False + + return retvalue, response.json().get("message", "") + + +def vm_create(config, name, profile, wait_flag, define_flag, start_flag, script_args): + """ + Create a new VM named {name} with profile {profile} + + API endpoint: POST /api/v1/provisioner/create + API_arguments: name={name}, profile={profile}, arg={script_args} + API schema: {message} + """ + params = { + "name": name, + "profile": profile, + "start_vm": start_flag, + "define_vm": define_flag, + "arg": script_args, + } + response = call_api(config, "post", "/provisioner/create", params=params) + + if response.status_code == 202: + retvalue = True + if not wait_flag: + retdata = "Task ID: {}".format(response.json()["task_id"]) + else: + # Just return the task_id raw, instead of formatting it + retdata = response.json()["task_id"] + else: + retvalue = False + retdata = response.json().get("message", "") + + return retvalue, retdata + + +def task_status(config, task_id=None, is_watching=False): + """ + Get information about provisioner job {task_id} or all tasks if None + + API endpoint: GET /api/v1/provisioner/status + API arguments: + API schema: {json_data_object} + """ + if task_id is not None: + response = call_api( + config, "get", "/provisioner/status/{task_id}".format(task_id=task_id) + ) + else: + response = call_api(config, "get", "/provisioner/status") + + if task_id is not None: + if response.status_code == 200: + retvalue = True + respjson = response.json() + + if is_watching: + # Just return the raw JSON to the watching process instead of formatting it + return respjson + + job_state = respjson["state"] + if job_state == "RUNNING": + retdata = "Job state: RUNNING\nStage: {}/{}\nStatus: {}".format( + respjson["current"], respjson["total"], respjson["status"] + ) + elif job_state == "FAILED": + retdata = "Job state: FAILED\nStatus: {}".format(respjson["status"]) + elif job_state == "COMPLETED": + retdata = "Job state: COMPLETED\nStatus: {}".format(respjson["status"]) + else: + retdata = "Job state: {}\nStatus: {}".format( + respjson["state"], respjson["status"] + ) + else: + retvalue = 
False + retdata = response.json().get("message", "") + else: + retvalue = True + task_data_raw = response.json() + # Format the Celery data into a more useful data structure + task_data = list() + for task_type in ["active", "reserved", "scheduled"]: + try: + type_data = task_data_raw[task_type] + except Exception: + type_data = None + + if not type_data: + type_data = dict() + for task_host in type_data: + for task_job in task_data_raw[task_type][task_host]: + task = dict() + if task_type == "reserved": + task["type"] = "pending" + else: + task["type"] = task_type + task["worker"] = task_host + task["id"] = task_job.get("id") + try: + task_args = literal_eval(task_job.get("args")) + except Exception: + task_args = task_job.get("args") + task["vm_name"] = task_args[0] + task["vm_profile"] = task_args[1] + try: + task_kwargs = literal_eval(task_job.get("kwargs")) + except Exception: + task_kwargs = task_job.get("kwargs") + task["vm_define"] = str(bool(task_kwargs["define_vm"])) + task["vm_start"] = str(bool(task_kwargs["start_vm"])) + task_data.append(task) + retdata = task_data + + return retvalue, retdata + + +# +# Format functions +# +def format_list_template(template_data, template_type=None): + """ + Format the returned template template + + template_type can be used to only display part of the full list, allowing function + reuse with more limited output options. + """ + template_types = ["system", "network", "storage"] + normalized_template_data = dict() + ainformation = list() + + if template_type in template_types: + template_types = [template_type] + template_data_type = "{}_templates".format(template_type) + normalized_template_data[template_data_type] = template_data + else: + normalized_template_data = template_data + + if "system" in template_types: + ainformation.append( + format_list_template_system(normalized_template_data["system_templates"]) + ) + if len(template_types) > 1: + ainformation.append("") + + if "network" in template_types: + ainformation.append( + format_list_template_network(normalized_template_data["network_templates"]) + ) + if len(template_types) > 1: + ainformation.append("") + + if "storage" in template_types: + ainformation.append( + format_list_template_storage(normalized_template_data["storage_templates"]) + ) + + return "\n".join(ainformation) + + +def format_list_template_system(template_data): + if isinstance(template_data, dict): + template_data = [template_data] + + template_list_output = [] + + # Determine optimal column widths + template_name_length = 15 + template_id_length = 5 + template_vcpu_length = 6 + template_vram_length = 9 + template_serial_length = 7 + template_vnc_length = 4 + template_vnc_bind_length = 9 + template_node_limit_length = 6 + template_node_selector_length = 9 + template_node_autostart_length = 10 + template_migration_method_length = 10 + + for template in template_data: + # template_name column + _template_name_length = len(str(template["name"])) + 1 + if _template_name_length > template_name_length: + template_name_length = _template_name_length + # template_id column + _template_id_length = len(str(template["id"])) + 1 + if _template_id_length > template_id_length: + template_id_length = _template_id_length + # template_vcpu column + _template_vcpu_length = len(str(template["vcpu_count"])) + 1 + if _template_vcpu_length > template_vcpu_length: + template_vcpu_length = _template_vcpu_length + # template_vram column + _template_vram_length = len(str(template["vram_mb"])) + 1 + if _template_vram_length > 
template_vram_length: + template_vram_length = _template_vram_length + # template_serial column + _template_serial_length = len(str(template["serial"])) + 1 + if _template_serial_length > template_serial_length: + template_serial_length = _template_serial_length + # template_vnc column + _template_vnc_length = len(str(template["vnc"])) + 1 + if _template_vnc_length > template_vnc_length: + template_vnc_length = _template_vnc_length + # template_vnc_bind column + _template_vnc_bind_length = len(str(template["vnc_bind"])) + 1 + if _template_vnc_bind_length > template_vnc_bind_length: + template_vnc_bind_length = _template_vnc_bind_length + # template_node_limit column + _template_node_limit_length = len(str(template["node_limit"])) + 1 + if _template_node_limit_length > template_node_limit_length: + template_node_limit_length = _template_node_limit_length + # template_node_selector column + _template_node_selector_length = len(str(template["node_selector"])) + 1 + if _template_node_selector_length > template_node_selector_length: + template_node_selector_length = _template_node_selector_length + # template_node_autostart column + _template_node_autostart_length = len(str(template["node_autostart"])) + 1 + if _template_node_autostart_length > template_node_autostart_length: + template_node_autostart_length = _template_node_autostart_length + # template_migration_method column + _template_migration_method_length = len(str(template["migration_method"])) + 1 + if _template_migration_method_length > template_migration_method_length: + template_migration_method_length = _template_migration_method_length + + # Format the string (header) + template_list_output.append( + "{bold}{template_header: <{template_header_length}} {resources_header: <{resources_header_length}} {consoles_header: <{consoles_header_length}} {metadata_header: <{metadata_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + template_header_length=template_name_length + template_id_length + 1, + resources_header_length=template_vcpu_length + template_vram_length + 1, + consoles_header_length=template_serial_length + + template_vnc_length + + template_vnc_bind_length + + 2, + metadata_header_length=template_node_limit_length + + template_node_selector_length + + template_node_autostart_length + + template_migration_method_length + + 3, + template_header="System Templates " + + "".join( + ["-" for _ in range(17, template_name_length + template_id_length)] + ), + resources_header="Resources " + + "".join( + ["-" for _ in range(10, template_vcpu_length + template_vram_length)] + ), + consoles_header="Consoles " + + "".join( + [ + "-" + for _ in range( + 9, + template_serial_length + + template_vnc_length + + template_vnc_bind_length + + 1, + ) + ] + ), + metadata_header="Metadata " + + "".join( + [ + "-" + for _ in range( + 9, + template_node_limit_length + + template_node_selector_length + + template_node_autostart_length + + template_migration_method_length + + 2, + ) + ] + ), + ) + ) + + template_list_output.append( + "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ +{template_vcpu: <{template_vcpu_length}} \ +{template_vram: <{template_vram_length}} \ +{template_serial: <{template_serial_length}} \ +{template_vnc: <{template_vnc_length}} \ +{template_vnc_bind: <{template_vnc_bind_length}} \ +{template_node_limit: <{template_node_limit_length}} \ +{template_node_selector: <{template_node_selector_length}} \ +{template_node_autostart: 
<{template_node_autostart_length}} \ +{template_migration_method: <{template_migration_method_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + template_name_length=template_name_length, + template_id_length=template_id_length, + template_vcpu_length=template_vcpu_length, + template_vram_length=template_vram_length, + template_serial_length=template_serial_length, + template_vnc_length=template_vnc_length, + template_vnc_bind_length=template_vnc_bind_length, + template_node_limit_length=template_node_limit_length, + template_node_selector_length=template_node_selector_length, + template_node_autostart_length=template_node_autostart_length, + template_migration_method_length=template_migration_method_length, + template_name="Name", + template_id="ID", + template_vcpu="vCPUs", + template_vram="vRAM [M]", + template_serial="Serial", + template_vnc="VNC", + template_vnc_bind="VNC bind", + template_node_limit="Limit", + template_node_selector="Selector", + template_node_autostart="Autostart", + template_migration_method="Migration", + ) + ) + + # Format the string (elements) + for template in sorted(template_data, key=lambda i: i.get("name", None)): + template_list_output.append( + "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ +{template_vcpu: <{template_vcpu_length}} \ +{template_vram: <{template_vram_length}} \ +{template_serial: <{template_serial_length}} \ +{template_vnc: <{template_vnc_length}} \ +{template_vnc_bind: <{template_vnc_bind_length}} \ +{template_node_limit: <{template_node_limit_length}} \ +{template_node_selector: <{template_node_selector_length}} \ +{template_node_autostart: <{template_node_autostart_length}} \ +{template_migration_method: <{template_migration_method_length}}{end_bold}".format( + template_name_length=template_name_length, + template_id_length=template_id_length, + template_vcpu_length=template_vcpu_length, + template_vram_length=template_vram_length, + template_serial_length=template_serial_length, + template_vnc_length=template_vnc_length, + template_vnc_bind_length=template_vnc_bind_length, + template_node_limit_length=template_node_limit_length, + template_node_selector_length=template_node_selector_length, + template_node_autostart_length=template_node_autostart_length, + template_migration_method_length=template_migration_method_length, + bold="", + end_bold="", + template_name=str(template["name"]), + template_id=str(template["id"]), + template_vcpu=str(template["vcpu_count"]), + template_vram=str(template["vram_mb"]), + template_serial=str(template["serial"]), + template_vnc=str(template["vnc"]), + template_vnc_bind=str(template["vnc_bind"]), + template_node_limit=str(template["node_limit"]), + template_node_selector=str(template["node_selector"]), + template_node_autostart=str(template["node_autostart"]), + template_migration_method=str(template["migration_method"]), + ) + ) + + return "\n".join(template_list_output) + + +def format_list_template_network(template_template): + if isinstance(template_template, dict): + template_template = [template_template] + + template_list_output = [] + + # Determine optimal column widths + template_name_length = 18 + template_id_length = 5 + template_mac_template_length = 13 + template_networks_length = 10 + + for template in template_template: + # Join the networks elements into a single list of VNIs + network_list = list() + for network in template["networks"]: + network_list.append(str(network["vni"])) + template["networks_csv"] = 
",".join(network_list) + + for template in template_template: + # template_name column + _template_name_length = len(str(template["name"])) + 1 + if _template_name_length > template_name_length: + template_name_length = _template_name_length + # template_id column + _template_id_length = len(str(template["id"])) + 1 + if _template_id_length > template_id_length: + template_id_length = _template_id_length + # template_mac_template column + _template_mac_template_length = len(str(template["mac_template"])) + 1 + if _template_mac_template_length > template_mac_template_length: + template_mac_template_length = _template_mac_template_length + # template_networks column + _template_networks_length = len(str(template["networks_csv"])) + 1 + if _template_networks_length > template_networks_length: + template_networks_length = _template_networks_length + + # Format the string (header) + template_list_output.append( + "{bold}{template_header: <{template_header_length}} {details_header: <{details_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + template_header_length=template_name_length + template_id_length + 1, + details_header_length=template_mac_template_length + + template_networks_length + + 1, + template_header="Network Templates " + + "".join( + ["-" for _ in range(18, template_name_length + template_id_length)] + ), + details_header="Details " + + "".join( + [ + "-" + for _ in range( + 8, template_mac_template_length + template_networks_length + ) + ] + ), + ) + ) + + template_list_output.append( + "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ +{template_mac_template: <{template_mac_template_length}} \ +{template_networks: <{template_networks_length}}{end_bold}".format( + template_name_length=template_name_length, + template_id_length=template_id_length, + template_mac_template_length=template_mac_template_length, + template_networks_length=template_networks_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + template_name="Name", + template_id="ID", + template_mac_template="MAC template", + template_networks="Network VNIs", + ) + ) + + # Format the string (elements) + for template in sorted(template_template, key=lambda i: i.get("name", None)): + template_list_output.append( + "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ +{template_mac_template: <{template_mac_template_length}} \ +{template_networks: <{template_networks_length}}{end_bold}".format( + template_name_length=template_name_length, + template_id_length=template_id_length, + template_mac_template_length=template_mac_template_length, + template_networks_length=template_networks_length, + bold="", + end_bold="", + template_name=str(template["name"]), + template_id=str(template["id"]), + template_mac_template=str(template["mac_template"]), + template_networks=str(template["networks_csv"]), + ) + ) + + return "\n".join(template_list_output) + + +def format_list_template_storage(template_template): + if isinstance(template_template, dict): + template_template = [template_template] + + template_list_output = [] + + # Determine optimal column widths + template_name_length = 18 + template_id_length = 5 + template_disk_id_length = 8 + template_disk_pool_length = 5 + template_disk_source_length = 14 + template_disk_size_length = 9 + template_disk_filesystem_length = 11 + template_disk_fsargs_length = 10 + template_disk_mountpoint_length = 10 + + for template in template_template: + # template_name column + 
_template_name_length = len(str(template["name"])) + 1 + if _template_name_length > template_name_length: + template_name_length = _template_name_length + # template_id column + _template_id_length = len(str(template["id"])) + 1 + if _template_id_length > template_id_length: + template_id_length = _template_id_length + + for disk in template["disks"]: + # template_disk_id column + _template_disk_id_length = len(str(disk["disk_id"])) + 1 + if _template_disk_id_length > template_disk_id_length: + template_disk_id_length = _template_disk_id_length + # template_disk_pool column + _template_disk_pool_length = len(str(disk["pool"])) + 1 + if _template_disk_pool_length > template_disk_pool_length: + template_disk_pool_length = _template_disk_pool_length + # template_disk_source column + _template_disk_source_length = len(str(disk["source_volume"])) + 1 + if _template_disk_source_length > template_disk_source_length: + template_disk_source_length = _template_disk_source_length + # template_disk_size column + _template_disk_size_length = len(str(disk["disk_size_gb"])) + 1 + if _template_disk_size_length > template_disk_size_length: + template_disk_size_length = _template_disk_size_length + # template_disk_filesystem column + _template_disk_filesystem_length = len(str(disk["filesystem"])) + 1 + if _template_disk_filesystem_length > template_disk_filesystem_length: + template_disk_filesystem_length = _template_disk_filesystem_length + # template_disk_fsargs column + _template_disk_fsargs_length = len(str(disk["filesystem_args"])) + 1 + if _template_disk_fsargs_length > template_disk_fsargs_length: + template_disk_fsargs_length = _template_disk_fsargs_length + # template_disk_mountpoint column + _template_disk_mountpoint_length = len(str(disk["mountpoint"])) + 1 + if _template_disk_mountpoint_length > template_disk_mountpoint_length: + template_disk_mountpoint_length = _template_disk_mountpoint_length + + # Format the string (header) + template_list_output.append( + "{bold}{template_header: <{template_header_length}} {details_header: <{details_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + template_header_length=template_name_length + template_id_length + 1, + details_header_length=template_disk_id_length + + template_disk_pool_length + + template_disk_source_length + + template_disk_size_length + + template_disk_filesystem_length + + template_disk_fsargs_length + + template_disk_mountpoint_length + + 7, + template_header="Storage Templates " + + "".join( + ["-" for _ in range(18, template_name_length + template_id_length)] + ), + details_header="Details " + + "".join( + [ + "-" + for _ in range( + 8, + template_disk_id_length + + template_disk_pool_length + + template_disk_source_length + + template_disk_size_length + + template_disk_filesystem_length + + template_disk_fsargs_length + + template_disk_mountpoint_length + + 6, + ) + ] + ), + ) + ) + + template_list_output.append( + "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ +{template_disk_id: <{template_disk_id_length}} \ +{template_disk_pool: <{template_disk_pool_length}} \ +{template_disk_source: <{template_disk_source_length}} \ +{template_disk_size: <{template_disk_size_length}} \ +{template_disk_filesystem: <{template_disk_filesystem_length}} \ +{template_disk_fsargs: <{template_disk_fsargs_length}} \ +{template_disk_mountpoint: <{template_disk_mountpoint_length}}{end_bold}".format( + template_name_length=template_name_length, + 
template_id_length=template_id_length, + template_disk_id_length=template_disk_id_length, + template_disk_pool_length=template_disk_pool_length, + template_disk_source_length=template_disk_source_length, + template_disk_size_length=template_disk_size_length, + template_disk_filesystem_length=template_disk_filesystem_length, + template_disk_fsargs_length=template_disk_fsargs_length, + template_disk_mountpoint_length=template_disk_mountpoint_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + template_name="Name", + template_id="ID", + template_disk_id="Disk ID", + template_disk_pool="Pool", + template_disk_source="Source Volume", + template_disk_size="Size [G]", + template_disk_filesystem="Filesystem", + template_disk_fsargs="Arguments", + template_disk_mountpoint="Mountpoint", + ) + ) + + # Format the string (elements) + for template in sorted(template_template, key=lambda i: i.get("name", None)): + template_list_output.append( + "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}}{end_bold}".format( + template_name_length=template_name_length, + template_id_length=template_id_length, + bold="", + end_bold="", + template_name=str(template["name"]), + template_id=str(template["id"]), + ) + ) + for disk in sorted(template["disks"], key=lambda i: i.get("disk_id", None)): + template_list_output.append( + "{bold}{template_name: <{template_name_length}} {template_id: <{template_id_length}} \ +{template_disk_id: <{template_disk_id_length}} \ +{template_disk_pool: <{template_disk_pool_length}} \ +{template_disk_source: <{template_disk_source_length}} \ +{template_disk_size: <{template_disk_size_length}} \ +{template_disk_filesystem: <{template_disk_filesystem_length}} \ +{template_disk_fsargs: <{template_disk_fsargs_length}} \ +{template_disk_mountpoint: <{template_disk_mountpoint_length}}{end_bold}".format( + template_name_length=template_name_length, + template_id_length=template_id_length, + template_disk_id_length=template_disk_id_length, + template_disk_pool_length=template_disk_pool_length, + template_disk_source_length=template_disk_source_length, + template_disk_size_length=template_disk_size_length, + template_disk_filesystem_length=template_disk_filesystem_length, + template_disk_fsargs_length=template_disk_fsargs_length, + template_disk_mountpoint_length=template_disk_mountpoint_length, + bold="", + end_bold="", + template_name="", + template_id="", + template_disk_id=str(disk["disk_id"]), + template_disk_pool=str(disk["pool"]), + template_disk_source=str(disk["source_volume"]), + template_disk_size=str(disk["disk_size_gb"]), + template_disk_filesystem=str(disk["filesystem"]), + template_disk_fsargs=str(disk["filesystem_args"]), + template_disk_mountpoint=str(disk["mountpoint"]), + ) + ) + + return "\n".join(template_list_output) + + +def format_list_userdata(userdata_data, lines=None): + if isinstance(userdata_data, dict): + userdata_data = [userdata_data] + + userdata_list_output = [] + + # Determine optimal column widths + userdata_name_length = 12 + userdata_id_length = 5 + userdata_document_length = 92 - userdata_name_length - userdata_id_length + + for userdata in userdata_data: + # userdata_name column + _userdata_name_length = len(str(userdata["name"])) + 1 + if _userdata_name_length > userdata_name_length: + userdata_name_length = _userdata_name_length + # userdata_id column + _userdata_id_length = len(str(userdata["id"])) + 1 + if _userdata_id_length > userdata_id_length: + userdata_id_length = _userdata_id_length + + # Format the 
string (header) + userdata_list_output.append( + "{bold}{userdata_header: <{userdata_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + userdata_header_length=userdata_name_length + + userdata_id_length + + userdata_document_length + + 2, + userdata_header="Userdata " + + "".join( + [ + "-" + for _ in range( + 9, + userdata_name_length + + userdata_id_length + + userdata_document_length + + 1, + ) + ] + ), + ) + ) + + userdata_list_output.append( + "{bold}{userdata_name: <{userdata_name_length}} {userdata_id: <{userdata_id_length}} \ +{userdata_data}{end_bold}".format( + userdata_name_length=userdata_name_length, + userdata_id_length=userdata_id_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + userdata_name="Name", + userdata_id="ID", + userdata_data="Document", + ) + ) + + # Format the string (elements) + for data in sorted(userdata_data, key=lambda i: i.get("name", None)): + line_count = 0 + for line in data["userdata"].split("\n"): + if line_count < 1: + userdata_name = data["name"] + userdata_id = data["id"] + else: + userdata_name = "" + userdata_id = "" + line_count += 1 + + if lines and line_count > lines: + userdata_list_output.append( + "{bold}{userdata_name: <{userdata_name_length}} {userdata_id: <{userdata_id_length}} \ +{userdata_data}{end_bold}".format( + userdata_name_length=userdata_name_length, + userdata_id_length=userdata_id_length, + bold="", + end_bold="", + userdata_name=userdata_name, + userdata_id=userdata_id, + userdata_data="[...]", + ) + ) + break + + userdata_list_output.append( + "{bold}{userdata_name: <{userdata_name_length}} {userdata_id: <{userdata_id_length}} \ +{userdata_data}{end_bold}".format( + userdata_name_length=userdata_name_length, + userdata_id_length=userdata_id_length, + bold="", + end_bold="", + userdata_name=userdata_name, + userdata_id=userdata_id, + userdata_data=str(line), + ) + ) + + return "\n".join(userdata_list_output) + + +def format_list_script(script_data, lines=None): + if isinstance(script_data, dict): + script_data = [script_data] + + script_list_output = [] + + # Determine optimal column widths + script_name_length = 12 + script_id_length = 5 + script_data_length = 92 - script_name_length - script_id_length + + for script in script_data: + # script_name column + _script_name_length = len(str(script["name"])) + 1 + if _script_name_length > script_name_length: + script_name_length = _script_name_length + # script_id column + _script_id_length = len(str(script["id"])) + 1 + if _script_id_length > script_id_length: + script_id_length = _script_id_length + + # Format the string (header) + script_list_output.append( + "{bold}{script_header: <{script_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + script_header_length=script_name_length + + script_id_length + + script_data_length + + 2, + script_header="Script " + + "".join( + [ + "-" + for _ in range( + 7, + script_name_length + script_id_length + script_data_length + 1, + ) + ] + ), + ) + ) + + script_list_output.append( + "{bold}{script_name: <{script_name_length}} {script_id: <{script_id_length}} \ +{script_data}{end_bold}".format( + script_name_length=script_name_length, + script_id_length=script_id_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + script_name="Name", + script_id="ID", + script_data="Script", + ) + ) + + # Format the string (elements) + for script in sorted(script_data, key=lambda i: i.get("name", None)): + line_count = 0 + for line in 
script["script"].split("\n"): + if line_count < 1: + script_name = script["name"] + script_id = script["id"] + else: + script_name = "" + script_id = "" + line_count += 1 + + if lines and line_count > lines: + script_list_output.append( + "{bold}{script_name: <{script_name_length}} {script_id: <{script_id_length}} \ +{script_data}{end_bold}".format( + script_name_length=script_name_length, + script_id_length=script_id_length, + bold="", + end_bold="", + script_name=script_name, + script_id=script_id, + script_data="[...]", + ) + ) + break + + script_list_output.append( + "{bold}{script_name: <{script_name_length}} {script_id: <{script_id_length}} \ +{script_data}{end_bold}".format( + script_name_length=script_name_length, + script_id_length=script_id_length, + bold="", + end_bold="", + script_name=script_name, + script_id=script_id, + script_data=str(line), + ) + ) + + return "\n".join(script_list_output) + + +def format_list_ova(ova_data): + if isinstance(ova_data, dict): + ova_data = [ova_data] + + ova_list_output = [] + + # Determine optimal column widths + ova_name_length = 18 + ova_id_length = 5 + ova_disk_id_length = 8 + ova_disk_size_length = 10 + ova_disk_pool_length = 5 + ova_disk_volume_format_length = 7 + ova_disk_volume_name_length = 13 + + for ova in ova_data: + # ova_name column + _ova_name_length = len(str(ova["name"])) + 1 + if _ova_name_length > ova_name_length: + ova_name_length = _ova_name_length + # ova_id column + _ova_id_length = len(str(ova["id"])) + 1 + if _ova_id_length > ova_id_length: + ova_id_length = _ova_id_length + + for disk in ova["volumes"]: + # ova_disk_id column + _ova_disk_id_length = len(str(disk["disk_id"])) + 1 + if _ova_disk_id_length > ova_disk_id_length: + ova_disk_id_length = _ova_disk_id_length + # ova_disk_size column + _ova_disk_size_length = len(str(disk["disk_size_gb"])) + 1 + if _ova_disk_size_length > ova_disk_size_length: + ova_disk_size_length = _ova_disk_size_length + # ova_disk_pool column + _ova_disk_pool_length = len(str(disk["pool"])) + 1 + if _ova_disk_pool_length > ova_disk_pool_length: + ova_disk_pool_length = _ova_disk_pool_length + # ova_disk_volume_format column + _ova_disk_volume_format_length = len(str(disk["volume_format"])) + 1 + if _ova_disk_volume_format_length > ova_disk_volume_format_length: + ova_disk_volume_format_length = _ova_disk_volume_format_length + # ova_disk_volume_name column + _ova_disk_volume_name_length = len(str(disk["volume_name"])) + 1 + if _ova_disk_volume_name_length > ova_disk_volume_name_length: + ova_disk_volume_name_length = _ova_disk_volume_name_length + + # Format the string (header) + ova_list_output.append( + "{bold}{ova_header: <{ova_header_length}} {details_header: <{details_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + ova_header_length=ova_name_length + ova_id_length + 1, + details_header_length=ova_disk_id_length + + ova_disk_size_length + + ova_disk_pool_length + + ova_disk_volume_format_length + + ova_disk_volume_name_length + + 4, + ova_header="OVAs " + + "".join(["-" for _ in range(5, ova_name_length + ova_id_length)]), + details_header="Details " + + "".join( + [ + "-" + for _ in range( + 8, + ova_disk_id_length + + ova_disk_size_length + + ova_disk_pool_length + + ova_disk_volume_format_length + + ova_disk_volume_name_length + + 3, + ) + ] + ), + ) + ) + + ova_list_output.append( + "{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}} \ +{ova_disk_id: <{ova_disk_id_length}} \ +{ova_disk_size: <{ova_disk_size_length}} \ 
+{ova_disk_pool: <{ova_disk_pool_length}} \ +{ova_disk_volume_format: <{ova_disk_volume_format_length}} \ +{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}".format( + ova_name_length=ova_name_length, + ova_id_length=ova_id_length, + ova_disk_id_length=ova_disk_id_length, + ova_disk_pool_length=ova_disk_pool_length, + ova_disk_size_length=ova_disk_size_length, + ova_disk_volume_format_length=ova_disk_volume_format_length, + ova_disk_volume_name_length=ova_disk_volume_name_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + ova_name="Name", + ova_id="ID", + ova_disk_id="Disk ID", + ova_disk_size="Size [GB]", + ova_disk_pool="Pool", + ova_disk_volume_format="Format", + ova_disk_volume_name="Source Volume", + ) + ) + + # Format the string (elements) + for ova in sorted(ova_data, key=lambda i: i.get("name", None)): + ova_list_output.append( + "{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}}{end_bold}".format( + ova_name_length=ova_name_length, + ova_id_length=ova_id_length, + bold="", + end_bold="", + ova_name=str(ova["name"]), + ova_id=str(ova["id"]), + ) + ) + for disk in sorted(ova["volumes"], key=lambda i: i.get("disk_id", None)): + ova_list_output.append( + "{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}} \ +{ova_disk_id: <{ova_disk_id_length}} \ +{ova_disk_size: <{ova_disk_size_length}} \ +{ova_disk_pool: <{ova_disk_pool_length}} \ +{ova_disk_volume_format: <{ova_disk_volume_format_length}} \ +{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}".format( + ova_name_length=ova_name_length, + ova_id_length=ova_id_length, + ova_disk_id_length=ova_disk_id_length, + ova_disk_size_length=ova_disk_size_length, + ova_disk_pool_length=ova_disk_pool_length, + ova_disk_volume_format_length=ova_disk_volume_format_length, + ova_disk_volume_name_length=ova_disk_volume_name_length, + bold="", + end_bold="", + ova_name="", + ova_id="", + ova_disk_id=str(disk["disk_id"]), + ova_disk_size=str(disk["disk_size_gb"]), + ova_disk_pool=str(disk["pool"]), + ova_disk_volume_format=str(disk["volume_format"]), + ova_disk_volume_name=str(disk["volume_name"]), + ) + ) + + return "\n".join(ova_list_output) + + +def format_list_profile(profile_data): + if isinstance(profile_data, dict): + profile_data = [profile_data] + + # Format the profile "source" from the type and, if applicable, OVA profile name + for profile in profile_data: + profile_type = profile["type"] + if "ova" in profile_type: + # Set the source to the name of the OVA: + profile["source"] = "OVA {}".format(profile["ova"]) + else: + # Set the source to be the type + profile["source"] = profile_type + + profile_list_output = [] + + # Determine optimal column widths + profile_name_length = 18 + profile_id_length = 5 + profile_source_length = 7 + + profile_system_template_length = 7 + profile_network_template_length = 8 + profile_storage_template_length = 8 + profile_userdata_length = 9 + profile_script_length = 7 + profile_arguments_length = 18 + + for profile in profile_data: + # profile_name column + _profile_name_length = len(str(profile["name"])) + 1 + if _profile_name_length > profile_name_length: + profile_name_length = _profile_name_length + # profile_id column + _profile_id_length = len(str(profile["id"])) + 1 + if _profile_id_length > profile_id_length: + profile_id_length = _profile_id_length + # profile_source column + _profile_source_length = len(str(profile["source"])) + 1 + if _profile_source_length > profile_source_length: + profile_source_length = _profile_source_length + 
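# --- Illustrative sketch (editorial aside, not part of this patch) -----------
# The list formatters above (userdata, script, OVA, and this profile formatter)
# all follow the same pattern: each column starts at a minimum width (roughly
# the header length) and grows to the longest value plus one padding space,
# then every row is rendered with "{value: <{width}}" alignment. A minimal,
# hypothetical helper showing that width calculation in isolation; the function
# name and sample fields below are assumptions for illustration only.

def _column_widths(rows, fields, minimums):
    """Return a width per field: max(len(value)) + 1, never below the given minimum."""
    widths = dict(minimums)
    for row in rows:
        for field in fields:
            candidate = len(str(row.get(field, ""))) + 1
            if candidate > widths[field]:
                widths[field] = candidate
    return widths


if __name__ == "__main__":
    sample = [{"name": "default_script", "id": 1}, {"name": "x", "id": 12345}]
    widths = _column_widths(sample, ["name", "id"], {"name": 12, "id": 5})
    row_format = "{name: <{w_name}} {id: <{w_id}}"
    print(row_format.format(name="Name", id="ID", w_name=widths["name"], w_id=widths["id"]))
    for row in sample:
        print(row_format.format(name=row["name"], id=row["id"],
                                w_name=widths["name"], w_id=widths["id"]))
# -----------------------------------------------------------------------------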
# profile_system_template column + _profile_system_template_length = len(str(profile["system_template"])) + 1 + if _profile_system_template_length > profile_system_template_length: + profile_system_template_length = _profile_system_template_length + # profile_network_template column + _profile_network_template_length = len(str(profile["network_template"])) + 1 + if _profile_network_template_length > profile_network_template_length: + profile_network_template_length = _profile_network_template_length + # profile_storage_template column + _profile_storage_template_length = len(str(profile["storage_template"])) + 1 + if _profile_storage_template_length > profile_storage_template_length: + profile_storage_template_length = _profile_storage_template_length + # profile_userdata column + _profile_userdata_length = len(str(profile["userdata"])) + 1 + if _profile_userdata_length > profile_userdata_length: + profile_userdata_length = _profile_userdata_length + # profile_script column + _profile_script_length = len(str(profile["script"])) + 1 + if _profile_script_length > profile_script_length: + profile_script_length = _profile_script_length + + # Format the string (header) + profile_list_output.append( + "{bold}{profile_header: <{profile_header_length}} {templates_header: <{templates_header_length}} {data_header: <{data_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + profile_header_length=profile_name_length + + profile_id_length + + profile_source_length + + 2, + templates_header_length=profile_system_template_length + + profile_network_template_length + + profile_storage_template_length + + 2, + data_header_length=profile_userdata_length + + profile_script_length + + profile_arguments_length + + 2, + profile_header="Profiles " + + "".join( + [ + "-" + for _ in range( + 9, + profile_name_length + + profile_id_length + + profile_source_length + + 1, + ) + ] + ), + templates_header="Templates " + + "".join( + [ + "-" + for _ in range( + 10, + profile_system_template_length + + profile_network_template_length + + profile_storage_template_length + + 1, + ) + ] + ), + data_header="Data " + + "".join( + [ + "-" + for _ in range( + 5, + profile_userdata_length + + profile_script_length + + profile_arguments_length + + 1, + ) + ] + ), + ) + ) + + profile_list_output.append( + "{bold}{profile_name: <{profile_name_length}} {profile_id: <{profile_id_length}} {profile_source: <{profile_source_length}} \ +{profile_system_template: <{profile_system_template_length}} \ +{profile_network_template: <{profile_network_template_length}} \ +{profile_storage_template: <{profile_storage_template_length}} \ +{profile_userdata: <{profile_userdata_length}} \ +{profile_script: <{profile_script_length}} \ +{profile_arguments}{end_bold}".format( + profile_name_length=profile_name_length, + profile_id_length=profile_id_length, + profile_source_length=profile_source_length, + profile_system_template_length=profile_system_template_length, + profile_network_template_length=profile_network_template_length, + profile_storage_template_length=profile_storage_template_length, + profile_userdata_length=profile_userdata_length, + profile_script_length=profile_script_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + profile_name="Name", + profile_id="ID", + profile_source="Source", + profile_system_template="System", + profile_network_template="Network", + profile_storage_template="Storage", + profile_userdata="Userdata", + profile_script="Script", + profile_arguments="Script 
Arguments", + ) + ) + + # Format the string (elements) + for profile in sorted(profile_data, key=lambda i: i.get("name", None)): + arguments_list = ", ".join(profile["arguments"]) + if not arguments_list: + arguments_list = "N/A" + profile_list_output.append( + "{bold}{profile_name: <{profile_name_length}} {profile_id: <{profile_id_length}} {profile_source: <{profile_source_length}} \ +{profile_system_template: <{profile_system_template_length}} \ +{profile_network_template: <{profile_network_template_length}} \ +{profile_storage_template: <{profile_storage_template_length}} \ +{profile_userdata: <{profile_userdata_length}} \ +{profile_script: <{profile_script_length}} \ +{profile_arguments}{end_bold}".format( + profile_name_length=profile_name_length, + profile_id_length=profile_id_length, + profile_source_length=profile_source_length, + profile_system_template_length=profile_system_template_length, + profile_network_template_length=profile_network_template_length, + profile_storage_template_length=profile_storage_template_length, + profile_userdata_length=profile_userdata_length, + profile_script_length=profile_script_length, + bold="", + end_bold="", + profile_name=profile["name"], + profile_id=profile["id"], + profile_source=profile["source"], + profile_system_template=profile["system_template"], + profile_network_template=profile["network_template"], + profile_storage_template=profile["storage_template"], + profile_userdata=profile["userdata"], + profile_script=profile["script"], + profile_arguments=arguments_list, + ) + ) + + return "\n".join(profile_list_output) + + +def format_list_task(task_data): + task_list_output = [] + + # Determine optimal column widths + task_id_length = 7 + task_type_length = 7 + task_worker_length = 7 + task_vm_name_length = 5 + task_vm_profile_length = 8 + task_vm_define_length = 8 + task_vm_start_length = 7 + + for task in task_data: + # task_id column + _task_id_length = len(str(task["id"])) + 1 + if _task_id_length > task_id_length: + task_id_length = _task_id_length + # task_worker column + _task_worker_length = len(str(task["worker"])) + 1 + if _task_worker_length > task_worker_length: + task_worker_length = _task_worker_length + # task_type column + _task_type_length = len(str(task["type"])) + 1 + if _task_type_length > task_type_length: + task_type_length = _task_type_length + # task_vm_name column + _task_vm_name_length = len(str(task["vm_name"])) + 1 + if _task_vm_name_length > task_vm_name_length: + task_vm_name_length = _task_vm_name_length + # task_vm_profile column + _task_vm_profile_length = len(str(task["vm_profile"])) + 1 + if _task_vm_profile_length > task_vm_profile_length: + task_vm_profile_length = _task_vm_profile_length + # task_vm_define column + _task_vm_define_length = len(str(task["vm_define"])) + 1 + if _task_vm_define_length > task_vm_define_length: + task_vm_define_length = _task_vm_define_length + # task_vm_start column + _task_vm_start_length = len(str(task["vm_start"])) + 1 + if _task_vm_start_length > task_vm_start_length: + task_vm_start_length = _task_vm_start_length + + # Format the string (header) + task_list_output.append( + "{bold}{task_header: <{task_header_length}} {vms_header: <{vms_header_length}}{end_bold}".format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + task_header_length=task_id_length + + task_type_length + + task_worker_length + + 2, + vms_header_length=task_vm_name_length + + task_vm_profile_length + + task_vm_define_length + + task_vm_start_length + + 3, + task_header="Tasks " + + "".join( + 
[ + "-" + for _ in range( + 6, task_id_length + task_type_length + task_worker_length + 1 + ) + ] + ), + vms_header="VM Details " + + "".join( + [ + "-" + for _ in range( + 11, + task_vm_name_length + + task_vm_profile_length + + task_vm_define_length + + task_vm_start_length + + 2, + ) + ] + ), + ) + ) + + task_list_output.append( + "{bold}{task_id: <{task_id_length}} {task_type: <{task_type_length}} \ +{task_worker: <{task_worker_length}} \ +{task_vm_name: <{task_vm_name_length}} \ +{task_vm_profile: <{task_vm_profile_length}} \ +{task_vm_define: <{task_vm_define_length}} \ +{task_vm_start: <{task_vm_start_length}}{end_bold}".format( + task_id_length=task_id_length, + task_type_length=task_type_length, + task_worker_length=task_worker_length, + task_vm_name_length=task_vm_name_length, + task_vm_profile_length=task_vm_profile_length, + task_vm_define_length=task_vm_define_length, + task_vm_start_length=task_vm_start_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + task_id="Job ID", + task_type="Status", + task_worker="Worker", + task_vm_name="Name", + task_vm_profile="Profile", + task_vm_define="Define?", + task_vm_start="Start?", + ) + ) + + # Format the string (elements) + for task in sorted(task_data, key=lambda i: i.get("type", None)): + task_list_output.append( + "{bold}{task_id: <{task_id_length}} {task_type: <{task_type_length}} \ +{task_worker: <{task_worker_length}} \ +{task_vm_name: <{task_vm_name_length}} \ +{task_vm_profile: <{task_vm_profile_length}} \ +{task_vm_define: <{task_vm_define_length}} \ +{task_vm_start: <{task_vm_start_length}}{end_bold}".format( + task_id_length=task_id_length, + task_type_length=task_type_length, + task_worker_length=task_worker_length, + task_vm_name_length=task_vm_name_length, + task_vm_profile_length=task_vm_profile_length, + task_vm_define_length=task_vm_define_length, + task_vm_start_length=task_vm_start_length, + bold="", + end_bold="", + task_id=task["id"], + task_type=task["type"], + task_worker=task["worker"], + task_vm_name=task["vm_name"], + task_vm_profile=task["vm_profile"], + task_vm_define=task["vm_define"], + task_vm_start=task["vm_start"], + ) + ) + + return "\n".join(task_list_output) diff --git a/cli-client-new/pvc/lib/vm.py b/cli-client-new/pvc/lib/vm.py new file mode 100644 index 00000000..65abc5c8 --- /dev/null +++ b/cli-client-new/pvc/lib/vm.py @@ -0,0 +1,2085 @@ +#!/usr/bin/env python3 + +# vm.py - PVC CLI client function library, VM functions +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +############################################################################### + +import time +import re + +import pvc.lib.ansiprint as ansiprint +from pvc.lib.common import call_api, format_bytes, format_metric + + +# +# Primary functions +# +def vm_info(config, vm): + """ + Get information about (single) VM + + API endpoint: GET /api/v1/vm/{vm} + API arguments: + API schema: {json_data_object} + """ + response = call_api(config, "get", "/vm/{vm}".format(vm=vm)) + + if response.status_code == 200: + if isinstance(response.json(), list) and len(response.json()) != 1: + # No exact match; return not found + return False, "VM not found." + else: + # Return a single instance if the response is a list + if isinstance(response.json(), list): + return True, response.json()[0] + # This shouldn't happen, but is here just in case + else: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def vm_list(config, limit, target_node, target_state, target_tag, negate): + """ + Get list information about VMs (limited by {limit}, {target_node}, or {target_state}) + + API endpoint: GET /api/v1/vm + API arguments: limit={limit}, node={target_node}, state={target_state}, tag={target_tag}, negate={negate} + API schema: [{json_data_object},{json_data_object},etc.] + """ + params = dict() + if limit: + params["limit"] = limit + if target_node: + params["node"] = target_node + if target_state: + params["state"] = target_state + if target_tag: + params["tag"] = target_tag + params["negate"] = negate + + response = call_api(config, "get", "/vm", params=params) + + if response.status_code == 200: + return True, response.json() + else: + return False, response.json().get("message", "") + + +def vm_define( + config, + xml, + node, + node_limit, + node_selector, + node_autostart, + migration_method, + user_tags, + protected_tags, +): + """ + Define a new VM on the cluster + + API endpoint: POST /vm + API arguments: xml={xml}, node={node}, limit={node_limit}, selector={node_selector}, autostart={node_autostart}, migration_method={migration_method}, user_tags={user_tags}, protected_tags={protected_tags} + API schema: {"message":"{data}"} + """ + params = { + "node": node, + "limit": node_limit, + "selector": node_selector, + "autostart": node_autostart, + "migration_method": migration_method, + "user_tags": user_tags, + "protected_tags": protected_tags, + } + data = {"xml": xml} + response = call_api(config, "post", "/vm", params=params, data=data) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_modify(config, vm, xml, restart): + """ + Modify the configuration of VM + + API endpoint: PUT /vm/{vm} + API arguments: xml={xml}, restart={restart} + API schema: {"message":"{data}"} + """ + params = {"restart": restart} + data = {"xml": xml} + response = call_api( + config, "put", "/vm/{vm}".format(vm=vm), params=params, data=data + ) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_device_attach(config, vm, xml): + """ + Attach a device to a VM + + API endpoint: POST /vm/{vm}/device + API arguments: xml={xml} + API schema: {"message":"{data}"} + """ + data = {"xml": xml} + response = call_api(config, "post", "/vm/{vm}/device".format(vm=vm), data=data) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, 
response.json().get("message", "") + + +def vm_device_detach(config, vm, xml): + """ + Detach a device from a VM + + API endpoint: DELETE /vm/{vm}/device + API arguments: xml={xml} + API schema: {"message":"{data}"} + """ + data = {"xml": xml} + response = call_api(config, "delete", "/vm/{vm}/device".format(vm=vm), data=data) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_rename(config, vm, new_name): + """ + Rename VM to new name + + API endpoint: POST /vm/{vm}/rename + API arguments: new_name={new_name} + API schema: {"message":"{data}"} + """ + params = {"new_name": new_name} + response = call_api(config, "post", "/vm/{vm}/rename".format(vm=vm), params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_metadata( + config, + vm, + node_limit, + node_selector, + node_autostart, + migration_method, + provisioner_profile, +): + """ + Modify PVC metadata of a VM + + API endpoint: POST /vm/{vm}/meta + API arguments: limit={node_limit}, selector={node_selector}, autostart={node_autostart}, migration_method={migration_method} profile={provisioner_profile} + API schema: {"message":"{data}"} + """ + params = dict() + + # Update any params that we've sent + if node_limit is not None: + params["limit"] = node_limit + + if node_selector is not None: + params["selector"] = node_selector + + if node_autostart is not None: + params["autostart"] = node_autostart + + if migration_method is not None: + params["migration_method"] = migration_method + + if provisioner_profile is not None: + params["profile"] = provisioner_profile + + # Write the new metadata + response = call_api(config, "post", "/vm/{vm}/meta".format(vm=vm), params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_tags_get(config, vm): + """ + Get PVC tags of a VM + + API endpoint: GET /vm/{vm}/tags + API arguments: + API schema: {{"name": "{name}", "type": "{type}"},...} + """ + + response = call_api(config, "get", "/vm/{vm}/tags".format(vm=vm)) + + if response.status_code == 200: + retstatus = True + retdata = response.json() + else: + retstatus = False + retdata = response.json().get("message", "") + + return retstatus, retdata + + +def vm_tag_set(config, vm, action, tag, protected=False): + """ + Modify PVC tags of a VM + + API endpoint: POST /vm/{vm}/tags + API arguments: action={action}, tag={tag}, protected={protected} + API schema: {"message":"{data}"} + """ + + params = {"action": action, "tag": tag, "protected": protected} + + # Update the tags + response = call_api(config, "post", "/vm/{vm}/tags".format(vm=vm), params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def format_vm_tags(config, name, tags): + """ + Format the output of a tags dictionary in a nice table + """ + if len(tags) < 1: + return "No tags found." 
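# --- Illustrative sketch (editorial aside, not part of this patch) -----------
# Nearly every function in this library follows the same contract: issue one
# call_api() request, treat HTTP 200 as success, and return a
# (success: bool, data_or_message) tuple. A hedged restatement of that pattern,
# assuming only that call_api() returns a requests-style Response object; the
# helper name below is hypothetical, not part of the PVC API.

def _simple_post(config, uri, params=None, data=None):
    """POST to the PVC API and reduce the response to (ok, message)."""
    response = call_api(config, "post", uri, params=params, data=data)
    ok = response.status_code == 200
    return ok, response.json().get("message", "")


# Example use, mirroring vm_tag_set() above (VM name and tag are placeholders):
# ok, message = _simple_post(config, "/vm/testvm/tags",
#                            params={"action": "add", "tag": "web", "protected": False})
# -----------------------------------------------------------------------------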
+ + output_list = [] + + name_length = 5 + _name_length = len(name) + 1 + if _name_length > name_length: + name_length = _name_length + + tags_name_length = 4 + tags_type_length = 5 + tags_protected_length = 10 + for tag in tags: + _tags_name_length = len(tag["name"]) + 1 + if _tags_name_length > tags_name_length: + tags_name_length = _tags_name_length + + _tags_type_length = len(tag["type"]) + 1 + if _tags_type_length > tags_type_length: + tags_type_length = _tags_type_length + + _tags_protected_length = len(str(tag["protected"])) + 1 + if _tags_protected_length > tags_protected_length: + tags_protected_length = _tags_protected_length + + output_list.append( + "{bold}{tags_name: <{tags_name_length}} \ +{tags_type: <{tags_type_length}} \ +{tags_protected: <{tags_protected_length}}{end_bold}".format( + tags_name_length=tags_name_length, + tags_type_length=tags_type_length, + tags_protected_length=tags_protected_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + tags_name="Name", + tags_type="Type", + tags_protected="Protected", + ) + ) + + for tag in sorted(tags, key=lambda t: t["name"]): + output_list.append( + "{bold}{tags_name: <{tags_name_length}} \ +{tags_type: <{tags_type_length}} \ +{tags_protected: <{tags_protected_length}}{end_bold}".format( + tags_type_length=tags_type_length, + tags_name_length=tags_name_length, + tags_protected_length=tags_protected_length, + bold="", + end_bold="", + tags_name=tag["name"], + tags_type=tag["type"], + tags_protected=str(tag["protected"]), + ) + ) + + return "\n".join(output_list) + + +def vm_remove(config, vm, delete_disks=False): + """ + Remove a VM + + API endpoint: DELETE /vm/{vm} + API arguments: delete_disks={delete_disks} + API schema: {"message":"{data}"} + """ + params = {"delete_disks": delete_disks} + response = call_api(config, "delete", "/vm/{vm}".format(vm=vm), params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_state(config, vm, target_state, force=False, wait=False): + """ + Modify the current state of VM + + API endpoint: POST /vm/{vm}/state + API arguments: state={state}, wait={wait} + API schema: {"message":"{data}"} + """ + params = { + "state": target_state, + "force": str(force).lower(), + "wait": str(wait).lower(), + } + response = call_api(config, "post", "/vm/{vm}/state".format(vm=vm), params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_node(config, vm, target_node, action, force=False, wait=False, force_live=False): + """ + Modify the current node of VM via {action} + + API endpoint: POST /vm/{vm}/node + API arguments: node={target_node}, action={action}, force={force}, wait={wait}, force_live={force_live} + API schema: {"message":"{data}"} + """ + params = { + "node": target_node, + "action": action, + "force": str(force).lower(), + "wait": str(wait).lower(), + "force_live": str(force_live).lower(), + } + response = call_api(config, "post", "/vm/{vm}/node".format(vm=vm), params=params) + + if response.status_code == 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_locks(config, vm): + """ + Flush RBD locks of (stopped) VM + + API endpoint: POST /vm/{vm}/locks + API arguments: + API schema: {"message":"{data}"} + """ + response = call_api(config, "post", "/vm/{vm}/locks".format(vm=vm)) + + if response.status_code 
== 200: + retstatus = True + else: + retstatus = False + + return retstatus, response.json().get("message", "") + + +def vm_vcpus_set(config, vm, vcpus, topology, restart): + """ + Set the vCPU count of the VM with topology + + Calls vm_info to get the VM XML. + + Calls vm_modify to set the VM XML. + """ + from lxml.objectify import fromstring + from lxml.etree import tostring + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." + + parsed_xml.vcpu._setText(str(vcpus)) + parsed_xml.cpu.topology.set("sockets", str(topology[0])) + parsed_xml.cpu.topology.set("cores", str(topology[1])) + parsed_xml.cpu.topology.set("threads", str(topology[2])) + + try: + new_xml = tostring(parsed_xml, pretty_print=True) + except Exception: + return False, "ERROR: Failed to dump XML data." + + return vm_modify(config, vm, new_xml, restart) + + +def vm_vcpus_get(config, vm): + """ + Get the vCPU count of the VM + + Calls vm_info to get VM XML. + + Returns a tuple of (vcpus, (sockets, cores, threads)) + """ + from lxml.objectify import fromstring + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." + + vm_vcpus = int(parsed_xml.vcpu.text) + vm_sockets = parsed_xml.cpu.topology.attrib.get("sockets") + vm_cores = parsed_xml.cpu.topology.attrib.get("cores") + vm_threads = parsed_xml.cpu.topology.attrib.get("threads") + + return True, (vm_vcpus, (vm_sockets, vm_cores, vm_threads)) + + +def format_vm_vcpus(config, name, vcpus): + """ + Format the output of a vCPU value in a nice table + """ + output_list = [] + + name_length = 5 + _name_length = len(name) + 1 + if _name_length > name_length: + name_length = _name_length + + vcpus_length = 6 + sockets_length = 8 + cores_length = 6 + threads_length = 8 + + output_list.append( + "{bold}{name: <{name_length}} \ +{vcpus: <{vcpus_length}} \ +{sockets: <{sockets_length}} \ +{cores: <{cores_length}} \ +{threads: <{threads_length}}{end_bold}".format( + name_length=name_length, + vcpus_length=vcpus_length, + sockets_length=sockets_length, + cores_length=cores_length, + threads_length=threads_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + name="Name", + vcpus="vCPUs", + sockets="Sockets", + cores="Cores", + threads="Threads", + ) + ) + output_list.append( + "{bold}{name: <{name_length}} \ +{vcpus: <{vcpus_length}} \ +{sockets: <{sockets_length}} \ +{cores: <{cores_length}} \ +{threads: <{threads_length}}{end_bold}".format( + name_length=name_length, + vcpus_length=vcpus_length, + sockets_length=sockets_length, + cores_length=cores_length, + threads_length=threads_length, + bold="", + end_bold="", + name=name, + vcpus=vcpus[0], + sockets=vcpus[1][0], + cores=vcpus[1][1], + threads=vcpus[1][2], + ) + ) + return "\n".join(output_list) + + +def vm_memory_set(config, vm, memory, restart): + """ + Set the provisioned memory of the VM with topology + + Calls vm_info to get the VM XML. + + Calls vm_modify to set the VM XML. 
+ """ + from lxml.objectify import fromstring + from lxml.etree import tostring + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." + + parsed_xml.memory._setText(str(memory)) + + try: + new_xml = tostring(parsed_xml, pretty_print=True) + except Exception: + return False, "ERROR: Failed to dump XML data." + + return vm_modify(config, vm, new_xml, restart) + + +def vm_memory_get(config, vm): + """ + Get the provisioned memory of the VM + + Calls vm_info to get VM XML. + + Returns an integer memory value. + """ + from lxml.objectify import fromstring + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." + + vm_memory = int(parsed_xml.memory.text) + + return True, vm_memory + + +def format_vm_memory(config, name, memory): + """ + Format the output of a memory value in a nice table + """ + output_list = [] + + name_length = 5 + _name_length = len(name) + 1 + if _name_length > name_length: + name_length = _name_length + + memory_length = 6 + + output_list.append( + "{bold}{name: <{name_length}} \ +{memory: <{memory_length}}{end_bold}".format( + name_length=name_length, + memory_length=memory_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + name="Name", + memory="RAM (M)", + ) + ) + output_list.append( + "{bold}{name: <{name_length}} \ +{memory: <{memory_length}}{end_bold}".format( + name_length=name_length, + memory_length=memory_length, + bold="", + end_bold="", + name=name, + memory=memory, + ) + ) + return "\n".join(output_list) + + +def vm_networks_add( + config, vm, network, macaddr, model, sriov, sriov_mode, live, restart +): + """ + Add a new network to the VM + + Calls vm_info to get the VM XML. + + Calls vm_modify to set the VM XML. + + Calls vm_device_attach if live to hot-attach the device. + """ + from lxml.objectify import fromstring + from lxml.etree import tostring + from random import randint + import pvc.lib.network as pvc_network + + network_exists, _ = pvc_network.net_info(config, network) + if not network_exists: + return False, "Network {} not found on the cluster.".format(network) + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." 
+ + if macaddr is None: + mac_prefix = "52:54:00" + random_octet_A = "{:x}".format(randint(16, 238)) + random_octet_B = "{:x}".format(randint(16, 238)) + random_octet_C = "{:x}".format(randint(16, 238)) + macaddr = "{prefix}:{octetA}:{octetB}:{octetC}".format( + prefix=mac_prefix, + octetA=random_octet_A, + octetB=random_octet_B, + octetC=random_octet_C, + ) + + # Add an SR-IOV network + if sriov: + valid, sriov_vf_information = pvc_network.net_sriov_vf_info( + config, domain_information["node"], network + ) + if not valid: + return ( + False, + 'Specified SR-IOV VF "{}" does not exist on VM node "{}".'.format( + network, domain_information["node"] + ), + ) + + # Add a hostdev (direct PCIe) SR-IOV network + if sriov_mode == "hostdev": + bus_address = 'domain="0x{pci_domain}" bus="0x{pci_bus}" slot="0x{pci_slot}" function="0x{pci_function}"'.format( + pci_domain=sriov_vf_information["pci"]["domain"], + pci_bus=sriov_vf_information["pci"]["bus"], + pci_slot=sriov_vf_information["pci"]["slot"], + pci_function=sriov_vf_information["pci"]["function"], + ) + device_string = '
{network}'.format( + macaddr=macaddr, bus_address=bus_address, network=network + ) + # Add a macvtap SR-IOV network + elif sriov_mode == "macvtap": + device_string = ''.format( + macaddr=macaddr, network=network, model=model + ) + else: + return False, "ERROR: Invalid SR-IOV mode specified." + # Add a normal bridged PVC network + else: + # Set the bridge prefix + if network in ["upstream", "cluster", "storage"]: + br_prefix = "br" + else: + br_prefix = "vmbr" + + device_string = ''.format( + macaddr=macaddr, bridge="{}{}".format(br_prefix, network), model=model + ) + + device_xml = fromstring(device_string) + + all_interfaces = parsed_xml.devices.find("interface") + if all_interfaces is None: + all_interfaces = [] + for interface in all_interfaces: + if sriov: + if sriov_mode == "hostdev": + if interface.attrib.get("type") == "hostdev": + interface_address = 'domain="{pci_domain}" bus="{pci_bus}" slot="{pci_slot}" function="{pci_function}"'.format( + pci_domain=interface.source.address.attrib.get("domain"), + pci_bus=interface.source.address.attrib.get("bus"), + pci_slot=interface.source.address.attrib.get("slot"), + pci_function=interface.source.address.attrib.get("function"), + ) + if interface_address == bus_address: + return ( + False, + 'SR-IOV device "{}" is already configured for VM "{}".'.format( + network, vm + ), + ) + elif sriov_mode == "macvtap": + if interface.attrib.get("type") == "direct": + interface_dev = interface.source.attrib.get("dev") + if interface_dev == network: + return ( + False, + 'SR-IOV device "{}" is already configured for VM "{}".'.format( + network, vm + ), + ) + + # Add the interface at the end of the list (or, right above emulator) + if len(all_interfaces) > 0: + for idx, interface in enumerate(parsed_xml.devices.find("interface")): + if idx == len(all_interfaces) - 1: + interface.addnext(device_xml) + else: + parsed_xml.devices.find("emulator").addprevious(device_xml) + + try: + new_xml = tostring(parsed_xml, pretty_print=True) + except Exception: + return False, "ERROR: Failed to dump XML data." + + modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart) + + if not modify_retcode: + return modify_retcode, modify_retmsg + + if live: + attach_retcode, attach_retmsg = vm_device_attach(config, vm, device_string) + + if not attach_retcode: + retcode = attach_retcode + retmsg = attach_retmsg + else: + retcode = attach_retcode + retmsg = "Network '{}' successfully added to VM config and hot attached to running VM.".format( + network + ) + else: + retcode = modify_retcode + retmsg = modify_retmsg + + return retcode, retmsg + + +def vm_networks_remove(config, vm, network, macaddr, sriov, live, restart): + """ + Remove a network from the VM, optionally by MAC + + Calls vm_info to get the VM XML. + + Calls vm_modify to set the VM XML. + + Calls vm_device_detach to hot-remove the device. + """ + from lxml.objectify import fromstring + from lxml.etree import tostring + + if network is None and macaddr is None: + return False, "A network or MAC address must be specified for removal." + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." 
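# --- Illustrative sketch (editorial aside, not part of this patch) -----------
# When no MAC address is supplied, vm_networks_add() above synthesizes one under
# the 52:54:00 prefix conventionally used for KVM/QEMU guests, with three random
# octets kept in the 0x10-0xee range so each always renders as two hex digits.
# A standalone restatement of that logic; the function name is hypothetical.

from random import randint as _randint

def _random_kvm_mac(prefix="52:54:00"):
    """Generate a pseudo-random guest MAC address under the given prefix."""
    octets = ["{:x}".format(_randint(16, 238)) for _ in range(3)]
    return "{}:{}:{}:{}".format(prefix, *octets)


# e.g. _random_kvm_mac() -> "52:54:00:3f:a2:9c" (value varies per call)
# -----------------------------------------------------------------------------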
+ + changed = False + device_string = None + for interface in parsed_xml.devices.find("interface"): + if sriov: + if interface.attrib.get("type") == "hostdev": + if_dev = str(interface.sriov_device) + if macaddr is None and network == if_dev: + interface.getparent().remove(interface) + changed = True + elif macaddr is not None and macaddr == interface.mac.attrib.get( + "address" + ): + interface.getparent().remove(interface) + changed = True + elif interface.attrib.get("type") == "direct": + if_dev = str(interface.source.attrib.get("dev")) + if macaddr is None and network == if_dev: + interface.getparent().remove(interface) + changed = True + elif macaddr is not None and macaddr == interface.mac.attrib.get( + "address" + ): + interface.getparent().remove(interface) + changed = True + else: + if_vni = re.match( + r"[vm]*br([0-9a-z]+)", interface.source.attrib.get("bridge") + ).group(1) + if macaddr is None and network == if_vni: + interface.getparent().remove(interface) + changed = True + elif macaddr is not None and macaddr == interface.mac.attrib.get("address"): + interface.getparent().remove(interface) + changed = True + if changed: + device_string = tostring(interface) + + if changed: + try: + new_xml = tostring(parsed_xml, pretty_print=True) + except Exception: + return False, "ERROR: Failed to dump XML data." + elif not changed and macaddr is not None: + return False, 'ERROR: Interface with MAC "{}" does not exist on VM.'.format( + macaddr + ) + elif not changed and network is not None: + return False, 'ERROR: Network "{}" does not exist on VM.'.format(network) + else: + return False, "ERROR: Unspecified error finding interface to remove." + + modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart) + + if not modify_retcode: + return modify_retcode, modify_retmsg + + if live and device_string: + detach_retcode, detach_retmsg = vm_device_detach(config, vm, device_string) + + if not detach_retcode: + retcode = detach_retcode + retmsg = detach_retmsg + else: + retcode = detach_retcode + retmsg = "Network '{}' successfully removed from VM config and hot detached from running VM.".format( + network + ) + else: + retcode = modify_retcode + retmsg = modify_retmsg + + return retcode, retmsg + + +def vm_networks_get(config, vm): + """ + Get the networks of the VM + + Calls vm_info to get VM XML. + + Returns a list of tuples of (network_vni, mac_address, model) + """ + from lxml.objectify import fromstring + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." 
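# --- Illustrative sketch (editorial aside, not part of this patch) -----------
# The removal logic above recovers the PVC network identifier from the bridge
# device name: cluster networks live on "br<name>" (e.g. brcluster) and VM
# networks on "vmbr<vni>" (e.g. vmbr1000), so a single regex strips either
# prefix. A quick standalone check of that expression:

import re as _re

for _bridge in ("vmbr1000", "brcluster", "brstorage"):
    _vni = _re.match(r"[vm]*br([0-9a-z]+)", _bridge).group(1)
    print(_bridge, "->", _vni)  # vmbr1000 -> 1000, brcluster -> cluster, brstorage -> storage
# -----------------------------------------------------------------------------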
+ + network_data = list() + for interface in parsed_xml.devices.find("interface"): + mac_address = interface.mac.attrib.get("address") + model = interface.model.attrib.get("type") + interface_type = interface.attrib.get("type") + if interface_type == "bridge": + network = re.search( + r"[vm]*br([0-9a-z]+)", interface.source.attrib.get("bridge") + ).group(1) + elif interface_type == "direct": + network = "macvtap:{}".format(interface.source.attrib.get("dev")) + elif interface_type == "hostdev": + network = "hostdev:{}".format(interface.source.attrib.get("dev")) + + network_data.append((network, mac_address, model)) + + return True, network_data + + +def format_vm_networks(config, name, networks): + """ + Format the output of a network list in a nice table + """ + output_list = [] + + name_length = 5 + vni_length = 8 + macaddr_length = 12 + model_length = 6 + + _name_length = len(name) + 1 + if _name_length > name_length: + name_length = _name_length + + for network in networks: + _vni_length = len(network[0]) + 1 + if _vni_length > vni_length: + vni_length = _vni_length + + _macaddr_length = len(network[1]) + 1 + if _macaddr_length > macaddr_length: + macaddr_length = _macaddr_length + + _model_length = len(network[2]) + 1 + if _model_length > model_length: + model_length = _model_length + + output_list.append( + "{bold}{name: <{name_length}} \ +{vni: <{vni_length}} \ +{macaddr: <{macaddr_length}} \ +{model: <{model_length}}{end_bold}".format( + name_length=name_length, + vni_length=vni_length, + macaddr_length=macaddr_length, + model_length=model_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + name="Name", + vni="Network", + macaddr="MAC Address", + model="Model", + ) + ) + count = 0 + for network in networks: + if count > 0: + name = "" + count += 1 + output_list.append( + "{bold}{name: <{name_length}} \ +{vni: <{vni_length}} \ +{macaddr: <{macaddr_length}} \ +{model: <{model_length}}{end_bold}".format( + name_length=name_length, + vni_length=vni_length, + macaddr_length=macaddr_length, + model_length=model_length, + bold="", + end_bold="", + name=name, + vni=network[0], + macaddr=network[1], + model=network[2], + ) + ) + return "\n".join(output_list) + + +def vm_volumes_add(config, vm, volume, disk_id, bus, disk_type, live, restart): + """ + Add a new volume to the VM + + Calls vm_info to get the VM XML. + + Calls vm_modify to set the VM XML. + """ + from lxml.objectify import fromstring + from lxml.etree import tostring + from copy import deepcopy + import pvc.lib.ceph as pvc_ceph + + if disk_type == "rbd": + # Verify that the provided volume is valid + vpool = volume.split("/")[0] + vname = volume.split("/")[1] + retcode, retdata = pvc_ceph.ceph_volume_info(config, vpool, vname) + if not retcode: + return False, "Volume {} is not present in the cluster.".format(volume) + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." 
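# --- Illustrative sketch (editorial aside, not part of this patch) -----------
# vm_networks_get() above reduces each libvirt <interface> element to a
# (network, mac, model) tuple, labelling the network by interface type: bridged
# devices become the bare VNI, macvtap SR-IOV devices "macvtap:<dev>", and
# hostdev SR-IOV devices "hostdev:<dev>". A table-style restatement of that
# mapping; the helper name is hypothetical.

import re as _re

def _network_label(interface_type, source_attrib):
    """Map a libvirt interface type and its <source> attributes to a PVC network label."""
    if interface_type == "bridge":
        return _re.search(r"[vm]*br([0-9a-z]+)", source_attrib["bridge"]).group(1)
    if interface_type == "direct":
        return "macvtap:{}".format(source_attrib["dev"])
    if interface_type == "hostdev":
        return "hostdev:{}".format(source_attrib["dev"])
    return "unknown"


# e.g. _network_label("bridge", {"bridge": "vmbr1000"}) -> "1000"
# -----------------------------------------------------------------------------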
+ + last_disk = None + id_list = list() + all_disks = parsed_xml.devices.find("disk") + if all_disks is None: + all_disks = [] + for disk in all_disks: + id_list.append(disk.target.attrib.get("dev")) + if disk.source.attrib.get("protocol") == disk_type: + if disk_type == "rbd": + last_disk = disk.source.attrib.get("name") + elif disk_type == "file": + last_disk = disk.source.attrib.get("file") + if last_disk == volume: + return False, "Volume {} is already configured for VM {}.".format( + volume, vm + ) + last_disk_details = deepcopy(disk) + + if disk_id is not None: + if disk_id in id_list: + return ( + False, + "Manually specified disk ID {} is already in use for VM {}.".format( + disk_id, vm + ), + ) + else: + # Find the next free disk ID + first_dev_prefix = id_list[0][0:-1] + + for char in range(ord("a"), ord("z")): + char = chr(char) + next_id = "{}{}".format(first_dev_prefix, char) + if next_id not in id_list: + break + else: + next_id = None + if next_id is None: + return ( + False, + "Failed to find a valid disk_id and none specified; too many disks for VM {}?".format( + vm + ), + ) + disk_id = next_id + + if last_disk is None: + if disk_type == "rbd": + # RBD volumes need an example to be based on + return ( + False, + "There are no existing RBD volumes attached to this VM. Autoconfiguration failed; use the 'vm modify' command to manually configure this volume with the required details for authentication, hosts, etc..", + ) + elif disk_type == "file": + # File types can be added ad-hoc + disk_template = ''.format( + source=volume, dev=disk_id, bus=bus + ) + last_disk_details = fromstring(disk_template) + + new_disk_details = last_disk_details + new_disk_details.target.set("dev", disk_id) + new_disk_details.target.set("bus", bus) + if disk_type == "rbd": + new_disk_details.source.set("name", volume) + elif disk_type == "file": + new_disk_details.source.set("file", volume) + device_xml = new_disk_details + + all_disks = parsed_xml.devices.find("disk") + if all_disks is None: + all_disks = [] + for disk in all_disks: + last_disk = disk + + # Add the disk at the end of the list (or, right above emulator) + if len(all_disks) > 0: + for idx, disk in enumerate(parsed_xml.devices.find("disk")): + if idx == len(all_disks) - 1: + disk.addnext(device_xml) + else: + parsed_xml.devices.find("emulator").addprevious(device_xml) + + try: + new_xml = tostring(parsed_xml, pretty_print=True) + except Exception: + return False, "ERROR: Failed to dump XML data." + + modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart) + + if not modify_retcode: + return modify_retcode, modify_retmsg + + if live: + device_string = tostring(device_xml) + attach_retcode, attach_retmsg = vm_device_attach(config, vm, device_string) + + if not attach_retcode: + retcode = attach_retcode + retmsg = attach_retmsg + else: + retcode = attach_retcode + retmsg = "Volume '{}/{}' successfully added to VM config and hot attached to running VM.".format( + vpool, vname + ) + else: + retcode = modify_retcode + retmsg = modify_retmsg + + return retcode, retmsg + + +def vm_volumes_remove(config, vm, volume, live, restart): + """ + Remove a volume to the VM + + Calls vm_info to get the VM XML. + + Calls vm_modify to set the VM XML. 
+ """ + from lxml.objectify import fromstring + from lxml.etree import tostring + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML document." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." + + changed = False + device_string = None + for disk in parsed_xml.devices.find("disk"): + disk_name = disk.source.attrib.get("name") + if not disk_name: + disk_name = disk.source.attrib.get("file") + if volume == disk_name: + device_string = tostring(disk) + disk.getparent().remove(disk) + changed = True + + if changed: + try: + new_xml = tostring(parsed_xml, pretty_print=True) + except Exception: + return False, "ERROR: Failed to dump XML data." + else: + return False, 'ERROR: Volume "{}" does not exist on VM.'.format(volume) + + modify_retcode, modify_retmsg = vm_modify(config, vm, new_xml, restart) + + if not modify_retcode: + return modify_retcode, modify_retmsg + + if live and device_string: + detach_retcode, detach_retmsg = vm_device_detach(config, vm, device_string) + + if not detach_retcode: + retcode = detach_retcode + retmsg = detach_retmsg + else: + retcode = detach_retcode + retmsg = "Volume '{}' successfully removed from VM config and hot detached from running VM.".format( + volume + ) + else: + retcode = modify_retcode + retmsg = modify_retmsg + + return retcode, retmsg + + +def vm_volumes_get(config, vm): + """ + Get the volumes of the VM + + Calls vm_info to get VM XML. + + Returns a list of tuples of (volume, disk_id, type, bus) + """ + from lxml.objectify import fromstring + + status, domain_information = vm_info(config, vm) + if not status: + return status, domain_information + + xml = domain_information.get("xml", None) + if xml is None: + return False, "VM does not have a valid XML doccument." + + try: + parsed_xml = fromstring(xml) + except Exception: + return False, "ERROR: Failed to parse XML data." 
+ + volume_data = list() + for disk in parsed_xml.devices.find("disk"): + protocol = disk.attrib.get("type") + disk_id = disk.target.attrib.get("dev") + bus = disk.target.attrib.get("bus") + if protocol == "network": + protocol = disk.source.attrib.get("protocol") + source = disk.source.attrib.get("name") + elif protocol == "file": + protocol = "file" + source = disk.source.attrib.get("file") + else: + protocol = "unknown" + source = "unknown" + + volume_data.append((source, disk_id, protocol, bus)) + + return True, volume_data + + +def format_vm_volumes(config, name, volumes): + """ + Format the output of a volume value in a nice table + """ + output_list = [] + + name_length = 5 + volume_length = 7 + disk_id_length = 4 + protocol_length = 5 + bus_length = 4 + + _name_length = len(name) + 1 + if _name_length > name_length: + name_length = _name_length + + for volume in volumes: + _volume_length = len(volume[0]) + 1 + if _volume_length > volume_length: + volume_length = _volume_length + + _disk_id_length = len(volume[1]) + 1 + if _disk_id_length > disk_id_length: + disk_id_length = _disk_id_length + + _protocol_length = len(volume[2]) + 1 + if _protocol_length > protocol_length: + protocol_length = _protocol_length + + _bus_length = len(volume[3]) + 1 + if _bus_length > bus_length: + bus_length = _bus_length + + output_list.append( + "{bold}{name: <{name_length}} \ +{volume: <{volume_length}} \ +{disk_id: <{disk_id_length}} \ +{protocol: <{protocol_length}} \ +{bus: <{bus_length}}{end_bold}".format( + name_length=name_length, + volume_length=volume_length, + disk_id_length=disk_id_length, + protocol_length=protocol_length, + bus_length=bus_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + name="Name", + volume="Volume", + disk_id="Dev", + protocol="Type", + bus="Bus", + ) + ) + count = 0 + for volume in volumes: + if count > 0: + name = "" + count += 1 + output_list.append( + "{bold}{name: <{name_length}} \ +{volume: <{volume_length}} \ +{disk_id: <{disk_id_length}} \ +{protocol: <{protocol_length}} \ +{bus: <{bus_length}}{end_bold}".format( + name_length=name_length, + volume_length=volume_length, + disk_id_length=disk_id_length, + protocol_length=protocol_length, + bus_length=bus_length, + bold="", + end_bold="", + name=name, + volume=volume[0], + disk_id=volume[1], + protocol=volume[2], + bus=volume[3], + ) + ) + return "\n".join(output_list) + + +def view_console_log(config, vm, lines=100): + """ + Return console log lines from the API (and display them in a pager in the main CLI) + + API endpoint: GET /vm/{vm}/console + API arguments: lines={lines} + API schema: {"name":"{vmname}","data":"{console_log}"} + """ + params = {"lines": lines} + response = call_api(config, "get", "/vm/{vm}/console".format(vm=vm), params=params) + + if response.status_code != 200: + return False, response.json().get("message", "") + + console_log = response.json()["data"] + + # Shrink the log buffer to length lines + shrunk_log = console_log.split("\n")[-lines:] + loglines = "\n".join(shrunk_log) + + return True, loglines + + +def follow_console_log(config, vm, lines=10): + """ + Return and follow console log lines from the API + + API endpoint: GET /vm/{vm}/console + API arguments: lines={lines} + API schema: {"name":"{vmname}","data":"{console_log}"} + """ + # We always grab 200 to match the follow call, but only _show_ `lines` number + params = {"lines": 200} + response = call_api(config, "get", "/vm/{vm}/console".format(vm=vm), params=params) + + if response.status_code != 200: + return 
False, response.json().get("message", "") + + # Shrink the log buffer to length lines + console_log = response.json()["data"] + shrunk_log = console_log.split("\n")[-int(lines) :] + loglines = "\n".join(shrunk_log) + + # Print the initial data and begin following + print(loglines, end="") + + while True: + # Grab the next line set (200 is a reasonable number of lines per half-second; any more are skipped) + try: + params = {"lines": 200} + response = call_api( + config, "get", "/vm/{vm}/console".format(vm=vm), params=params + ) + new_console_log = response.json()["data"] + except Exception: + break + # Split the new and old log strings into constitutent lines + old_console_loglines = console_log.split("\n") + new_console_loglines = new_console_log.split("\n") + + # Set the console log to the new log value for the next iteration + console_log = new_console_log + + # Remove the lines from the old log until we hit the first line of the new log; this + # ensures that the old log is a string that we can remove from the new log entirely + for index, line in enumerate(old_console_loglines, start=0): + if line == new_console_loglines[0]: + del old_console_loglines[0:index] + break + # Rejoin the log lines into strings + old_console_log = "\n".join(old_console_loglines) + new_console_log = "\n".join(new_console_loglines) + # Remove the old lines from the new log + diff_console_log = new_console_log.replace(old_console_log, "") + # If there's a difference, print it out + if diff_console_log: + print(diff_console_log, end="") + # Wait half a second + time.sleep(0.5) + + return True, "" + + +# +# Output display functions +# +def format_info(config, domain_information, long_output): + # Format a nice output; do this line-by-line then concat the elements at the end + ainformation = [] + ainformation.append( + "{}Virtual machine information:{}".format(ansiprint.bold(), ansiprint.end()) + ) + ainformation.append("") + # Basic information + ainformation.append( + "{}Name:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["name"] + ) + ) + ainformation.append( + "{}UUID:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["uuid"] + ) + ) + ainformation.append( + "{}Description:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["description"] + ) + ) + ainformation.append( + "{}Profile:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["profile"] + ) + ) + ainformation.append( + "{}Memory (M):{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["memory"] + ) + ) + ainformation.append( + "{}vCPUs:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["vcpu"] + ) + ) + ainformation.append( + "{}Topology (S/C/T):{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["vcpu_topology"] + ) + ) + + if ( + domain_information["vnc"].get("listen", "None") != "None" + and domain_information["vnc"].get("port", "None") != "None" + ): + ainformation.append("") + ainformation.append( + "{}VNC listen:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["vnc"]["listen"] + ) + ) + ainformation.append( + "{}VNC port:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["vnc"]["port"] + ) + ) + + if long_output is True: + # Virtualization information + ainformation.append("") + ainformation.append( + "{}Emulator:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["emulator"] + ) + ) + ainformation.append( + "{}Type:{} {}".format( 
+ ansiprint.purple(), ansiprint.end(), domain_information["type"] + ) + ) + ainformation.append( + "{}Arch:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["arch"] + ) + ) + ainformation.append( + "{}Machine:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["machine"] + ) + ) + ainformation.append( + "{}Features:{} {}".format( + ansiprint.purple(), + ansiprint.end(), + " ".join(domain_information["features"]), + ) + ) + ainformation.append("") + ainformation.append( + "{0}Memory stats:{1} {2}Swap In Swap Out Faults (maj/min) Available Usable Unused RSS{3}".format( + ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end() + ) + ) + ainformation.append( + " {0: <7} {1: <8} {2: <16} {3: <10} {4: <7} {5: <7} {6: <10}".format( + format_metric(domain_information["memory_stats"].get("swap_in", 0)), + format_metric(domain_information["memory_stats"].get("swap_out", 0)), + "/".join( + [ + format_metric( + domain_information["memory_stats"].get("major_fault", 0) + ), + format_metric( + domain_information["memory_stats"].get("minor_fault", 0) + ), + ] + ), + format_bytes( + domain_information["memory_stats"].get("available", 0) * 1024 + ), + format_bytes( + domain_information["memory_stats"].get("usable", 0) * 1024 + ), + format_bytes( + domain_information["memory_stats"].get("unused", 0) * 1024 + ), + format_bytes(domain_information["memory_stats"].get("rss", 0) * 1024), + ) + ) + ainformation.append("") + ainformation.append( + "{0}vCPU stats:{1} {2}CPU time (ns) User time (ns) System time (ns){3}".format( + ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end() + ) + ) + ainformation.append( + " {0: <16} {1: <16} {2: <15}".format( + str(domain_information["vcpu_stats"].get("cpu_time", 0)), + str(domain_information["vcpu_stats"].get("user_time", 0)), + str(domain_information["vcpu_stats"].get("system_time", 0)), + ) + ) + + # PVC cluster information + ainformation.append("") + dstate_colour = { + "start": ansiprint.green(), + "restart": ansiprint.yellow(), + "shutdown": ansiprint.yellow(), + "stop": ansiprint.red(), + "disable": ansiprint.blue(), + "fail": ansiprint.red(), + "migrate": ansiprint.blue(), + "unmigrate": ansiprint.blue(), + "provision": ansiprint.blue(), + } + ainformation.append( + "{}State:{} {}{}{}".format( + ansiprint.purple(), + ansiprint.end(), + dstate_colour[domain_information["state"]], + domain_information["state"], + ansiprint.end(), + ) + ) + ainformation.append( + "{}Current Node:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["node"] + ) + ) + if not domain_information["last_node"]: + domain_information["last_node"] = "N/A" + ainformation.append( + "{}Previous Node:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["last_node"] + ) + ) + + # Get a failure reason if applicable + if domain_information["failed_reason"]: + ainformation.append("") + ainformation.append( + "{}Failure reason:{} {}".format( + ansiprint.purple(), ansiprint.end(), domain_information["failed_reason"] + ) + ) + + if not domain_information.get("node_selector"): + formatted_node_selector = "False" + else: + formatted_node_selector = domain_information["node_selector"] + + if not domain_information.get("node_limit"): + formatted_node_limit = "False" + else: + formatted_node_limit = ", ".join(domain_information["node_limit"]) + + if not domain_information.get("node_autostart"): + formatted_node_autostart = "False" + else: + formatted_node_autostart = 
domain_information["node_autostart"] + + if not domain_information.get("migration_method"): + formatted_migration_method = "any" + else: + formatted_migration_method = domain_information["migration_method"] + + ainformation.append( + "{}Migration selector:{} {}".format( + ansiprint.purple(), ansiprint.end(), formatted_node_selector + ) + ) + ainformation.append( + "{}Node limit:{} {}".format( + ansiprint.purple(), ansiprint.end(), formatted_node_limit + ) + ) + ainformation.append( + "{}Autostart:{} {}".format( + ansiprint.purple(), ansiprint.end(), formatted_node_autostart + ) + ) + ainformation.append( + "{}Migration Method:{} {}".format( + ansiprint.purple(), ansiprint.end(), formatted_migration_method + ) + ) + + # Tag list + tags_name_length = 5 + tags_type_length = 5 + tags_protected_length = 10 + for tag in domain_information["tags"]: + _tags_name_length = len(tag["name"]) + 1 + if _tags_name_length > tags_name_length: + tags_name_length = _tags_name_length + + _tags_type_length = len(tag["type"]) + 1 + if _tags_type_length > tags_type_length: + tags_type_length = _tags_type_length + + _tags_protected_length = len(str(tag["protected"])) + 1 + if _tags_protected_length > tags_protected_length: + tags_protected_length = _tags_protected_length + + if len(domain_information["tags"]) > 0: + ainformation.append("") + ainformation.append( + "{purple}Tags:{end} {bold}{tags_name: <{tags_name_length}} {tags_type: <{tags_type_length}} {tags_protected: <{tags_protected_length}}{end}".format( + purple=ansiprint.purple(), + bold=ansiprint.bold(), + end=ansiprint.end(), + tags_name_length=tags_name_length, + tags_type_length=tags_type_length, + tags_protected_length=tags_protected_length, + tags_name="Name", + tags_type="Type", + tags_protected="Protected", + ) + ) + + for tag in sorted( + domain_information["tags"], key=lambda t: t["type"] + t["name"] + ): + ainformation.append( + " {tags_name: <{tags_name_length}} {tags_type: <{tags_type_length}} {tags_protected: <{tags_protected_length}}".format( + tags_name_length=tags_name_length, + tags_type_length=tags_type_length, + tags_protected_length=tags_protected_length, + tags_name=tag["name"], + tags_type=tag["type"], + tags_protected=str(tag["protected"]), + ) + ) + else: + ainformation.append("") + ainformation.append( + "{purple}Tags:{end} N/A".format( + purple=ansiprint.purple(), + end=ansiprint.end(), + ) + ) + + # Network list + net_list = [] + cluster_net_list = call_api(config, "get", "/network").json() + for net in domain_information["networks"]: + net_vni = net["vni"] + if ( + net_vni not in ["cluster", "storage", "upstream"] + and not re.match(r"^macvtap:.*", net_vni) + and not re.match(r"^hostdev:.*", net_vni) + ): + if int(net_vni) not in [net["vni"] for net in cluster_net_list]: + net_list.append( + ansiprint.red() + net_vni + ansiprint.end() + " [invalid]" + ) + else: + net_list.append(net_vni) + else: + net_list.append(net_vni) + + ainformation.append("") + ainformation.append( + "{}Networks:{} {}".format( + ansiprint.purple(), ansiprint.end(), ", ".join(net_list) + ) + ) + + if long_output is True: + # Disk list + ainformation.append("") + name_length = 0 + for disk in domain_information["disks"]: + _name_length = len(disk["name"]) + 1 + if _name_length > name_length: + name_length = _name_length + ainformation.append( + "{0}Disks:{1} {2}ID Type {3: <{width}} Dev Bus Requests (r/w) Data (r/w){4}".format( + ansiprint.purple(), + ansiprint.end(), + ansiprint.bold(), + "Name", + ansiprint.end(), + width=name_length, + ) + ) + for disk 
in domain_information["disks"]: + ainformation.append( + " {0: <3} {1: <5} {2: <{width}} {3: <4} {4: <5} {5: <15} {6}".format( + domain_information["disks"].index(disk), + disk["type"], + disk["name"], + disk["dev"], + disk["bus"], + "/".join( + [ + str(format_metric(disk.get("rd_req", 0))), + str(format_metric(disk.get("wr_req", 0))), + ] + ), + "/".join( + [ + str(format_bytes(disk.get("rd_bytes", 0))), + str(format_bytes(disk.get("wr_bytes", 0))), + ] + ), + width=name_length, + ) + ) + ainformation.append("") + ainformation.append( + "{}Interfaces:{} {}ID Type Source Model MAC Data (r/w) Packets (r/w) Errors (r/w){}".format( + ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end() + ) + ) + for net in domain_information["networks"]: + net_type = net["type"] + net_source = net["source"] + net_mac = net["mac"] + if net_type in ["direct", "hostdev"]: + net_model = "N/A" + net_bytes = "N/A" + net_packets = "N/A" + net_errors = "N/A" + elif net_type in ["bridge"]: + net_model = net["model"] + net_bytes = "/".join( + [ + str(format_bytes(net.get("rd_bytes", 0))), + str(format_bytes(net.get("wr_bytes", 0))), + ] + ) + net_packets = "/".join( + [ + str(format_metric(net.get("rd_packets", 0))), + str(format_metric(net.get("wr_packets", 0))), + ] + ) + net_errors = "/".join( + [ + str(format_metric(net.get("rd_errors", 0))), + str(format_metric(net.get("wr_errors", 0))), + ] + ) + + ainformation.append( + " {0: <3} {1: <8} {2: <12} {3: <8} {4: <18} {5: <12} {6: <15} {7: <12}".format( + domain_information["networks"].index(net), + net_type, + net_source, + net_model, + net_mac, + net_bytes, + net_packets, + net_errors, + ) + ) + # Controller list + ainformation.append("") + ainformation.append( + "{}Controllers:{} {}ID Type Model{}".format( + ansiprint.purple(), ansiprint.end(), ansiprint.bold(), ansiprint.end() + ) + ) + for controller in domain_information["controllers"]: + ainformation.append( + " {0: <3} {1: <14} {2: <8}".format( + domain_information["controllers"].index(controller), + controller["type"], + str(controller["model"]), + ) + ) + + # Join it all together + ainformation.append("") + return "\n".join(ainformation) + + +def format_list(config, vm_list, raw): + # Function to strip the "br" off of nets and return a nicer list + def getNiceNetID(domain_information): + # Network list + net_list = [] + for net in domain_information["networks"]: + net_list.append(net["vni"]) + return net_list + + # Function to get tag names and returna nicer list + def getNiceTagName(domain_information): + # Tag list + tag_list = [] + for tag in sorted( + domain_information["tags"], key=lambda t: t["type"] + t["name"] + ): + tag_list.append(tag["name"]) + return tag_list + + # Handle raw mode since it just lists the names + if raw: + ainformation = list() + for vm in sorted(item["name"] for item in vm_list): + ainformation.append(vm) + return "\n".join(ainformation) + + vm_list_output = [] + + # Determine optimal column widths + # Dynamic columns: node_name, node, migrated + vm_name_length = 5 + vm_state_length = 6 + vm_tags_length = 5 + vm_nets_length = 9 + vm_ram_length = 8 + vm_vcpu_length = 6 + vm_node_length = 8 + vm_migrated_length = 9 + for domain_information in vm_list: + net_list = getNiceNetID(domain_information) + tag_list = getNiceTagName(domain_information) + # vm_name column + _vm_name_length = len(domain_information["name"]) + 1 + if _vm_name_length > vm_name_length: + vm_name_length = _vm_name_length + # vm_state column + _vm_state_length = len(domain_information["state"]) 
+ 1 + if _vm_state_length > vm_state_length: + vm_state_length = _vm_state_length + # vm_tags column + _vm_tags_length = len(",".join(tag_list)) + 1 + if _vm_tags_length > vm_tags_length: + vm_tags_length = _vm_tags_length + # vm_nets column + _vm_nets_length = len(",".join(net_list)) + 1 + if _vm_nets_length > vm_nets_length: + vm_nets_length = _vm_nets_length + # vm_node column + _vm_node_length = len(domain_information["node"]) + 1 + if _vm_node_length > vm_node_length: + vm_node_length = _vm_node_length + # vm_migrated column + _vm_migrated_length = len(domain_information["migrated"]) + 1 + if _vm_migrated_length > vm_migrated_length: + vm_migrated_length = _vm_migrated_length + + # Format the string (header) + vm_list_output.append( + "{bold}{vm_header: <{vm_header_length}} {resource_header: <{resource_header_length}} {node_header: <{node_header_length}}{end_bold}".format( + vm_header_length=vm_name_length + vm_state_length + vm_tags_length + 2, + resource_header_length=vm_nets_length + vm_ram_length + vm_vcpu_length + 2, + node_header_length=vm_node_length + vm_migrated_length + 1, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + vm_header="VMs " + + "".join( + [ + "-" + for _ in range( + 4, vm_name_length + vm_state_length + vm_tags_length + 1 + ) + ] + ), + resource_header="Resources " + + "".join( + [ + "-" + for _ in range( + 10, vm_nets_length + vm_ram_length + vm_vcpu_length + 1 + ) + ] + ), + node_header="Node " + + "".join(["-" for _ in range(5, vm_node_length + vm_migrated_length)]), + ) + ) + + vm_list_output.append( + "{bold}{vm_name: <{vm_name_length}} \ +{vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \ +{vm_tags: <{vm_tags_length}} \ +{vm_networks: <{vm_nets_length}} \ +{vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \ +{vm_node: <{vm_node_length}} \ +{vm_migrated: <{vm_migrated_length}}{end_bold}".format( + vm_name_length=vm_name_length, + vm_state_length=vm_state_length, + vm_tags_length=vm_tags_length, + vm_nets_length=vm_nets_length, + vm_ram_length=vm_ram_length, + vm_vcpu_length=vm_vcpu_length, + vm_node_length=vm_node_length, + vm_migrated_length=vm_migrated_length, + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + vm_state_colour="", + end_colour="", + vm_name="Name", + vm_state="State", + vm_tags="Tags", + vm_networks="Networks", + vm_memory="RAM (M)", + vm_vcpu="vCPUs", + vm_node="Current", + vm_migrated="Migrated", + ) + ) + + # Get a list of cluster networks for validity comparisons + cluster_net_list = call_api(config, "get", "/network").json() + + # Format the string (elements) + for domain_information in sorted(vm_list, key=lambda v: v["name"]): + if domain_information["state"] == "start": + vm_state_colour = ansiprint.green() + elif domain_information["state"] == "restart": + vm_state_colour = ansiprint.yellow() + elif domain_information["state"] == "shutdown": + vm_state_colour = ansiprint.yellow() + elif domain_information["state"] == "stop": + vm_state_colour = ansiprint.red() + elif domain_information["state"] == "fail": + vm_state_colour = ansiprint.red() + else: + vm_state_colour = ansiprint.blue() + + # Handle colouring for an invalid network config + net_list = getNiceNetID(domain_information) + tag_list = getNiceTagName(domain_information) + if len(tag_list) < 1: + tag_list = ["N/A"] + + net_invalid_list = [] + for net_vni in net_list: + if ( + net_vni not in ["cluster", "storage", "upstream"] + and not re.match(r"^macvtap:.*", net_vni) + and not re.match(r"^hostdev:.*", net_vni) + ): + if int(net_vni) not in 
[net["vni"] for net in cluster_net_list]: + net_invalid_list.append(True) + else: + net_invalid_list.append(False) + else: + net_invalid_list.append(False) + + net_string_list = [] + for net_idx, net_vni in enumerate(net_list): + if net_invalid_list[net_idx]: + net_string_list.append( + "{}{}{}".format( + ansiprint.red(), + net_vni, + ansiprint.end(), + ) + ) + # Fix the length due to the extra fake characters + vm_nets_length -= len(net_vni) + vm_nets_length += len(net_string_list[net_idx]) + else: + net_string_list.append(net_vni) + + vm_list_output.append( + "{bold}{vm_name: <{vm_name_length}} \ +{vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \ +{vm_tags: <{vm_tags_length}} \ +{vm_networks: <{vm_nets_length}} \ +{vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \ +{vm_node: <{vm_node_length}} \ +{vm_migrated: <{vm_migrated_length}}{end_bold}".format( + vm_name_length=vm_name_length, + vm_state_length=vm_state_length, + vm_tags_length=vm_tags_length, + vm_nets_length=vm_nets_length, + vm_ram_length=vm_ram_length, + vm_vcpu_length=vm_vcpu_length, + vm_node_length=vm_node_length, + vm_migrated_length=vm_migrated_length, + bold="", + end_bold="", + vm_state_colour=vm_state_colour, + end_colour=ansiprint.end(), + vm_name=domain_information["name"], + vm_state=domain_information["state"], + vm_tags=",".join(tag_list), + vm_networks=",".join(net_string_list), + vm_memory=domain_information["memory"], + vm_vcpu=domain_information["vcpu"], + vm_node=domain_information["node"], + vm_migrated=domain_information["migrated"], + ) + ) + + return "\n".join(vm_list_output) diff --git a/cli-client-new/pvc/lib/zkhandler.py b/cli-client-new/pvc/lib/zkhandler.py new file mode 100644 index 00000000..b1437ce2 --- /dev/null +++ b/cli-client-new/pvc/lib/zkhandler.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 + +# zkhandler.py - Secure versioned ZooKeeper updates +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2022 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, version 3. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+#
+###############################################################################
+
+import uuid
+
+
+# Exists function: return True if the given key exists, False otherwise
+def exists(zk_conn, key):
+    stat = zk_conn.exists(key)
+    if stat:
+        return True
+    else:
+        return False
+
+
+# Child list function: return the list of child keys under the given key
+def listchildren(zk_conn, key):
+    children = zk_conn.get_children(key)
+    return children
+
+
+# Delete key function: remove the given key and, by default, all of its children
+def deletekey(zk_conn, key, recursive=True):
+    zk_conn.delete(key, recursive=recursive)
+
+
+# Data read function: return the UTF-8 decoded data stored at the given key
+def readdata(zk_conn, key):
+    data_raw = zk_conn.get(key)
+    data = data_raw[0].decode("utf8")
+    return data
+
+
+# Data write function: write a dict of key: value pairs as one versioned transaction
+def writedata(zk_conn, kv):
+    # Start up a transaction
+    zk_transaction = zk_conn.transaction()
+
+    # Proceed one KV pair at a time
+    for key in sorted(kv):
+        data = kv[key]
+
+        # Check if this key already exists or not
+        if not zk_conn.exists(key):
+            # We're creating a new key
+            zk_transaction.create(key, str(data).encode("utf8"))
+        else:
+            # We're updating a key with version validation
+            orig_data = zk_conn.get(key)
+            version = orig_data[1].version
+
+            # Set what we expect the new version to be
+            new_version = version + 1
+
+            # Update the data
+            zk_transaction.set_data(key, str(data).encode("utf8"))
+
+            # Add a version check; if the key changed since we read it, the commit will fail
+            try:
+                zk_transaction.check(key, new_version)
+            except TypeError:
+                print('Zookeeper key "{}" does not match expected version'.format(key))
+                return False
+
+    # Commit the transaction
+    try:
+        zk_transaction.commit()
+        return True
+    except Exception:
+        return False
+
+
+# Write lock function: return an exclusive write lock object for the given key
+def writelock(zk_conn, key):
+    lock_id = str(uuid.uuid1())
+    lock = zk_conn.WriteLock("{}".format(key), lock_id)
+    return lock
+
+
+# Read lock function: return a shared read lock object for the given key
+def readlock(zk_conn, key):
+    lock_id = str(uuid.uuid1())
+    lock = zk_conn.ReadLock("{}".format(key), lock_id)
+    return lock
diff --git a/cli-client-new/setup.py b/cli-client-new/setup.py
new file mode 100644
index 00000000..b0af6b58
--- /dev/null
+++ b/cli-client-new/setup.py
@@ -0,0 +1,20 @@
+from setuptools import setup
+
+setup(
+    name="pvc",
+    version="0.9.63",
+    packages=["pvc", "pvc.lib"],
+    install_requires=[
+        "Click",
+        "PyYAML",
+        "lxml",
+        "colorama",
+        "requests",
+        "requests-toolbelt",
+    ],
+    entry_points={
+        "console_scripts": [
+            "pvc = pvc.pvc:cli",
+        ],
+    },
+)
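
Usage note (illustration only, not part of this diff): the zkhandler helpers above take an established kazoo client connection, and writedata() applies a dict of key/value pairs as a single version-checked ZooKeeper transaction. A minimal sketch of how they might be exercised follows; it assumes kazoo is installed, a ZooKeeper server is reachable at 127.0.0.1:2181, and the /test paths used here are purely hypothetical.

#!/usr/bin/env python3
# Hypothetical usage sketch for pvc.lib.zkhandler (illustration only; not part of this diff).
# Assumes: kazoo installed, ZooKeeper listening on 127.0.0.1:2181, arbitrary /test key paths.

from kazoo.client import KazooClient

from pvc.lib import zkhandler

zk_conn = KazooClient(hosts="127.0.0.1:2181")
zk_conn.start()

# Ensure the parent path exists; writedata() itself does not create parent nodes
zk_conn.ensure_path("/test")

# Serialize writers from this client using the helper's exclusive lock wrapper
with zkhandler.writelock(zk_conn, "/test"):
    # writedata() creates missing keys and version-checks existing ones inside a
    # single ZooKeeper transaction, so a concurrent change makes the whole write fail
    if not zkhandler.writedata(zk_conn, {"/test/key": "value"}):
        print("Write was not committed")

if zkhandler.exists(zk_conn, "/test/key"):
    print(zkhandler.readdata(zk_conn, "/test/key"))  # prints "value"

zk_conn.stop()

Holding writelock() is only needed to coordinate multiple writers within PVC itself; the per-key version check inside writedata() is what guards against updates racing with other ZooKeeper clients.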