Compare commits

...

10 Commits

Author SHA1 Message Date
bb77c5f1fc Move lxml imports to runtime
Avoid loading these if unneeded
2025-03-12 23:50:12 -04:00
fc740927cc Switch to modern Python build system
Remove setuptools and use pyproject.toml instead.
2025-03-12 23:46:52 -04:00
34149fe933 Move multipart.encoder import to runtime
Another library with a ridiculously long load time.
2025-03-12 23:11:42 -04:00
a2fed1885c Remove distutils strtobool
This import takes a ridiculously long time just to implement a function
that can be done in one line in O(1) time.
2025-03-12 23:09:44 -04:00
ee055bdb81 Improve loading efficiency of common.py 2025-03-12 22:55:11 -04:00
60967b5606 Fix formatters colour bug for mirror state 2025-03-02 14:58:26 -05:00
89bfbe1fd8 Add translation of domain UUIDs to names
Allows frontends to better handle the domain list gracefully, as humans
don't care about the UUIDs.
2025-02-28 21:52:42 -05:00
be092756a9 Add cluster name to Zookeeper and log+API output 2025-02-27 00:57:07 -05:00
387fcfdf6b Bump version to 0.9.107 2025-02-10 23:15:21 -05:00
d695e855f9 Catch errors if snapshot fails to remove
A missing snapshot could cause an exception here which would break the
entire autobackup run. Catch the exception and continue on as this
should never be a fatal situation.
2025-02-10 16:33:44 -05:00
22 changed files with 115 additions and 80 deletions

1
.gitignore vendored
View File

@@ -8,3 +8,4 @@ debian/pvc-*/
debian/*.log debian/*.log
debian/*.substvars debian/*.substvars
debian/files debian/files
client-cli/build/

View File

@@ -1 +1 @@
0.9.106 0.9.107

View File

@@ -1,5 +1,9 @@
## PVC Changelog ## PVC Changelog
###### [v0.9.107](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.107)
* [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
###### [v0.9.106](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.106) ###### [v0.9.106](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.106)
* [API Daemon] Fixes a calculation bug when checking storage free space * [API Daemon] Fixes a calculation bug when checking storage free space

View File

@@ -81,6 +81,7 @@ def create_app():
print("|--------------------------------------------------------------|") print("|--------------------------------------------------------------|")
print("| Parallel Virtual Cluster API daemon v{0: <23} |".format(version)) print("| Parallel Virtual Cluster API daemon v{0: <23} |".format(version))
print("| Debug: {0: <53} |".format(str(config["debug"]))) print("| Debug: {0: <53} |".format(str(config["debug"])))
print("| Cluster: {0: <51} |".format(config["cluster_name"]))
print("| API version: v{0: <46} |".format(API_VERSION)) print("| API version: v{0: <46} |".format(API_VERSION))
print( print(
"| Listen: {0: <52} |".format( "| Listen: {0: <52} |".format(

View File

@@ -25,7 +25,6 @@ from functools import wraps
from json import dump as jdump from json import dump as jdump
from json import dumps as jdumps from json import dumps as jdumps
from json import loads as jloads from json import loads as jloads
from lxml.etree import fromstring, tostring
from os import environ, makedirs, path from os import environ, makedirs, path
from re import sub, match from re import sub, match
from yaml import load as yload from yaml import load as yload
@@ -1192,6 +1191,8 @@ def cli_vm_define(
# Verify our XML is sensible # Verify our XML is sensible
try: try:
from lxml.etree import fromstring, tostring
xml_data = fromstring(vmconfig_data) xml_data = fromstring(vmconfig_data)
new_cfg = tostring(xml_data, pretty_print=True).decode("utf8") new_cfg = tostring(xml_data, pretty_print=True).decode("utf8")
except Exception: except Exception:
@@ -1377,6 +1378,9 @@ def cli_vm_modify(
# Grab the current config # Grab the current config
current_vm_cfg_raw = vm_information.get("xml") current_vm_cfg_raw = vm_information.get("xml")
from lxml.etree import fromstring, tostring
xml_data = fromstring(current_vm_cfg_raw) xml_data = fromstring(current_vm_cfg_raw)
current_vm_cfgfile = tostring(xml_data, pretty_print=True).decode("utf8").strip() current_vm_cfgfile = tostring(xml_data, pretty_print=True).decode("utf8").strip()
@@ -1435,6 +1439,8 @@ def cli_vm_modify(
# Verify our XML is sensible # Verify our XML is sensible
try: try:
from lxml.etree import fromstring, tostring
xml_data = fromstring(new_vm_cfgfile) xml_data = fromstring(new_vm_cfgfile)
new_cfg = tostring(xml_data, pretty_print=True).decode("utf8") new_cfg = tostring(xml_data, pretty_print=True).decode("utf8")
except Exception as e: except Exception as e:
@@ -3265,6 +3271,9 @@ def cli_vm_dump(filename, domain):
finish(False, 'ERROR: Could not find VM "{}"!'.format(domain)) finish(False, 'ERROR: Could not find VM "{}"!'.format(domain))
current_vm_cfg_raw = retdata.get("xml") current_vm_cfg_raw = retdata.get("xml")
from lxml.etree import fromstring, tostring
xml_data = fromstring(current_vm_cfg_raw) xml_data = fromstring(current_vm_cfg_raw)
current_vm_cfgfile = tostring(xml_data, pretty_print=True).decode("utf8") current_vm_cfgfile = tostring(xml_data, pretty_print=True).decode("utf8")
xml = current_vm_cfgfile.strip() xml = current_vm_cfgfile.strip()

View File

@@ -221,7 +221,7 @@ def cli_cluster_status_format_pretty(CLI_CONFIG, data):
continue continue
if state in ["start"]: if state in ["start"]:
state_colour = ansii["green"] state_colour = ansii["green"]
elif state in ["migrate", "disable", "provision", "mirror"]: elif state in ["migrate", "disable", "provision"]:
state_colour = ansii["blue"] state_colour = ansii["blue"]
elif state in ["mirror"]: elif state in ["mirror"]:
state_colour = ansii["purple"] state_colour = ansii["purple"]

View File

@@ -20,7 +20,6 @@
############################################################################### ###############################################################################
from click import echo as click_echo from click import echo as click_echo
from distutils.util import strtobool
from json import load as jload from json import load as jload
from json import dump as jdump from json import dump as jdump
from os import chmod, environ, getpid, path, get_terminal_size from os import chmod, environ, getpid, path, get_terminal_size
@@ -150,9 +149,7 @@ def get_config(store_data, connection=None):
if connection == "local": if connection == "local":
config["verify_ssl"] = False config["verify_ssl"] = False
else: else:
config["verify_ssl"] = bool( config["verify_ssl"] = environ.get("PVC_CLIENT_VERIFY_SSL", "True") == "True"
strtobool(environ.get("PVC_CLIENT_VERIFY_SSL", "True"))
)
return config return config

View File

@@ -19,12 +19,13 @@
# #
############################################################################### ###############################################################################
import os
import math
import time
import requests
import click
from ast import literal_eval from ast import literal_eval
from click import echo, progressbar
from math import ceil
from os.path import getsize
from requests import get, post, put, patch, delete, Response
from requests.exceptions import ConnectionError
from time import time
from urllib3 import disable_warnings from urllib3 import disable_warnings
@@ -39,7 +40,7 @@ def format_bytes(size_bytes):
} }
human_bytes = "0B" human_bytes = "0B"
for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get): for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get):
formatted_bytes = int(math.ceil(size_bytes / byte_unit_matrix[unit])) formatted_bytes = int(ceil(size_bytes / byte_unit_matrix[unit]))
if formatted_bytes < 10000: if formatted_bytes < 10000:
human_bytes = "{}{}".format(formatted_bytes, unit) human_bytes = "{}{}".format(formatted_bytes, unit)
break break
@@ -57,7 +58,7 @@ def format_metric(integer):
} }
human_integer = "0" human_integer = "0"
for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get): for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get):
formatted_integer = int(math.ceil(integer / integer_unit_matrix[unit])) formatted_integer = int(ceil(integer / integer_unit_matrix[unit]))
if formatted_integer < 10000: if formatted_integer < 10000:
human_integer = "{}{}".format(formatted_integer, unit) human_integer = "{}{}".format(formatted_integer, unit)
break break
@@ -97,12 +98,12 @@ def format_age(age_secs):
class UploadProgressBar(object): class UploadProgressBar(object):
def __init__(self, filename, end_message="", end_nl=True): def __init__(self, filename, end_message="", end_nl=True):
file_size = os.path.getsize(filename) file_size = getsize(filename)
file_size_human = format_bytes(file_size) file_size_human = format_bytes(file_size)
click.echo("Uploading file (total size {})...".format(file_size_human)) echo("Uploading file (total size {})...".format(file_size_human))
self.length = file_size self.length = file_size
self.time_last = int(round(time.time() * 1000)) - 1000 self.time_last = int(round(time() * 1000)) - 1000
self.bytes_last = 0 self.bytes_last = 0
self.bytes_diff = 0 self.bytes_diff = 0
self.is_end = False self.is_end = False
@@ -114,7 +115,7 @@ class UploadProgressBar(object):
else: else:
self.end_suffix = "" self.end_suffix = ""
self.bar = click.progressbar(length=self.length, width=20, show_eta=True) self.bar = progressbar(length=self.length, width=20, show_eta=True)
def update(self, monitor): def update(self, monitor):
bytes_cur = monitor.bytes_read bytes_cur = monitor.bytes_read
@@ -123,7 +124,7 @@ class UploadProgressBar(object):
self.is_end = True self.is_end = True
self.bytes_last = bytes_cur self.bytes_last = bytes_cur
time_cur = int(round(time.time() * 1000)) time_cur = int(round(time() * 1000))
if (time_cur - 1000) > self.time_last: if (time_cur - 1000) > self.time_last:
self.time_last = time_cur self.time_last = time_cur
self.bar.update(self.bytes_diff) self.bar.update(self.bytes_diff)
@@ -132,13 +133,13 @@ class UploadProgressBar(object):
if self.is_end: if self.is_end:
self.bar.update(self.bytes_diff) self.bar.update(self.bytes_diff)
self.bytes_diff = 0 self.bytes_diff = 0
click.echo() echo()
click.echo() echo()
if self.end_message: if self.end_message:
click.echo(self.end_message + self.end_suffix, nl=self.end_nl) echo(self.end_message + self.end_suffix, nl=self.end_nl)
class ErrorResponse(requests.Response): class ErrorResponse(Response):
def __init__(self, json_data, status_code, headers): def __init__(self, json_data, status_code, headers):
self.json_data = json_data self.json_data = json_data
self.status_code = status_code self.status_code = status_code
@@ -178,7 +179,7 @@ def call_api(
for i in range(3): for i in range(3):
failed = False failed = False
try: try:
response = requests.get( response = get(
uri, uri,
timeout=timeout, timeout=timeout,
headers=headers, headers=headers,
@@ -190,16 +191,14 @@ def call_api(
failed = True failed = True
continue continue
break break
except requests.exceptions.ConnectionError: except ConnectionError:
failed = True failed = True
continue continue
if failed: if failed:
error = f"Code {response.status_code}" if response else "Timeout" error = f"Code {response.status_code}" if response else "Timeout"
raise requests.exceptions.ConnectionError( raise ConnectionError(f"Failed to connect after 3 tries ({error})")
f"Failed to connect after 3 tries ({error})"
)
if operation == "post": if operation == "post":
response = requests.post( response = post(
uri, uri,
timeout=timeout, timeout=timeout,
headers=headers, headers=headers,
@@ -209,7 +208,7 @@ def call_api(
verify=config["verify_ssl"], verify=config["verify_ssl"],
) )
if operation == "put": if operation == "put":
response = requests.put( response = put(
uri, uri,
timeout=timeout, timeout=timeout,
headers=headers, headers=headers,
@@ -219,7 +218,7 @@ def call_api(
verify=config["verify_ssl"], verify=config["verify_ssl"],
) )
if operation == "patch": if operation == "patch":
response = requests.patch( response = patch(
uri, uri,
timeout=timeout, timeout=timeout,
headers=headers, headers=headers,
@@ -228,7 +227,7 @@ def call_api(
verify=config["verify_ssl"], verify=config["verify_ssl"],
) )
if operation == "delete": if operation == "delete":
response = requests.delete( response = delete(
uri, uri,
timeout=timeout, timeout=timeout,
headers=headers, headers=headers,
@@ -243,10 +242,10 @@ def call_api(
# Display debug output # Display debug output
if config["debug"]: if config["debug"]:
click.echo("API endpoint: {}".format(uri), err=True) echo("API endpoint: {}".format(uri), err=True)
click.echo("Response code: {}".format(response.status_code), err=True) echo("Response code: {}".format(response.status_code), err=True)
click.echo("Response headers: {}".format(response.headers), err=True) echo("Response headers: {}".format(response.headers), err=True)
click.echo(err=True) echo(err=True)
# Return the response object # Return the response object
return response return response

View File

@@ -19,11 +19,6 @@
# #
############################################################################### ###############################################################################
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
import pvc.lib.ansiprint as ansiprint import pvc.lib.ansiprint as ansiprint
from pvc.lib.common import UploadProgressBar, call_api, get_wait_retdata from pvc.lib.common import UploadProgressBar, call_api, get_wait_retdata
@@ -549,6 +544,12 @@ def ova_upload(config, name, ova_file, params):
bar = UploadProgressBar( bar = UploadProgressBar(
ova_file, end_message="Parsing file on remote side...", end_nl=False ova_file, end_message="Parsing file on remote side...", end_nl=False
) )
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
upload_data = MultipartEncoder( upload_data = MultipartEncoder(
fields={"file": ("filename", open(ova_file, "rb"), "application/octet-stream")} fields={"file": ("filename", open(ova_file, "rb"), "application/octet-stream")}
) )

View File

@@ -23,10 +23,6 @@ import math
from os import path from os import path
from json import loads from json import loads
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
import pvc.lib.ansiprint as ansiprint import pvc.lib.ansiprint as ansiprint
from pvc.lib.common import UploadProgressBar, call_api, get_wait_retdata from pvc.lib.common import UploadProgressBar, call_api, get_wait_retdata
@@ -1212,6 +1208,12 @@ def ceph_volume_upload(config, pool, volume, image_format, image_file):
bar = UploadProgressBar( bar = UploadProgressBar(
image_file, end_message="Parsing file on remote side...", end_nl=False image_file, end_message="Parsing file on remote side...", end_nl=False
) )
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
upload_data = MultipartEncoder( upload_data = MultipartEncoder(
fields={ fields={
"file": ("filename", open(image_file, "rb"), "application/octet-stream") "file": ("filename", open(image_file, "rb"), "application/octet-stream")

21
client-cli/pyproject.toml Normal file
View File

@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "pvc"
version = "0.9.107"
dependencies = [
"Click",
"PyYAML",
"lxml",
"colorama",
"requests",
"requests-toolbelt",
]
[tool.setuptools]
packages = ["pvc.cli", "pvc.lib"]
[project.scripts]
pvc = "pvc.cli.cli:cli"

View File

@@ -1,20 +0,0 @@
from setuptools import setup
setup(
name="pvc",
version="0.9.106",
packages=["pvc.cli", "pvc.lib"],
install_requires=[
"Click",
"PyYAML",
"lxml",
"colorama",
"requests",
"requests-toolbelt",
],
entry_points={
"console_scripts": [
"pvc = pvc.cli.cli:cli",
],
},
)

View File

@@ -469,15 +469,16 @@ def run_vm_backup(zkhandler, celery, config, vm_detail, force_full=False):
if len(marked_for_deletion) > 0: if len(marked_for_deletion) > 0:
for backup_to_delete in marked_for_deletion: for backup_to_delete in marked_for_deletion:
ret = vm.vm_worker_remove_snapshot( try:
zkhandler, None, vm_name, backup_to_delete["snapshot_name"] ret = vm.vm_worker_remove_snapshot(
) zkhandler, None, vm_name, backup_to_delete["snapshot_name"]
if ret is False: )
error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', leaving in tracked backups" except Exception:
error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', removing from tracked backups anyways"
log_err(celery, error_message) log_err(celery, error_message)
else:
rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}") rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}")
tracked_backups.remove(backup_to_delete) tracked_backups.remove(backup_to_delete)
tracked_backups = update_tracked_backups() tracked_backups = update_tracked_backups()
return tracked_backups return tracked_backups

View File

@@ -496,6 +496,7 @@ def getClusterInformation(zkhandler):
# Format the status data # Format the status data
cluster_information = { cluster_information = {
"cluster_name": zkhandler.read("base.config"),
"cluster_health": getClusterHealthFromFaults(zkhandler, faults_data), "cluster_health": getClusterHealthFromFaults(zkhandler, faults_data),
"node_health": getNodeHealth(zkhandler, node_list), "node_health": getNodeHealth(zkhandler, node_list),
"maintenance": maintenance_state, "maintenance": maintenance_state,

View File

@@ -1212,3 +1212,7 @@ def get_detect_device(detect_string):
return device return device
else: else:
return None return None
def translate_domains_to_names(zkhandler, domain_list):
return list(zkhandler.read_many([("domain.name", d) for d in domain_list]))

View File

@@ -142,7 +142,9 @@ def getNodeInformation(zkhandler, node_name):
node_mem_free = int(_node_mem_free) node_mem_free = int(_node_mem_free)
node_load = float(_node_load) node_load = float(_node_load)
node_domains_count = int(_node_domains_count) node_domains_count = int(_node_domains_count)
node_running_domains = _node_running_domains.split() node_running_domains = common.translate_domains_to_names(
zkhandler, _node_running_domains.split()
)
try: try:
node_health = int(_node_health) node_health = int(_node_health)

6
debian/changelog vendored
View File

@@ -1,3 +1,9 @@
pvc (0.9.107-0) unstable; urgency=high
* [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
-- Joshua M. Boniface <joshua@boniface.me> Mon, 10 Feb 2025 23:15:21 -0500
pvc (0.9.106-0) unstable; urgency=high pvc (0.9.106-0) unstable; urgency=high
* [API Daemon] Fixes a calculation bug when checking storage free space * [API Daemon] Fixes a calculation bug when checking storage free space

2
debian/compat vendored
View File

@@ -1 +1 @@
9 13

5
debian/rules vendored
View File

@@ -7,13 +7,14 @@ export DH_VERBOSE = 1
dh $@ --with python3 dh $@ --with python3
override_dh_python3: override_dh_python3:
cd $(CURDIR)/client-cli; pybuild --system=distutils --dest-dir=../debian/pvc-client-cli/ cd $(CURDIR)/client-cli; pybuild --system=pyproject --dest-dir=../debian/pvc-client-cli/
mkdir -p debian/pvc-client-cli/usr/lib/python3 mkdir -p debian/pvc-client-cli/usr/lib/python3
mv debian/pvc-client-cli/usr/lib/python3*/* debian/pvc-client-cli/usr/lib/python3/ mv debian/pvc-client-cli/usr/lib/python3*/* debian/pvc-client-cli/usr/lib/python3/
rm -r $(CURDIR)/client-cli/.pybuild $(CURDIR)/client-cli/pvc.egg-info rm -r $(CURDIR)/client-cli/.pybuild $(CURDIR)/client-cli/pvc.egg-info
override_dh_auto_clean: override_dh_auto_clean:
find . -name "__pycache__" -o -name ".pybuild" -exec rm -fr {} + || true find $(CURDIR) -name "__pycache__" -o -name ".pybuild" -exec rm -fr {} + || true
rm -r $(CURDIR)/client-cli/build
# If you need to rebuild the Sphinx documentation # If you need to rebuild the Sphinx documentation
# Add spinxdoc to the dh --with line # Add spinxdoc to the dh --with line

View File

@@ -33,7 +33,7 @@ import os
import signal import signal
# Daemon version # Daemon version
version = "0.9.106" version = "0.9.107"
########################################################## ##########################################################
@@ -64,6 +64,7 @@ def entrypoint():
logger.out("|--------------------------------------------------------------|") logger.out("|--------------------------------------------------------------|")
logger.out("| Parallel Virtual Cluster health daemon v{0: <20} |".format(version)) logger.out("| Parallel Virtual Cluster health daemon v{0: <20} |".format(version))
logger.out("| Debug: {0: <53} |".format(str(config["debug"]))) logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"])) logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
logger.out("| Host: {0: <54} |".format(config["node_hostname"])) logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
logger.out("| ID: {0: <56} |".format(config["node_id"])) logger.out("| ID: {0: <56} |".format(config["node_id"]))

View File

@@ -49,7 +49,7 @@ import re
import json import json
# Daemon version # Daemon version
version = "0.9.106" version = "0.9.107"
########################################################## ##########################################################
@@ -83,6 +83,7 @@ def entrypoint():
logger.out("|--------------------------------------------------------------|") logger.out("|--------------------------------------------------------------|")
logger.out("| Parallel Virtual Cluster node daemon v{0: <22} |".format(version)) logger.out("| Parallel Virtual Cluster node daemon v{0: <22} |".format(version))
logger.out("| Debug: {0: <53} |".format(str(config["debug"]))) logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"])) logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
logger.out("| Host: {0: <54} |".format(config["node_hostname"])) logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
logger.out("| ID: {0: <56} |".format(config["node_id"])) logger.out("| ID: {0: <56} |".format(config["node_id"]))
@@ -301,6 +302,9 @@ def entrypoint():
# Set up this node in Zookeeper # Set up this node in Zookeeper
pvcnoded.util.zookeeper.setup_node(logger, config, zkhandler) pvcnoded.util.zookeeper.setup_node(logger, config, zkhandler)
# Set the cluster name in Zookeeper
zkhandler.write([("base.config", config["cluster_name"])])
# Check that the primary node key exists and create it with us as primary if not # Check that the primary node key exists and create it with us as primary if not
try: try:
current_primary = zkhandler.read("base.config.primary_node") current_primary = zkhandler.read("base.config.primary_node")

View File

@@ -58,7 +58,7 @@ from daemon_lib.automirror import (
) )
# Daemon version # Daemon version
version = "0.9.106" version = "0.9.107"
config = cfg.get_configuration() config = cfg.get_configuration()