Compare commits

..

13 Commits

Author SHA1 Message Date
bb77c5f1fc Move lxml imports to runtime
Avoid loading these if unneeded
2025-03-12 23:50:12 -04:00
fc740927cc Switch to modern Python build system
Remove setuptools and use pyproject.toml instead.
2025-03-12 23:46:52 -04:00
34149fe933 Move multipart.encoder import to runtime
Another library with a ridiculously long load time.
2025-03-12 23:11:42 -04:00
a2fed1885c Remove distutils strtobool
This import takes a ridiculously long time just to implement a function
that can be done in one line in O(1) time.
2025-03-12 23:09:44 -04:00
ee055bdb81 Improve loading efficiency of common.py 2025-03-12 22:55:11 -04:00
60967b5606 Fix formatters colour bug for mirror state 2025-03-02 14:58:26 -05:00
89bfbe1fd8 Add translation of domain UUIDs to names
Allows frontends to better handle the domain list gracefully, as humans
don't care about the UUIDs.
2025-02-28 21:52:42 -05:00
be092756a9 Add cluster name to Zookeeper and log+API output 2025-02-27 00:57:07 -05:00
387fcfdf6b Bump version to 0.9.107 2025-02-10 23:15:21 -05:00
d695e855f9 Catch errors if snapshot fails to remove
A missing snapshot could cause an exception here which would break the
entire autobackup run. Catch the exception and continue on as this
should never be a fatal situation.
2025-02-10 16:33:44 -05:00
309b203f5d Bump version to 0.9.106 2024-12-09 16:45:10 -05:00
de4241161e Use stored_bytes in utilization calculation
used_bytes is a broken value since it does not take object replicas into
account, thus throwing off these calculations. Use stored_bytes instead
which properly represents this value.
2024-12-09 16:43:43 -05:00
1950d22876 Correct indentation 2024-11-26 13:38:51 -05:00
23 changed files with 132 additions and 87 deletions

1
.gitignore vendored
View File

@@ -8,3 +8,4 @@ debian/pvc-*/
debian/*.log
debian/*.substvars
debian/files
client-cli/build/

View File

@@ -1 +1 @@
0.9.105
0.9.107

View File

@@ -1,5 +1,13 @@
## PVC Changelog
###### [v0.9.107](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.107)
* [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
###### [v0.9.106](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.106)
* [API Daemon] Fixes a calculation bug when checking storage free space
###### [v0.9.105](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.105)
* [API Daemon/Provisioner] Corrects some small bugs with OVA handling

View File

@@ -81,6 +81,7 @@ def create_app():
print("|--------------------------------------------------------------|")
print("| Parallel Virtual Cluster API daemon v{0: <23} |".format(version))
print("| Debug: {0: <53} |".format(str(config["debug"])))
print("| Cluster: {0: <51} |".format(config["cluster_name"]))
print("| API version: v{0: <46} |".format(API_VERSION))
print(
"| Listen: {0: <52} |".format(

View File

@@ -25,7 +25,6 @@ from functools import wraps
from json import dump as jdump
from json import dumps as jdumps
from json import loads as jloads
from lxml.etree import fromstring, tostring
from os import environ, makedirs, path
from re import sub, match
from yaml import load as yload
@@ -1192,6 +1191,8 @@ def cli_vm_define(
# Verify our XML is sensible
try:
from lxml.etree import fromstring, tostring
xml_data = fromstring(vmconfig_data)
new_cfg = tostring(xml_data, pretty_print=True).decode("utf8")
except Exception:
@@ -1377,6 +1378,9 @@ def cli_vm_modify(
# Grab the current config
current_vm_cfg_raw = vm_information.get("xml")
from lxml.etree import fromstring, tostring
xml_data = fromstring(current_vm_cfg_raw)
current_vm_cfgfile = tostring(xml_data, pretty_print=True).decode("utf8").strip()
@@ -1435,6 +1439,8 @@ def cli_vm_modify(
# Verify our XML is sensible
try:
from lxml.etree import fromstring, tostring
xml_data = fromstring(new_vm_cfgfile)
new_cfg = tostring(xml_data, pretty_print=True).decode("utf8")
except Exception as e:
@@ -3265,6 +3271,9 @@ def cli_vm_dump(filename, domain):
finish(False, 'ERROR: Could not find VM "{}"!'.format(domain))
current_vm_cfg_raw = retdata.get("xml")
from lxml.etree import fromstring, tostring
xml_data = fromstring(current_vm_cfg_raw)
current_vm_cfgfile = tostring(xml_data, pretty_print=True).decode("utf8")
xml = current_vm_cfgfile.strip()

View File

@@ -221,7 +221,7 @@ def cli_cluster_status_format_pretty(CLI_CONFIG, data):
continue
if state in ["start"]:
state_colour = ansii["green"]
elif state in ["migrate", "disable", "provision", "mirror"]:
elif state in ["migrate", "disable", "provision"]:
state_colour = ansii["blue"]
elif state in ["mirror"]:
state_colour = ansii["purple"]
@@ -331,7 +331,7 @@ def cli_cluster_status_format_short(CLI_CONFIG, data):
)
)
messages = "\n ".join(message_list)
messages = "\n ".join(message_list)
else:
messages = "None"
output.append(f"{ansii['purple']}Active faults:{ansii['end']} {messages}")

View File

@@ -20,7 +20,6 @@
###############################################################################
from click import echo as click_echo
from distutils.util import strtobool
from json import load as jload
from json import dump as jdump
from os import chmod, environ, getpid, path, get_terminal_size
@@ -150,9 +149,7 @@ def get_config(store_data, connection=None):
if connection == "local":
config["verify_ssl"] = False
else:
config["verify_ssl"] = bool(
strtobool(environ.get("PVC_CLIENT_VERIFY_SSL", "True"))
)
config["verify_ssl"] = environ.get("PVC_CLIENT_VERIFY_SSL", "True") == "True"
return config

View File

@@ -19,12 +19,13 @@
#
###############################################################################
import os
import math
import time
import requests
import click
from ast import literal_eval
from click import echo, progressbar
from math import ceil
from os.path import getsize
from requests import get, post, put, patch, delete, Response
from requests.exceptions import ConnectionError
from time import time
from urllib3 import disable_warnings
@@ -39,7 +40,7 @@ def format_bytes(size_bytes):
}
human_bytes = "0B"
for unit in sorted(byte_unit_matrix, key=byte_unit_matrix.get):
formatted_bytes = int(math.ceil(size_bytes / byte_unit_matrix[unit]))
formatted_bytes = int(ceil(size_bytes / byte_unit_matrix[unit]))
if formatted_bytes < 10000:
human_bytes = "{}{}".format(formatted_bytes, unit)
break
@@ -57,7 +58,7 @@ def format_metric(integer):
}
human_integer = "0"
for unit in sorted(integer_unit_matrix, key=integer_unit_matrix.get):
formatted_integer = int(math.ceil(integer / integer_unit_matrix[unit]))
formatted_integer = int(ceil(integer / integer_unit_matrix[unit]))
if formatted_integer < 10000:
human_integer = "{}{}".format(formatted_integer, unit)
break
@@ -97,12 +98,12 @@ def format_age(age_secs):
class UploadProgressBar(object):
def __init__(self, filename, end_message="", end_nl=True):
file_size = os.path.getsize(filename)
file_size = getsize(filename)
file_size_human = format_bytes(file_size)
click.echo("Uploading file (total size {})...".format(file_size_human))
echo("Uploading file (total size {})...".format(file_size_human))
self.length = file_size
self.time_last = int(round(time.time() * 1000)) - 1000
self.time_last = int(round(time() * 1000)) - 1000
self.bytes_last = 0
self.bytes_diff = 0
self.is_end = False
@@ -114,7 +115,7 @@ class UploadProgressBar(object):
else:
self.end_suffix = ""
self.bar = click.progressbar(length=self.length, width=20, show_eta=True)
self.bar = progressbar(length=self.length, width=20, show_eta=True)
def update(self, monitor):
bytes_cur = monitor.bytes_read
@@ -123,7 +124,7 @@ class UploadProgressBar(object):
self.is_end = True
self.bytes_last = bytes_cur
time_cur = int(round(time.time() * 1000))
time_cur = int(round(time() * 1000))
if (time_cur - 1000) > self.time_last:
self.time_last = time_cur
self.bar.update(self.bytes_diff)
@@ -132,13 +133,13 @@ class UploadProgressBar(object):
if self.is_end:
self.bar.update(self.bytes_diff)
self.bytes_diff = 0
click.echo()
click.echo()
echo()
echo()
if self.end_message:
click.echo(self.end_message + self.end_suffix, nl=self.end_nl)
echo(self.end_message + self.end_suffix, nl=self.end_nl)
class ErrorResponse(requests.Response):
class ErrorResponse(Response):
def __init__(self, json_data, status_code, headers):
self.json_data = json_data
self.status_code = status_code
@@ -178,7 +179,7 @@ def call_api(
for i in range(3):
failed = False
try:
response = requests.get(
response = get(
uri,
timeout=timeout,
headers=headers,
@@ -190,16 +191,14 @@ def call_api(
failed = True
continue
break
except requests.exceptions.ConnectionError:
except ConnectionError:
failed = True
continue
if failed:
error = f"Code {response.status_code}" if response else "Timeout"
raise requests.exceptions.ConnectionError(
f"Failed to connect after 3 tries ({error})"
)
raise ConnectionError(f"Failed to connect after 3 tries ({error})")
if operation == "post":
response = requests.post(
response = post(
uri,
timeout=timeout,
headers=headers,
@@ -209,7 +208,7 @@ def call_api(
verify=config["verify_ssl"],
)
if operation == "put":
response = requests.put(
response = put(
uri,
timeout=timeout,
headers=headers,
@@ -219,7 +218,7 @@ def call_api(
verify=config["verify_ssl"],
)
if operation == "patch":
response = requests.patch(
response = patch(
uri,
timeout=timeout,
headers=headers,
@@ -228,7 +227,7 @@ def call_api(
verify=config["verify_ssl"],
)
if operation == "delete":
response = requests.delete(
response = delete(
uri,
timeout=timeout,
headers=headers,
@@ -243,10 +242,10 @@ def call_api(
# Display debug output
if config["debug"]:
click.echo("API endpoint: {}".format(uri), err=True)
click.echo("Response code: {}".format(response.status_code), err=True)
click.echo("Response headers: {}".format(response.headers), err=True)
click.echo(err=True)
echo("API endpoint: {}".format(uri), err=True)
echo("Response code: {}".format(response.status_code), err=True)
echo("Response headers: {}".format(response.headers), err=True)
echo(err=True)
# Return the response object
return response

View File

@@ -19,11 +19,6 @@
#
###############################################################################
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
import pvc.lib.ansiprint as ansiprint
from pvc.lib.common import UploadProgressBar, call_api, get_wait_retdata
@@ -549,6 +544,12 @@ def ova_upload(config, name, ova_file, params):
bar = UploadProgressBar(
ova_file, end_message="Parsing file on remote side...", end_nl=False
)
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
upload_data = MultipartEncoder(
fields={"file": ("filename", open(ova_file, "rb"), "application/octet-stream")}
)

View File

@@ -23,10 +23,6 @@ import math
from os import path
from json import loads
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
import pvc.lib.ansiprint as ansiprint
from pvc.lib.common import UploadProgressBar, call_api, get_wait_retdata
@@ -1212,6 +1208,12 @@ def ceph_volume_upload(config, pool, volume, image_format, image_file):
bar = UploadProgressBar(
image_file, end_message="Parsing file on remote side...", end_nl=False
)
from requests_toolbelt.multipart.encoder import (
MultipartEncoder,
MultipartEncoderMonitor,
)
upload_data = MultipartEncoder(
fields={
"file": ("filename", open(image_file, "rb"), "application/octet-stream")

21
client-cli/pyproject.toml Normal file
View File

@@ -0,0 +1,21 @@
[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "pvc"
version = "0.9.107"
dependencies = [
"Click",
"PyYAML",
"lxml",
"colorama",
"requests",
"requests-toolbelt",
]
[tool.setuptools]
packages = ["pvc.cli", "pvc.lib"]
[project.scripts]
pvc = "pvc.cli.cli:cli"

View File

@@ -1,20 +0,0 @@
from setuptools import setup
setup(
name="pvc",
version="0.9.105",
packages=["pvc.cli", "pvc.lib"],
install_requires=[
"Click",
"PyYAML",
"lxml",
"colorama",
"requests",
"requests-toolbelt",
],
entry_points={
"console_scripts": [
"pvc = pvc.cli.cli:cli",
],
},
)

View File

@@ -469,15 +469,16 @@ def run_vm_backup(zkhandler, celery, config, vm_detail, force_full=False):
if len(marked_for_deletion) > 0:
for backup_to_delete in marked_for_deletion:
ret = vm.vm_worker_remove_snapshot(
zkhandler, None, vm_name, backup_to_delete["snapshot_name"]
)
if ret is False:
error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', leaving in tracked backups"
try:
ret = vm.vm_worker_remove_snapshot(
zkhandler, None, vm_name, backup_to_delete["snapshot_name"]
)
except Exception:
error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', removing from tracked backups anyways"
log_err(celery, error_message)
else:
rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}")
tracked_backups.remove(backup_to_delete)
rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}")
tracked_backups.remove(backup_to_delete)
tracked_backups = update_tracked_backups()
return tracked_backups

View File

@@ -596,11 +596,11 @@ def add_volume(zkhandler, pool, name, size, force_flag=False, zk_only=False):
# Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
pool_total_bytes = (
int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
)
pool_safe_total_bytes = int(pool_total_bytes * 0.80)
pool_safe_free_bytes = pool_safe_total_bytes - int(
pool_information["stats"]["used_bytes"]
pool_information["stats"]["stored_bytes"]
)
if size_bytes >= pool_safe_free_bytes and not force_flag:
return (
@@ -656,11 +656,11 @@ def clone_volume(zkhandler, pool, name_src, name_new, force_flag=False):
# Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
pool_total_bytes = (
int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
)
pool_safe_total_bytes = int(pool_total_bytes * 0.80)
pool_safe_free_bytes = pool_safe_total_bytes - int(
pool_information["stats"]["used_bytes"]
pool_information["stats"]["stored_bytes"]
)
if size_bytes >= pool_safe_free_bytes and not force_flag:
return (
@@ -721,11 +721,11 @@ def resize_volume(zkhandler, pool, name, size, force_flag=False):
# Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
pool_total_bytes = (
int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
)
pool_safe_total_bytes = int(pool_total_bytes * 0.80)
pool_safe_free_bytes = pool_safe_total_bytes - int(
pool_information["stats"]["used_bytes"]
pool_information["stats"]["stored_bytes"]
)
if size_bytes >= pool_safe_free_bytes and not force_flag:
return (

View File

@@ -496,6 +496,7 @@ def getClusterInformation(zkhandler):
# Format the status data
cluster_information = {
"cluster_name": zkhandler.read("base.config"),
"cluster_health": getClusterHealthFromFaults(zkhandler, faults_data),
"node_health": getNodeHealth(zkhandler, node_list),
"maintenance": maintenance_state,

View File

@@ -1212,3 +1212,7 @@ def get_detect_device(detect_string):
return device
else:
return None
def translate_domains_to_names(zkhandler, domain_list):
return list(zkhandler.read_many([("domain.name", d) for d in domain_list]))

View File

@@ -142,7 +142,9 @@ def getNodeInformation(zkhandler, node_name):
node_mem_free = int(_node_mem_free)
node_load = float(_node_load)
node_domains_count = int(_node_domains_count)
node_running_domains = _node_running_domains.split()
node_running_domains = common.translate_domains_to_names(
zkhandler, _node_running_domains.split()
)
try:
node_health = int(_node_health)

12
debian/changelog vendored
View File

@@ -1,3 +1,15 @@
pvc (0.9.107-0) unstable; urgency=high
* [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
-- Joshua M. Boniface <joshua@boniface.me> Mon, 10 Feb 2025 23:15:21 -0500
pvc (0.9.106-0) unstable; urgency=high
* [API Daemon] Fixes a calculation bug when checking storage free space
-- Joshua M. Boniface <joshua@boniface.me> Mon, 09 Dec 2024 16:45:10 -0500
pvc (0.9.105-0) unstable; urgency=high
* [API Daemon/Provisioner] Corrects some small bugs with OVA handling

2
debian/compat vendored
View File

@@ -1 +1 @@
9
13

5
debian/rules vendored
View File

@@ -7,13 +7,14 @@ export DH_VERBOSE = 1
dh $@ --with python3
override_dh_python3:
cd $(CURDIR)/client-cli; pybuild --system=distutils --dest-dir=../debian/pvc-client-cli/
cd $(CURDIR)/client-cli; pybuild --system=pyproject --dest-dir=../debian/pvc-client-cli/
mkdir -p debian/pvc-client-cli/usr/lib/python3
mv debian/pvc-client-cli/usr/lib/python3*/* debian/pvc-client-cli/usr/lib/python3/
rm -r $(CURDIR)/client-cli/.pybuild $(CURDIR)/client-cli/pvc.egg-info
override_dh_auto_clean:
find . -name "__pycache__" -o -name ".pybuild" -exec rm -fr {} + || true
find $(CURDIR) -name "__pycache__" -o -name ".pybuild" -exec rm -fr {} + || true
rm -r $(CURDIR)/client-cli/build
# If you need to rebuild the Sphinx documentation
# Add spinxdoc to the dh --with line

View File

@@ -33,7 +33,7 @@ import os
import signal
# Daemon version
version = "0.9.105"
version = "0.9.107"
##########################################################
@@ -64,6 +64,7 @@ def entrypoint():
logger.out("|--------------------------------------------------------------|")
logger.out("| Parallel Virtual Cluster health daemon v{0: <20} |".format(version))
logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
logger.out("| ID: {0: <56} |".format(config["node_id"]))

View File

@@ -49,7 +49,7 @@ import re
import json
# Daemon version
version = "0.9.105"
version = "0.9.107"
##########################################################
@@ -83,6 +83,7 @@ def entrypoint():
logger.out("|--------------------------------------------------------------|")
logger.out("| Parallel Virtual Cluster node daemon v{0: <22} |".format(version))
logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
logger.out("| ID: {0: <56} |".format(config["node_id"]))
@@ -301,6 +302,9 @@ def entrypoint():
# Set up this node in Zookeeper
pvcnoded.util.zookeeper.setup_node(logger, config, zkhandler)
# Set the cluster name in Zookeeper
zkhandler.write([("base.config", config["cluster_name"])])
# Check that the primary node key exists and create it with us as primary if not
try:
current_primary = zkhandler.read("base.config.primary_node")

View File

@@ -58,7 +58,7 @@ from daemon_lib.automirror import (
)
# Daemon version
version = "0.9.105"
version = "0.9.107"
config = cfg.get_configuration()