Compare commits


12 Commits

SHA1 Message Date
89bfbe1fd8 Add translation of domain UUIDs to names
Allows frontends to better handle the domain list gracefully, as humans
don't care about the UUIDs.
2025-02-28 21:52:42 -05:00
be092756a9 Add cluster name to Zookeeper and log+API output 2025-02-27 00:57:07 -05:00
387fcfdf6b Bump version to 0.9.107 2025-02-10 23:15:21 -05:00
d695e855f9 Catch errors if snapshot fails to remove
A missing snapshot could cause an exception here which would break the
entire autobackup run. Catch the exception and continue on as this
should never be a fatal situation.
2025-02-10 16:33:44 -05:00
309b203f5d Bump version to 0.9.106 2024-12-09 16:45:10 -05:00
de4241161e Use stored_bytes in utilization calculation
used_bytes is a broken value since it does not take object replicas into
account, thus throwing off these calculations. Use stored_bytes instead
which properly represents this value.
2024-12-09 16:43:43 -05:00
1950d22876 Correct indentation 2024-11-26 13:38:51 -05:00
7a32d8da9d Bump version to 0.9.105 2024-11-19 14:43:43 -05:00
e3b8673789 Correct default maximum downtime
Needed because a none-type is not valid later.
2024-11-18 17:14:19 -05:00
9db46d48e4 Ensure OVAs set migration_max_downtime 2024-11-18 17:12:49 -05:00
d74c3a2d45 Fix incorrect OVA script names 2024-11-18 16:57:28 -05:00
f4e946c262 Ensure datahuman is a string if it's not 2024-11-18 16:53:22 -05:00
16 changed files with 82 additions and 24 deletions

View File

@@ -1 +1 @@
-0.9.104
+0.9.107

View File

@@ -1,5 +1,17 @@
## PVC Changelog
+###### [v0.9.107](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.107)
+* [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
+###### [v0.9.106](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.106)
+* [API Daemon] Fixes a calculation bug when checking storage free space
+###### [v0.9.105](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.105)
+* [API Daemon/Provisioner] Corrects some small bugs with OVA handling
###### [v0.9.104](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.104)
* [API Daemon] Fixes a bug that failed uploading of RAW block devices in "storage volume upload"

View File

@@ -81,6 +81,7 @@ def create_app():
print("|--------------------------------------------------------------|")
print("| Parallel Virtual Cluster API daemon v{0: <23} |".format(version))
print("| Debug: {0: <53} |".format(str(config["debug"])))
print("| Cluster: {0: <51} |".format(config["cluster_name"]))
print("| API version: v{0: <46} |".format(API_VERSION))
print(
"| Listen: {0: <52} |".format(

View File

@@ -179,6 +179,10 @@ def upload_ova(zkhandler, pool, name, ova_size):
}
retcode = 400
return output, retcode
+else:
+ova_script = "default_ova"
+else:
+ova_script = "ova"
ova_archive = None
@@ -397,7 +401,14 @@ def upload_ova(zkhandler, pool, name, ova_size):
vnc = False
serial = True
retdata, retcode = provisioner.create_template_system(
-name, vcpu_count, vram_mb, serial, vnc, vnc_bind=None, ova=ova_id
+name,
+vcpu_count,
+vram_mb,
+serial,
+vnc,
+vnc_bind=None,
+ova=ova_id,
+migration_max_downtime=300,
)
if retcode != 200:
return retdata, retcode
@@ -414,7 +425,7 @@ def upload_ova(zkhandler, pool, name, ova_size):
None,
None,
userdata=None,
script="default_ova",
script=ova_script,
ova=name,
arguments=None,
)

View File

@@ -221,7 +221,7 @@ def create_template_system(
node_selector=None,
node_autostart=False,
migration_method=None,
-migration_max_downtime=None,
+migration_max_downtime=300,
ova=None,
):
if list_template_system(name, is_fuzzy=False)[-1] != 404:
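Why the default moved from None to 300: per the commit note, a None value "is not valid later", i.e. downstream handling of the downtime value fails on NoneType, so the keyword now always carries a sane integer. A toy illustration of that failure mode, not the actual downstream code:

def format_downtime(migration_max_downtime=300):
    # With the old default of None, int(None) raises TypeError before the
    # value can ever be applied to the migration.
    return f"{int(migration_max_downtime)} ms"

print(format_downtime())     # "300 ms"
print(format_downtime(500))  # "500 ms"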

View File

@@ -331,7 +331,7 @@ def cli_cluster_status_format_short(CLI_CONFIG, data):
)
)
messages = "\n ".join(message_list)
messages = "\n ".join(message_list)
else:
messages = "None"
output.append(f"{ansii['purple']}Active faults:{ansii['end']} {messages}")

View File

@@ -2,7 +2,7 @@ from setuptools import setup
setup(
name="pvc",
version="0.9.104",
version="0.9.107",
packages=["pvc.cli", "pvc.lib"],
install_requires=[
"Click",

View File

@@ -469,15 +469,16 @@ def run_vm_backup(zkhandler, celery, config, vm_detail, force_full=False):
if len(marked_for_deletion) > 0:
for backup_to_delete in marked_for_deletion:
-ret = vm.vm_worker_remove_snapshot(
-zkhandler, None, vm_name, backup_to_delete["snapshot_name"]
-)
-if ret is False:
-error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', leaving in tracked backups"
+try:
+ret = vm.vm_worker_remove_snapshot(
+zkhandler, None, vm_name, backup_to_delete["snapshot_name"]
+)
+except Exception:
+error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', removing from tracked backups anyways"
log_err(celery, error_message)
-else:
-rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}")
-tracked_backups.remove(backup_to_delete)
+rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}")
+tracked_backups.remove(backup_to_delete)
tracked_backups = update_tracked_backups()
return tracked_backups
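A minimal, self-contained sketch of the pruning pattern this hunk adopts (remove_snapshot here is a hypothetical stand-in for vm.vm_worker_remove_snapshot): the removal is wrapped in try/except so a missing snapshot logs an error instead of aborting the rest of the autobackup run, and the backup directory and tracking entry are dropped either way.

import logging
from shutil import rmtree

def prune_backups(marked_for_deletion, tracked_backups, vm_backup_path, remove_snapshot):
    for backup in marked_for_deletion:
        try:
            # remove_snapshot() stands in for vm.vm_worker_remove_snapshot();
            # it may raise if the snapshot no longer exists.
            remove_snapshot(backup["snapshot_name"])
        except Exception:
            # Log and continue: a missing snapshot should never be fatal.
            logging.error(
                "Failed to remove obsolete backup snapshot '%s', "
                "removing from tracked backups anyways",
                backup["snapshot_name"],
            )
        # Prune the on-disk files and the tracking entry regardless of the outcome.
        # ignore_errors=True is a sketch-level simplification.
        rmtree(f"{vm_backup_path}/{backup['snapshot_name']}", ignore_errors=True)
        tracked_backups.remove(backup)
    return tracked_backups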

View File

@@ -121,6 +121,9 @@ def format_bytes_tohuman(databytes):
def format_bytes_fromhuman(datahuman):
+if not isinstance(datahuman, str):
+datahuman = str(datahuman)
if not re.search(r"[A-Za-z]+", datahuman):
dataunit = "B"
datasize = float(datahuman)
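Roughly what the guard above protects: non-string input (for example a bare integer from an OVA manifest) is coerced to str before the regex check, which would otherwise raise TypeError. A simplified sketch, with a hypothetical unit table since the real unit handling is not shown in this hunk:

import re

def bytes_fromhuman(datahuman):
    # Coerce ints/floats to str so re.search() below does not raise TypeError.
    if not isinstance(datahuman, str):
        datahuman = str(datahuman)

    # No unit letters at all: treat the value as plain bytes.
    if not re.search(r"[A-Za-z]+", datahuman):
        return int(float(datahuman))

    # Hypothetical, simplified unit table for this sketch only.
    units = {"B": 1, "K": 1024, "M": 1024**2, "G": 1024**3, "T": 1024**4}
    value = float(re.sub(r"[A-Za-z]+", "", datahuman))
    unit = re.search(r"[A-Za-z]+", datahuman).group().upper()[0]
    return int(value * units[unit])

print(bytes_fromhuman(4096))  # 4096
print(bytes_fromhuman("2G"))  # 2147483648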
@@ -593,11 +596,11 @@ def add_volume(zkhandler, pool, name, size, force_flag=False, zk_only=False):
# Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
pool_total_bytes = (
-int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
+int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
)
pool_safe_total_bytes = int(pool_total_bytes * 0.80)
pool_safe_free_bytes = pool_safe_total_bytes - int(
pool_information["stats"]["used_bytes"]
pool_information["stats"]["stored_bytes"]
)
if size_bytes >= pool_safe_free_bytes and not force_flag:
return (
@@ -653,11 +656,11 @@ def clone_volume(zkhandler, pool, name_src, name_new, force_flag=False):
# Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
pool_total_bytes = (
-int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
+int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
)
pool_safe_total_bytes = int(pool_total_bytes * 0.80)
pool_safe_free_bytes = pool_safe_total_bytes - int(
pool_information["stats"]["used_bytes"]
pool_information["stats"]["stored_bytes"]
)
if size_bytes >= pool_safe_free_bytes and not force_flag:
return (
@@ -718,11 +721,11 @@ def resize_volume(zkhandler, pool, name, size, force_flag=False):
# Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
pool_total_bytes = (
-int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
+int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
)
pool_safe_total_bytes = int(pool_total_bytes * 0.80)
pool_safe_free_bytes = pool_safe_total_bytes - int(
pool_information["stats"]["used_bytes"]
pool_information["stats"]["stored_bytes"]
)
if size_bytes >= pool_safe_free_bytes and not force_flag:
return (
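The three hunks above change the same calculation in add_volume, clone_volume, and resize_volume. Condensed into one hedged sketch (the helper name is illustrative): the pool's total capacity is reconstructed as stored bytes plus free bytes, 80% of that is treated as the safe ceiling, and the request is refused unless forced when it would not fit under that ceiling. stored_bytes counts logical data once, whereas used_bytes counts every replica and so overstated utilization.

def volume_fits_safely(pool_stats, pool_total_free_bytes, size_bytes, force_flag=False):
    # Reconstruct total capacity from data stored (once, replica-independent) plus free space.
    pool_total_bytes = int(pool_stats["stored_bytes"]) + pool_total_free_bytes

    # Only 80% of the pool is considered safely usable.
    pool_safe_total_bytes = int(pool_total_bytes * 0.80)
    pool_safe_free_bytes = pool_safe_total_bytes - int(pool_stats["stored_bytes"])

    # Refuse the create/clone/resize unless the caller forces it.
    return size_bytes < pool_safe_free_bytes or force_flag

# Example: 100 GiB requested against 700 GiB stored + 300 GiB free:
# total = 1000 GiB, safe total = 800 GiB, safe free = 100 GiB -> refused (100 >= 100).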

View File

@@ -496,6 +496,7 @@ def getClusterInformation(zkhandler):
# Format the status data
cluster_information = {
"cluster_name": zkhandler.read("base.config"),
"cluster_health": getClusterHealthFromFaults(zkhandler, faults_data),
"node_health": getNodeHealth(zkhandler, node_list),
"maintenance": maintenance_state,

View File

@@ -1212,3 +1212,7 @@ def get_detect_device(detect_string):
return device
else:
return None
+def translate_domains_to_names(zkhandler, domain_list):
+return list(zkhandler.read_many([("domain.name", d) for d in domain_list]))
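A hedged usage sketch for the new helper (the UUID and the FakeZKHandler below are illustrative): each domain UUID becomes a ("domain.name", uuid) key tuple, read_many resolves them in a single pass, and the result comes back as a plain list of names for the frontend.

class FakeZKHandler:
    # Illustrative stand-in for the real ZKHandler; only read_many() is modelled.
    def __init__(self, names_by_uuid):
        self.names_by_uuid = names_by_uuid

    def read_many(self, keys):
        # keys is a list of ("domain.name", uuid) tuples, as built by the helper.
        return [self.names_by_uuid[uuid] for _schema_key, uuid in keys]

def translate_domains_to_names(zkhandler, domain_list):
    return list(zkhandler.read_many([("domain.name", d) for d in domain_list]))

zk = FakeZKHandler({"6f9619ff-8b86-d011-b42d-00c04fc964ff": "web01"})
print(translate_domains_to_names(zk, ["6f9619ff-8b86-d011-b42d-00c04fc964ff"]))
# -> ['web01']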

View File

@@ -142,7 +142,9 @@ def getNodeInformation(zkhandler, node_name):
node_mem_free = int(_node_mem_free)
node_load = float(_node_load)
node_domains_count = int(_node_domains_count)
-node_running_domains = _node_running_domains.split()
+node_running_domains = common.translate_domains_to_names(
+zkhandler, _node_running_domains.split()
+)
try:
node_health = int(_node_health)

debian/changelog
View File

@@ -1,3 +1,21 @@
+pvc (0.9.107-0) unstable; urgency=high
+* [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
+-- Joshua M. Boniface <joshua@boniface.me> Mon, 10 Feb 2025 23:15:21 -0500
+pvc (0.9.106-0) unstable; urgency=high
+* [API Daemon] Fixes a calculation bug when checking storage free space
+-- Joshua M. Boniface <joshua@boniface.me> Mon, 09 Dec 2024 16:45:10 -0500
+pvc (0.9.105-0) unstable; urgency=high
+* [API Daemon/Provisioner] Corrects some small bugs with OVA handling
+-- Joshua M. Boniface <joshua@boniface.me> Tue, 19 Nov 2024 14:43:43 -0500
pvc (0.9.104-0) unstable; urgency=high
* [API Daemon] Fixes a bug that failed uploading of RAW block devices in "storage volume upload"

View File

@@ -33,7 +33,7 @@ import os
import signal
# Daemon version
version = "0.9.104"
version = "0.9.107"
##########################################################
@@ -64,6 +64,7 @@ def entrypoint():
logger.out("|--------------------------------------------------------------|")
logger.out("| Parallel Virtual Cluster health daemon v{0: <20} |".format(version))
logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
logger.out("| ID: {0: <56} |".format(config["node_id"]))

View File

@@ -49,7 +49,7 @@ import re
import json
# Daemon version
version = "0.9.104"
version = "0.9.107"
##########################################################
@@ -83,6 +83,7 @@ def entrypoint():
logger.out("|--------------------------------------------------------------|")
logger.out("| Parallel Virtual Cluster node daemon v{0: <22} |".format(version))
logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
logger.out("| ID: {0: <56} |".format(config["node_id"]))
@@ -301,6 +302,9 @@ def entrypoint():
# Set up this node in Zookeeper
pvcnoded.util.zookeeper.setup_node(logger, config, zkhandler)
+# Set the cluster name in Zookeeper
+zkhandler.write([("base.config", config["cluster_name"])])
# Check that the primary node key exists and create it with us as primary if not
try:
current_primary = zkhandler.read("base.config.primary_node")
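Sketch of the round trip these hunks add, assuming base.config resolves to a single Zookeeper key (names below are illustrative): the node daemon writes config["cluster_name"] into base.config at startup and prints it in its banner, and getClusterInformation() reads the key back so the cluster name also appears in API output.

class MiniZK:
    # Minimal in-memory stand-in for the ZKHandler write/read pair used above.
    def __init__(self):
        self.store = {}

    def write(self, pairs):
        for key, value in pairs:
            self.store[key] = value

    def read(self, key):
        return self.store.get(key)

zkhandler = MiniZK()
config = {"cluster_name": "cluster1"}

# Node daemon side: publish the configured cluster name.
zkhandler.write([("base.config", config["cluster_name"])])

# API daemon side: surface it in cluster status output.
cluster_information = {"cluster_name": zkhandler.read("base.config")}
print(cluster_information)  # {'cluster_name': 'cluster1'}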

View File

@@ -58,7 +58,7 @@ from daemon_lib.automirror import (
)
# Daemon version
version = "0.9.104"
version = "0.9.107"
config = cfg.get_configuration()