Compare commits

...

5 Commits

SHA1        Message                                           Date
60967b5606  Fix formatters colour bug for mirror state        2025-03-02 14:58:26 -05:00
89bfbe1fd8  Add translation of domain UUIDs to names          2025-02-28 21:52:42 -05:00
            Allows frontends to better handle the domain list gracefully, as humans
            don't care about the UUIDs.
be092756a9  Add cluster name to Zookeeper and log+API output  2025-02-27 00:57:07 -05:00
387fcfdf6b  Bump version to 0.9.107                           2025-02-10 23:15:21 -05:00
d695e855f9  Catch errors if snapshot fails to remove          2025-02-10 16:33:44 -05:00
            A missing snapshot could cause an exception here which would break the
            entire autobackup run. Catch the exception and continue on as this
            should never be a fatal situation.
13 changed files with 39 additions and 15 deletions


@@ -1 +1 @@
-0.9.106
+0.9.107


@@ -1,5 +1,9 @@
 ## PVC Changelog
 
+###### [v0.9.107](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.107)
+
+  * [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
+
 ###### [v0.9.106](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.106)
 
   * [API Daemon] Fixes a calculation bug when checking storage free space


@@ -81,6 +81,7 @@ def create_app():
     print("|--------------------------------------------------------------|")
     print("| Parallel Virtual Cluster API daemon v{0: <23} |".format(version))
     print("| Debug: {0: <53} |".format(str(config["debug"])))
+    print("| Cluster: {0: <51} |".format(config["cluster_name"]))
     print("| API version: v{0: <46} |".format(API_VERSION))
     print(
         "| Listen: {0: <52} |".format(


@@ -221,7 +221,7 @@ def cli_cluster_status_format_pretty(CLI_CONFIG, data):
                 continue
             if state in ["start"]:
                 state_colour = ansii["green"]
-            elif state in ["migrate", "disable", "provision", "mirror"]:
+            elif state in ["migrate", "disable", "provision"]:
                 state_colour = ansii["blue"]
             elif state in ["mirror"]:
                 state_colour = ansii["purple"]
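
The bug fixed here is simple branch shadowing: because "mirror" was listed in the earlier blue elif, the dedicated purple branch below it could never match. A reduced sketch of the corrected ordering, assuming only that ansii maps colour names to ANSI escape sequences as it does in the CLI formatter:

    # Reduced sketch of the corrected colour selection; "ansii" is assumed to map
    # colour names to ANSI escape sequences, as in the PVC CLI formatter.
    def state_colour_for(state, ansii):
        if state in ["start"]:
            return ansii["green"]
        elif state in ["migrate", "disable", "provision"]:
            return ansii["blue"]
        elif state in ["mirror"]:
            # Reachable again now that "mirror" no longer matches the blue branch.
            return ansii["purple"]
        # Illustrative default only; the real formatter handles several more states.
        return ansii["red"]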


@@ -2,7 +2,7 @@ from setuptools import setup
 
 setup(
     name="pvc",
-    version="0.9.106",
+    version="0.9.107",
     packages=["pvc.cli", "pvc.lib"],
     install_requires=[
         "Click",


@@ -469,13 +469,14 @@ def run_vm_backup(zkhandler, celery, config, vm_detail, force_full=False):
         if len(marked_for_deletion) > 0:
             for backup_to_delete in marked_for_deletion:
+                try:
                     ret = vm.vm_worker_remove_snapshot(
                         zkhandler, None, vm_name, backup_to_delete["snapshot_name"]
                     )
-                if ret is False:
-                    error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', leaving in tracked backups"
+                except Exception:
+                    error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', removing from tracked backups anyways"
                     log_err(celery, error_message)
-                else:
                 rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}")
                 tracked_backups.remove(backup_to_delete)
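
The behavioural change is easier to see outside the diff: a failed snapshot removal is now logged and the backup is still pruned from the tracked list, instead of the exception aborting the entire autobackup run. A minimal sketch of that pattern, with hypothetical remove_snapshot and log_err callables standing in for the real PVC worker helpers:

    # Minimal sketch of the new pruning behaviour; remove_snapshot() and log_err()
    # are hypothetical stand-ins for the real PVC worker helpers.
    def prune_obsolete_backups(tracked_backups, marked_for_deletion, remove_snapshot, log_err):
        for backup in list(marked_for_deletion):
            try:
                remove_snapshot(backup["snapshot_name"])
            except Exception:
                # A missing snapshot is not fatal: log it and keep going.
                log_err(f"Failed to remove snapshot '{backup['snapshot_name']}'")
            # The backup is dropped from tracking whether or not removal succeeded.
            tracked_backups.remove(backup)
        return tracked_backups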


@@ -496,6 +496,7 @@ def getClusterInformation(zkhandler):
 
     # Format the status data
     cluster_information = {
+        "cluster_name": zkhandler.read("base.config"),
         "cluster_health": getClusterHealthFromFaults(zkhandler, faults_data),
         "node_health": getNodeHealth(zkhandler, node_list),
         "maintenance": maintenance_state,


@@ -1212,3 +1212,7 @@ def get_detect_device(detect_string):
         return device
     else:
         return None
+
+
+def translate_domains_to_names(zkhandler, domain_list):
+    return list(zkhandler.read_many([("domain.name", d) for d in domain_list]))
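
The new helper batches the per-domain domain.name reads through zkhandler.read_many() instead of issuing one read per UUID, handing callers a list of names in place of the raw UUID list. Conceptually it behaves like the sketch below, where an ordinary dict stands in for the Zookeeper keys (an assumption made purely for illustration):

    # Illustration only: a plain dict stands in for Zookeeper's per-domain
    # "domain.name" keys that the real helper reads via zkhandler.read_many().
    def translate_domains_to_names_sketch(names_by_uuid, domain_list):
        return [names_by_uuid[uuid] for uuid in domain_list]

    # Hypothetical UUID-to-name map and a node's running-domain UUID list.
    names = translate_domains_to_names_sketch(
        {"uuid-1": "web01", "uuid-2": "db01"},
        ["uuid-1", "uuid-2"],
    )
    # names == ["web01", "db01"]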


@@ -142,7 +142,9 @@ def getNodeInformation(zkhandler, node_name):
     node_mem_free = int(_node_mem_free)
     node_load = float(_node_load)
     node_domains_count = int(_node_domains_count)
-    node_running_domains = _node_running_domains.split()
+    node_running_domains = common.translate_domains_to_names(
+        zkhandler, _node_running_domains.split()
+    )
 
     try:
         node_health = int(_node_health)

debian/changelog

@@ -1,3 +1,9 @@
+pvc (0.9.107-0) unstable; urgency=high
+
+  * [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Mon, 10 Feb 2025 23:15:21 -0500
+
 pvc (0.9.106-0) unstable; urgency=high
 
   * [API Daemon] Fixes a calculation bug when checking storage free space


@@ -33,7 +33,7 @@ import os
 import signal
 
 # Daemon version
-version = "0.9.106"
+version = "0.9.107"
 
 ##########################################################
@@ -64,6 +64,7 @@ def entrypoint():
     logger.out("|--------------------------------------------------------------|")
     logger.out("| Parallel Virtual Cluster health daemon v{0: <20} |".format(version))
     logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
+    logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
     logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
     logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
     logger.out("| ID: {0: <56} |".format(config["node_id"]))


@@ -49,7 +49,7 @@ import re
 import json
 
 # Daemon version
-version = "0.9.106"
+version = "0.9.107"
 
 ##########################################################
@@ -83,6 +83,7 @@ def entrypoint():
     logger.out("|--------------------------------------------------------------|")
     logger.out("| Parallel Virtual Cluster node daemon v{0: <22} |".format(version))
     logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
+    logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
    logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
     logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
     logger.out("| ID: {0: <56} |".format(config["node_id"]))
@@ -301,6 +302,9 @@ def entrypoint():
     # Set up this node in Zookeeper
     pvcnoded.util.zookeeper.setup_node(logger, config, zkhandler)
 
+    # Set the cluster name in Zookeeper
+    zkhandler.write([("base.config", config["cluster_name"])])
+
     # Check that the primary node key exists and create it with us as primary if not
     try:
         current_primary = zkhandler.read("base.config.primary_node")
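
Taken together with the getClusterInformation() change above, the node daemon publishes the configured cluster name to the base.config key at startup and the API daemon reads it back when assembling cluster status, so every frontend sees the same name. A compressed sketch of that round trip, with a hypothetical in-memory stand-in for the real zkhandler:

    # Hypothetical in-memory stand-in for the PVC zkhandler, for illustration only.
    class FakeZKHandler:
        def __init__(self):
            self._store = {}

        def write(self, kvpairs):
            for key, value in kvpairs:
                self._store[key] = value

        def read(self, key):
            return self._store.get(key)

    zkhandler = FakeZKHandler()

    # Node daemon side: publish the configured cluster name at startup.
    zkhandler.write([("base.config", "mycluster")])

    # API daemon side: read it back for the cluster status output.
    assert zkhandler.read("base.config") == "mycluster"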


@@ -58,7 +58,7 @@ from daemon_lib.automirror import (
 )
 
 # Daemon version
-version = "0.9.106"
+version = "0.9.107"
 
 config = cfg.get_configuration()