Compare commits


8 Commits

SHA1 Message Date
60967b5606 Fix formatters colour bug for mirror state 2025-03-02 14:58:26 -05:00
89bfbe1fd8 Add translation of domain UUIDs to names
Allows frontends to better handle the domain list gracefully, as humans
don't care about the UUIDs.
2025-02-28 21:52:42 -05:00
be092756a9 Add cluster name to Zookeeper and log+API output 2025-02-27 00:57:07 -05:00
387fcfdf6b Bump version to 0.9.107 2025-02-10 23:15:21 -05:00
d695e855f9 Catch errors if snapshot fails to remove
A missing snapshot could cause an exception here which would break the
entire autobackup run. Catch the exception and continue on as this
should never be a fatal situation.
2025-02-10 16:33:44 -05:00
309b203f5d Bump version to 0.9.106 2024-12-09 16:45:10 -05:00
de4241161e Use stored_bytes in utilization calculation
used_bytes is a broken value since it does not take object replicas into
account, thus throwing off these calculations. Use stored_bytes instead
which properly represents this value.
2024-12-09 16:43:43 -05:00
1950d22876 Correct indentation 2024-11-26 13:38:51 -05:00
14 changed files with 56 additions and 22 deletions

View File

@@ -1 +1 @@
-0.9.105
+0.9.107

View File

@@ -1,5 +1,13 @@
 ## PVC Changelog
 
+###### [v0.9.107](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.107)
+
+  * [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
+
+###### [v0.9.106](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.106)
+
+  * [API Daemon] Fixes a calculation bug when checking storage free space
+
 ###### [v0.9.105](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.105)
 
   * [API Daemon/Provisioner] Corrects some small bugs with OVA handling

View File

@@ -81,6 +81,7 @@ def create_app():
     print("|--------------------------------------------------------------|")
     print("| Parallel Virtual Cluster API daemon v{0: <23} |".format(version))
     print("| Debug: {0: <53} |".format(str(config["debug"])))
+    print("| Cluster: {0: <51} |".format(config["cluster_name"]))
     print("| API version: v{0: <46} |".format(API_VERSION))
     print(
         "| Listen: {0: <52} |".format(

View File

@@ -221,7 +221,7 @@ def cli_cluster_status_format_pretty(CLI_CONFIG, data):
             continue
         if state in ["start"]:
             state_colour = ansii["green"]
-        elif state in ["migrate", "disable", "provision", "mirror"]:
+        elif state in ["migrate", "disable", "provision"]:
             state_colour = ansii["blue"]
         elif state in ["mirror"]:
             state_colour = ansii["purple"]
@@ -331,7 +331,7 @@ def cli_cluster_status_format_short(CLI_CONFIG, data):
                 )
             )
-            messages = "\n ".join(message_list)
+        messages = "\n ".join(message_list)
     else:
         messages = "None"
 
     output.append(f"{ansii['purple']}Active faults:{ansii['end']} {messages}")

View File

@@ -2,7 +2,7 @@ from setuptools import setup
 
 setup(
     name="pvc",
-    version="0.9.105",
+    version="0.9.107",
     packages=["pvc.cli", "pvc.lib"],
     install_requires=[
         "Click",

View File

@@ -469,15 +469,16 @@ def run_vm_backup(zkhandler, celery, config, vm_detail, force_full=False):
     if len(marked_for_deletion) > 0:
         for backup_to_delete in marked_for_deletion:
-            ret = vm.vm_worker_remove_snapshot(
-                zkhandler, None, vm_name, backup_to_delete["snapshot_name"]
-            )
-            if ret is False:
-                error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', leaving in tracked backups"
+            try:
+                ret = vm.vm_worker_remove_snapshot(
+                    zkhandler, None, vm_name, backup_to_delete["snapshot_name"]
+                )
+            except Exception:
+                error_message = f"Failed to remove obsolete backup snapshot '{backup_to_delete['snapshot_name']}', removing from tracked backups anyways"
                 log_err(celery, error_message)
-            else:
-                rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}")
-                tracked_backups.remove(backup_to_delete)
+            rmtree(f"{vm_backup_path}/{backup_to_delete['snapshot_name']}")
+            tracked_backups.remove(backup_to_delete)
 
     tracked_backups = update_tracked_backups()
 
     return tracked_backups
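Per the commit message, this swaps a return-value check for an exception guard: an exception from the snapshot removal (for example, on an already-missing snapshot) previously propagated and broke the entire autobackup run, whereas now it is logged and the snapshot is dropped from tracking regardless. A hedged sketch of the catch-and-continue pattern, with `remove_snapshot` and the backup dicts as stand-ins for the real PVC worker API:

```python
import logging

def prune_backups(tracked_backups, remove_snapshot):
    # Iterate over a copy, since entries are removed while looping.
    for backup in list(tracked_backups):
        try:
            remove_snapshot(backup["snapshot_name"])
        except Exception:
            # A missing snapshot is not fatal: log it, but still drop the
            # entry so one bad snapshot cannot abort the whole run.
            logging.exception(
                "Failed to remove snapshot %s", backup["snapshot_name"]
            )
        tracked_backups.remove(backup)
    return tracked_backups
```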

View File

@@ -596,11 +596,11 @@ def add_volume(zkhandler, pool, name, size, force_flag=False, zk_only=False):
 
     # Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
     pool_total_bytes = (
-        int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
+        int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
     )
     pool_safe_total_bytes = int(pool_total_bytes * 0.80)
     pool_safe_free_bytes = pool_safe_total_bytes - int(
-        pool_information["stats"]["used_bytes"]
+        pool_information["stats"]["stored_bytes"]
     )
     if size_bytes >= pool_safe_free_bytes and not force_flag:
         return (
@@ -656,11 +656,11 @@ def clone_volume(zkhandler, pool, name_src, name_new, force_flag=False):
 
     # Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
     pool_total_bytes = (
-        int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
+        int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
     )
     pool_safe_total_bytes = int(pool_total_bytes * 0.80)
     pool_safe_free_bytes = pool_safe_total_bytes - int(
-        pool_information["stats"]["used_bytes"]
+        pool_information["stats"]["stored_bytes"]
     )
     if size_bytes >= pool_safe_free_bytes and not force_flag:
         return (
@@ -721,11 +721,11 @@ def resize_volume(zkhandler, pool, name, size, force_flag=False):
 
     # Check if we're greater than 80% utilization after the create; error if so unless we have the force flag
    pool_total_bytes = (
-        int(pool_information["stats"]["used_bytes"]) + pool_total_free_bytes
+        int(pool_information["stats"]["stored_bytes"]) + pool_total_free_bytes
     )
     pool_safe_total_bytes = int(pool_total_bytes * 0.80)
     pool_safe_free_bytes = pool_safe_total_bytes - int(
-        pool_information["stats"]["used_bytes"]
+        pool_information["stats"]["stored_bytes"]
     )
     if size_bytes >= pool_safe_free_bytes and not force_flag:
         return (
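The reasoning behind the `stored_bytes` switch, per the commit message: Ceph reports `stored_bytes` as logical data, while `used_bytes` is inflated by the replica count, so mixing `used_bytes` with the logical free-space figure always understates the safe headroom. A worked example with illustrative numbers only (a hypothetical pool with 3x replication):

```python
# Illustrative numbers for a hypothetical pool with 3x replication:
# 100 GiB of logical data and 500 GiB of logical free space.
stored_bytes = 100 * 1024**3  # logical data: what the check should use
used_bytes = 300 * 1024**3    # raw usage, inflated by the 3 replicas
free_bytes = 500 * 1024**3

def safe_free(occupied_bytes):
    total = occupied_bytes + free_bytes
    safe_total = int(total * 0.80)      # cap the pool at 80% utilization
    return safe_total - occupied_bytes  # headroom before hitting the cap

print(safe_free(used_bytes) / 1024**3)    # 340.0 GiB: skewed by replicas
print(safe_free(stored_bytes) / 1024**3)  # 380.0 GiB: true logical headroom
```

Since `used_bytes` is always at least `stored_bytes`, the old calculation reduced the apparent headroom by 20% of the replica overhead on every check.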

View File

@@ -496,6 +496,7 @@ def getClusterInformation(zkhandler):
 
     # Format the status data
     cluster_information = {
+        "cluster_name": zkhandler.read("base.config"),
         "cluster_health": getClusterHealthFromFaults(zkhandler, faults_data),
         "node_health": getNodeHealth(zkhandler, node_list),
         "maintenance": maintenance_state,

View File

@@ -1212,3 +1212,7 @@ def get_detect_device(detect_string):
         return device
     else:
         return None
+
+
+def translate_domains_to_names(zkhandler, domain_list):
+    return list(zkhandler.read_many([("domain.name", d) for d in domain_list]))
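Per the commit message, this helper lets frontends show names instead of UUIDs, and it resolves each domain UUID against its `domain.name` key in a single batched `read_many` call rather than one Zookeeper round trip per UUID. A usage sketch with an in-memory stub standing in for the real schema-aware handler (the UUIDs and names are hypothetical):

```python
class StubZKHandler:
    # Hypothetical UUID-to-name data; the real values live in Zookeeper.
    names = {"uuid-1": "web01", "uuid-2": "db01"}

    def read_many(self, keys):
        # Resolve a batch of ("domain.name", uuid) tuples in one call.
        return [self.names[uuid] for (_key, uuid) in keys]

def translate_domains_to_names(zkhandler, domain_list):
    return list(zkhandler.read_many([("domain.name", d) for d in domain_list]))

print(translate_domains_to_names(StubZKHandler(), ["uuid-1", "uuid-2"]))
# ['web01', 'db01']
```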

View File

@@ -142,7 +142,9 @@ def getNodeInformation(zkhandler, node_name):
     node_mem_free = int(_node_mem_free)
     node_load = float(_node_load)
     node_domains_count = int(_node_domains_count)
-    node_running_domains = _node_running_domains.split()
+    node_running_domains = common.translate_domains_to_names(
+        zkhandler, _node_running_domains.split()
+    )
 
     try:
         node_health = int(_node_health)

debian/changelog
View File

@@ -1,3 +1,15 @@
+pvc (0.9.107-0) unstable; urgency=high
+
+  * [Worker Daemon] Fixes a bug where snapshot removal fails during autobackups
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Mon, 10 Feb 2025 23:15:21 -0500
+
+pvc (0.9.106-0) unstable; urgency=high
+
+  * [API Daemon] Fixes a calculation bug when checking storage free space
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Mon, 09 Dec 2024 16:45:10 -0500
+
 pvc (0.9.105-0) unstable; urgency=high
 
   * [API Daemon/Provisioner] Corrects some small bugs with OVA handling

View File

@@ -33,7 +33,7 @@ import os
 
 import signal
 
 # Daemon version
-version = "0.9.105"
+version = "0.9.107"
 
 ##########################################################
@@ -64,6 +64,7 @@ def entrypoint():
     logger.out("|--------------------------------------------------------------|")
     logger.out("| Parallel Virtual Cluster health daemon v{0: <20} |".format(version))
     logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
+    logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
     logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
     logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
     logger.out("| ID: {0: <56} |".format(config["node_id"]))

View File

@@ -49,7 +49,7 @@ import re
 
 import json
 
 # Daemon version
-version = "0.9.105"
+version = "0.9.107"
 
 ##########################################################
@@ -83,6 +83,7 @@ def entrypoint():
     logger.out("|--------------------------------------------------------------|")
     logger.out("| Parallel Virtual Cluster node daemon v{0: <22} |".format(version))
     logger.out("| Debug: {0: <53} |".format(str(config["debug"])))
+    logger.out("| Cluster: {0: <51} |".format(config["cluster_name"]))
     logger.out("| FQDN: {0: <54} |".format(config["node_fqdn"]))
     logger.out("| Host: {0: <54} |".format(config["node_hostname"]))
     logger.out("| ID: {0: <56} |".format(config["node_id"]))
@@ -301,6 +302,9 @@ def entrypoint():
     # Set up this node in Zookeeper
     pvcnoded.util.zookeeper.setup_node(logger, config, zkhandler)
 
+    # Set the cluster name in Zookeeper
+    zkhandler.write([("base.config", config["cluster_name"])])
+
     # Check that the primary node key exists and create it with us as primary if not
     try:
         current_primary = zkhandler.read("base.config.primary_node")
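Together with the `getClusterInformation` hunk above, this completes the feature's flow: the node daemon publishes the configured cluster name to the `base.config` key at startup, and the API daemon reads the same key back when formatting cluster status. A minimal sketch of that write-then-read flow, with an in-memory stand-in for the schema-aware handler:

```python
class InMemoryZK:
    # Stand-in for the real zkhandler; "base.config" is the schema key
    # shown in the diffs, not a literal Zookeeper path.
    def __init__(self):
        self.store = {}

    def write(self, pairs):
        self.store.update(dict(pairs))

    def read(self, key):
        return self.store.get(key)

zk = InMemoryZK()
zk.write([("base.config", "mycluster")])  # node daemon, once at startup
print(zk.read("base.config"))             # API daemon: prints "mycluster"
```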

View File

@@ -58,7 +58,7 @@ from daemon_lib.automirror import (
 )
 
 # Daemon version
-version = "0.9.105"
+version = "0.9.107"
 
 config = cfg.get_configuration()