Compare commits

...

10 Commits

18 changed files with 82 additions and 30 deletions

View File

@ -1 +1 @@
0.9.95
0.9.97

View File

@ -1,5 +1,17 @@
## PVC Changelog
###### [v0.9.97](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.97)
* [Client CLI] Ensures --lines is always an integer value
* [Node Daemon] Fixes a bug if d_network changes during iteration
* [Node Daemon] Moves to using allocated instead of free memory for node reporting
* [API Daemon] Fixes a bug if lingering RBD snapshots exist when removing a volume (#180)
###### [v0.9.96](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.96)
* [API Daemon] Fixes a bug when reporting node stats
* [API Daemon] Fixes a bug deleting successful benchmark results
###### [v0.9.95](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.95)
* [API Daemon/CLI Client] Adds a flag to allow duplicate VNIs in network templates

View File

@ -27,7 +27,7 @@ from distutils.util import strtobool as dustrtobool
import daemon_lib.config as cfg
# Daemon version
version = "0.9.95"
version = "0.9.97"
# API version
API_VERSION = 1.0

View File

@ -671,9 +671,9 @@ def cli_cluster_maintenance_off():
@format_opt(
{
"pretty": cli_cluster_task_format_pretty,
"raw": lambda d: "\n".join([t["id"] for t in d])
if isinstance(d, list)
else d["state"],
"raw": lambda d: (
"\n".join([t["id"] for t in d]) if isinstance(d, list) else d["state"]
),
"json": lambda d: jdumps(d),
"json-pretty": lambda d: jdumps(d, indent=2),
}
@ -892,6 +892,7 @@ def cli_node_ready(
"--lines",
"lines",
default=None,
type=int,
show_default=False,
help="Display this many log lines from the end of the log buffer. [default: 1000; with follow: 10]",
)
@ -2516,6 +2517,7 @@ def cli_vm_volume_remove(domain, volume, live_flag, restart_flag):
"--lines",
"lines",
default=None,
type=int,
show_default=False,
help="Display this many log lines from the end of the log buffer. [default: 1000; with follow: 10]",
)

View File

@ -580,9 +580,11 @@ def cli_cluster_fault_list_format_long(CLI_CONFIG, fault_data):
fault_id=fault["id"],
fault_status=fault["status"].title(),
fault_health_delta=f"-{fault['health_delta']}%",
fault_acknowledged_at=fault["acknowledged_at"]
if fault["acknowledged_at"] != ""
else "N/A",
fault_acknowledged_at=(
fault["acknowledged_at"]
if fault["acknowledged_at"] != ""
else "N/A"
),
fault_last_reported=fault["last_reported"],
fault_first_reported=fault["first_reported"],
)

View File

@ -1765,9 +1765,9 @@ def format_info(config, domain_information, long_output):
tags_name=tag["name"],
tags_type=tag["type"],
tags_protected=str(tag["protected"]),
tags_protected_colour=ansiprint.green()
if tag["protected"]
else ansiprint.blue(),
tags_protected_colour=(
ansiprint.green() if tag["protected"] else ansiprint.blue()
),
end=ansiprint.end(),
)
)

View File

@ -2,7 +2,7 @@ from setuptools import setup
setup(
name="pvc",
version="0.9.95",
version="0.9.97",
packages=["pvc.cli", "pvc.lib"],
install_requires=[
"Click",

View File

@ -115,12 +115,13 @@ class BenchmarkError(Exception):
#
def cleanup(job_name, db_conn=None, db_cur=None, zkhandler=None):
def cleanup(job_name, db_conn=None, db_cur=None, zkhandler=None, final=False):
if db_conn is not None and db_cur is not None:
# Clean up our dangling result
query = "DELETE FROM storage_benchmarks WHERE job = %s;"
args = (job_name,)
db_cur.execute(query, args)
if not final:
# Clean up our dangling result (non-final runs only)
query = "DELETE FROM storage_benchmarks WHERE job = %s;"
args = (job_name,)
db_cur.execute(query, args)
db_conn.commit()
# Close the database connections cleanly
close_database(db_conn, db_cur)
@ -410,6 +411,7 @@ def worker_run_benchmark(zkhandler, celery, config, pool):
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
final=True,
)
current_stage += 1

View File

@ -320,7 +320,11 @@ def get_list_osd(zkhandler, limit=None, is_fuzzy=True):
#
def getPoolInformation(zkhandler, pool):
# Parse the stats data
(pool_stats_raw, tier, pgs,) = zkhandler.read_many(
(
pool_stats_raw,
tier,
pgs,
) = zkhandler.read_many(
[
("pool.stats", pool),
("pool.tier", pool),
@ -824,10 +828,22 @@ def remove_volume(zkhandler, pool, name):
name, pool
)
# 1. Remove volume snapshots
# 1a. Remove PVC-managed volume snapshots
for snapshot in zkhandler.children(("snapshot", f"{pool}/{name}")):
remove_snapshot(zkhandler, pool, name, snapshot)
# 1b. Purge any remaining volume snapshots
retcode, stdout, stderr = common.run_os_command(
"rbd snap purge {}/{}".format(pool, name)
)
if retcode:
return (
False,
'ERROR: Failed to purge snapshots from RBD volume "{}" in pool "{}": {}'.format(
name, pool, stderr
),
)
# 2. Remove the volume
retcode, stdout, stderr = common.run_os_command("rbd rm {}/{}".format(pool, name))
if retcode:

View File

@ -244,9 +244,9 @@ def get_parsed_configuration(config_file):
]
][0]
config_cluster_networks_specific[
f"{network_type}_dev_ip"
] = f"{list(network.hosts())[address_id]}/{network.prefixlen}"
config_cluster_networks_specific[f"{network_type}_dev_ip"] = (
f"{list(network.hosts())[address_id]}/{network.prefixlen}"
)
config = {**config, **config_cluster_networks_specific}

View File

@ -69,6 +69,8 @@ def getNodeHealthDetails(zkhandler, node_name, node_health_plugins):
plugin_message,
plugin_data,
) = tuple(all_plugin_data[pos_start:pos_end])
if plugin_data is None:
continue
plugin_output = {
"name": plugin,
"last_run": int(plugin_last_run) if plugin_last_run is not None else None,
@ -156,9 +158,9 @@ def getNodeInformation(zkhandler, node_name):
zkhandler, node_name, node_health_plugins
)
if _node_network_stats is not None:
try:
node_network_stats = json.loads(_node_network_stats)
else:
except Exception:
node_network_stats = dict()
# Construct a data structure to represent the data

16
debian/changelog vendored
View File

@ -1,3 +1,19 @@
pvc (0.9.97-0) unstable; urgency=high
* [Client CLI] Ensures --lines is always an integer value
* [Node Daemon] Fixes a bug if d_network changes during iteration
* [Node Daemon] Moves to using allocated instead of free memory for node reporting
* [API Daemon] Fixes a bug if lingering RBD snapshots exist when removing a volume (#180)
-- Joshua M. Boniface <joshua@boniface.me> Fri, 19 Apr 2024 10:32:16 -0400
pvc (0.9.96-0) unstable; urgency=high
* [API Daemon] Fixes a bug when reporting node stats
* [API Daemon] Fixes a bug deleting successful benchmark results
-- Joshua M. Boniface <joshua@boniface.me> Fri, 08 Mar 2024 14:23:06 -0500
pvc (0.9.95-0) unstable; urgency=high
* [API Daemon/CLI Client] Adds a flag to allow duplicate VNIs in network templates

View File

@ -33,7 +33,7 @@ import os
import signal
# Daemon version
version = "0.9.95"
version = "0.9.97"
##########################################################

View File

@ -49,7 +49,7 @@ import re
import json
# Daemon version
version = "0.9.95"
version = "0.9.97"
##########################################################

View File

@ -231,7 +231,7 @@ class NetstatsInstance(object):
# Get a list of all active interfaces
net_root_path = "/sys/class/net"
all_ifaces = list()
for (_, dirnames, _) in walk(net_root_path):
for _, dirnames, _ in walk(net_root_path):
all_ifaces.extend(dirnames)
all_ifaces.sort()

View File

@ -521,7 +521,7 @@ class NodeInstance(object):
self.logger.out("Acquired write lock for synchronization phase F", state="o")
time.sleep(0.2)  # Time for reader to acquire the lock
# 4. Add gateway IPs
for network in self.d_network:
for network in self.d_network.copy():
self.d_network[network].createGateways()
self.logger.out("Releasing write lock for synchronization phase F", state="i")
self.zkhandler.write([("base.config.primary_node.sync_lock", "")])

View File

@ -743,7 +743,7 @@ def node_keepalive(logger, config, zkhandler, this_node, netstats):
# Get node performance statistics
this_node.memtotal = int(psutil.virtual_memory().total / 1024 / 1024)
this_node.memused = int(psutil.virtual_memory().used / 1024 / 1024)
this_node.memfree = int(psutil.virtual_memory().free / 1024 / 1024)
this_node.memfree = int(psutil.virtual_memory().available / 1024 / 1024)
this_node.cpuload = round(os.getloadavg()[0], 2)
# Get node network statistics via netstats instance

View File

@ -44,7 +44,7 @@ from daemon_lib.vmbuilder import (
)
# Daemon version
version = "0.9.95"
version = "0.9.97"
config = cfg.get_configuration()