Compare commits: 736f37d0f9 ... v0.9.97

13 commits:

| SHA1 |
|---|
| f1fe0c63f5 |
| ab944f9b95 |
| 9714ac20b2 |
| 79ad09ae59 |
| 4c6aabec6a |
| 559400ed90 |
| 78c774b607 |
| a461791ce8 |
| 9fdb6d8708 |
| 2fb7c40497 |
| dee8d186cf |
| 1e9871241e |
| 9cd88ebccb |
CHANGELOG.md (18 changed lines)
@@ -1,5 +1,23 @@
 ## PVC Changelog
 
+###### [v0.9.97](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.97)
+
+* [Client CLI] Ensures --lines is always an integer value
+* [Node Daemon] Fixes a bug if d_network changes during iteration
+* [Node Daemon] Moves to using allocated instead of free memory for node reporting
+* [API Daemon] Fixes a bug if lingering RBD snapshots exist when removing a volume (#180)
+
+###### [v0.9.96](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.96)
+
+* [API Daemon] Fixes a bug when reporting node stats
+* [API Daemon] Fixes a bug deleteing successful benchmark results
+
+###### [v0.9.95](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.95)
+
+* [API Daemon/CLI Client] Adds a flag to allow duplicate VNIs in network templates
+* [API Daemon] Ensures that storage template disks are returned in disk ID order
+* [Client CLI] Fixes a display bug showing all OSDs as split
+
 ###### [v0.9.94](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.94)
 
 * [CLI Client] Fixes an incorrect ordering issue with autobackup summary emails
@@ -27,7 +27,7 @@ from distutils.util import strtobool as dustrtobool
 import daemon_lib.config as cfg
 
 # Daemon version
-version = "0.9.94"
+version = "0.9.97"
 
 # API version
 API_VERSION = 1.0
@@ -125,7 +125,7 @@ def list_template(limit, table, is_fuzzy=True):
 args = (template_data["id"],)
 cur.execute(query, args)
 disks = cur.fetchall()
-data[template_id]["disks"] = disks
+data[template_id]["disks"] = sorted(disks, key=lambda x: x["disk_id"])
 
 close_database(conn, cur)
 
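This is the storage-template ordering entry from the changelog: rather than returning rows in whatever order the database yields them, the disk list is sorted by its disk_id field. A minimal sketch with hypothetical row data:

```python
# Hypothetical rows, as a dict-style cursor might return them
disks = [
    {"disk_id": "sdb", "pool": "vms", "disk_size_gb": 100},
    {"disk_id": "sda", "pool": "vms", "disk_size_gb": 50},
]

# sorted() builds a new list ordered by the key function, so the template
# reports disks in disk ID order regardless of database row order
disks_sorted = sorted(disks, key=lambda x: x["disk_id"])
assert [d["disk_id"] for d in disks_sorted] == ["sda", "sdb"]
```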
@@ -671,9 +671,9 @@ def cli_cluster_maintenance_off():
 @format_opt(
     {
         "pretty": cli_cluster_task_format_pretty,
-        "raw": lambda d: "\n".join([t["id"] for t in d])
-        if isinstance(d, list)
-        else d["state"],
+        "raw": lambda d: (
+            "\n".join([t["id"] for t in d]) if isinstance(d, list) else d["state"]
+        ),
         "json": lambda d: jdumps(d),
         "json-pretty": lambda d: jdumps(d, indent=2),
     }
@@ -892,6 +892,7 @@ def cli_node_ready(
 "--lines",
 "lines",
 default=None,
+type=int,
 show_default=False,
 help="Display this many log lines from the end of the log buffer. [default: 1000; with follow: 10]",
 )
@@ -2516,6 +2517,7 @@ def cli_vm_volume_remove(domain, volume, live_flag, restart_flag):
 "--lines",
 "lines",
 default=None,
+type=int,
 show_default=False,
 help="Display this many log lines from the end of the log buffer. [default: 1000; with follow: 10]",
 )
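Both hunks are the same fix behind the "--lines is always an integer value" changelog entry: with `default=None` and no explicit `type`, Click cannot infer a type and passes the raw string through. Adding `type=int` makes Click cast and validate the value. A standalone sketch (hypothetical `log_tail` command, not the PVC CLI itself):

```python
import click


@click.command()
@click.option(
    "--lines",
    "lines",
    default=None,
    type=int,  # Click now casts the value and rejects non-numeric input
    show_default=False,
    help="Display this many log lines from the end of the log buffer.",
)
def log_tail(lines):
    # lines is guaranteed to be None or an int here, never a raw string
    click.echo(f"Showing {lines if lines is not None else 1000} lines")


if __name__ == "__main__":
    log_tail()
```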
@@ -580,9 +580,11 @@ def cli_cluster_fault_list_format_long(CLI_CONFIG, fault_data):
 fault_id=fault["id"],
 fault_status=fault["status"].title(),
 fault_health_delta=f"-{fault['health_delta']}%",
-fault_acknowledged_at=fault["acknowledged_at"]
-if fault["acknowledged_at"] != ""
-else "N/A",
+fault_acknowledged_at=(
+    fault["acknowledged_at"]
+    if fault["acknowledged_at"] != ""
+    else "N/A"
+),
 fault_last_reported=fault["last_reported"],
 fault_first_reported=fault["first_reported"],
 )
@@ -430,7 +430,9 @@ def format_list_osd(config, osd_list):
 )
 continue
 
-if osd_information.get("is_split") is not None:
+if osd_information.get("is_split") is not None and osd_information.get(
+    "is_split"
+):
     osd_information["device"] = f"{osd_information['device']} [s]"
 
 # Deal with the size to human readable
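This is the "display bug showing all OSDs as split" entry: checking only `is not None` treats a stored value of `False` as a match, so every OSD carrying the key was flagged as split. A small illustration with hypothetical OSD data:

```python
osd_information = {"device": "/dev/sdb", "is_split": False}

# The old check: True whenever the key exists, even if its value is False
old_check = osd_information.get("is_split") is not None
assert old_check is True  # every OSD with the key was labelled "[s]"

# The new check also requires the value itself to be truthy
new_check = (
    osd_information.get("is_split") is not None
    and osd_information.get("is_split")
)
assert not new_check  # a non-split OSD is no longer marked as split
```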
@@ -1765,9 +1765,9 @@ def format_info(config, domain_information, long_output):
 tags_name=tag["name"],
 tags_type=tag["type"],
 tags_protected=str(tag["protected"]),
-tags_protected_colour=ansiprint.green()
-if tag["protected"]
-else ansiprint.blue(),
+tags_protected_colour=(
+    ansiprint.green() if tag["protected"] else ansiprint.blue()
+),
 end=ansiprint.end(),
 )
 )
@@ -2,7 +2,7 @@ from setuptools import setup
 
 setup(
     name="pvc",
-    version="0.9.94",
+    version="0.9.97",
     packages=["pvc.cli", "pvc.lib"],
     install_requires=[
         "Click",
@@ -115,12 +115,13 @@ class BenchmarkError(Exception):
 #
 
 
-def cleanup(job_name, db_conn=None, db_cur=None, zkhandler=None):
+def cleanup(job_name, db_conn=None, db_cur=None, zkhandler=None, final=False):
     if db_conn is not None and db_cur is not None:
-        # Clean up our dangling result
-        query = "DELETE FROM storage_benchmarks WHERE job = %s;"
-        args = (job_name,)
-        db_cur.execute(query, args)
+        if not final:
+            # Clean up our dangling result (non-final runs only)
+            query = "DELETE FROM storage_benchmarks WHERE job = %s;"
+            args = (job_name,)
+            db_cur.execute(query, args)
         db_conn.commit()
         # Close the database connections cleanly
         close_database(db_conn, db_cur)
@@ -410,6 +411,7 @@ def worker_run_benchmark(zkhandler, celery, config, pool):
 db_conn=db_conn,
 db_cur=db_cur,
 zkhandler=zkhandler,
+final=True,
 )
 
 current_stage += 1
@@ -320,7 +320,11 @@ def get_list_osd(zkhandler, limit=None, is_fuzzy=True):
 #
 def getPoolInformation(zkhandler, pool):
     # Parse the stats data
-    (pool_stats_raw, tier, pgs,) = zkhandler.read_many(
+    (
+        pool_stats_raw,
+        tier,
+        pgs,
+    ) = zkhandler.read_many(
         [
             ("pool.stats", pool),
             ("pool.tier", pool),
@@ -824,10 +828,22 @@ def remove_volume(zkhandler, pool, name):
 name, pool
 )
 
-# 1. Remove volume snapshots
+# 1a. Remove PVC-managed volume snapshots
 for snapshot in zkhandler.children(("snapshot", f"{pool}/{name}")):
     remove_snapshot(zkhandler, pool, name, snapshot)
 
+# 1b. Purge any remaining volume snapshots
+retcode, stdout, stderr = common.run_os_command(
+    "rbd snap purge {}/{}".format(pool, name)
+)
+if retcode:
+    return (
+        False,
+        'ERROR: Failed to purge snapshots from RBD volume "{}" in pool "{}": {}'.format(
+            name, pool, stderr
+        ),
+    )
+
 # 2. Remove the volume
 retcode, stdout, stderr = common.run_os_command("rbd rm {}/{}".format(pool, name))
 if retcode:
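This addresses changelog item #180: snapshots created outside of PVC are not tracked in its snapshot list, and an RBD image with any remaining snapshots cannot be removed, so the new step purges whatever is left before the `rbd rm`. A hedged standalone sketch of the same sequence using plain `subprocess` (hypothetical `purge_and_remove` helper; the real code goes through PVC's `common.run_os_command`):

```python
import subprocess


def purge_and_remove(pool: str, name: str) -> None:
    # "rbd snap purge" deletes every snapshot on the image, including ones
    # PVC never tracked, so the following "rbd rm" cannot fail on leftovers
    subprocess.run(["rbd", "snap", "purge", f"{pool}/{name}"], check=True)
    subprocess.run(["rbd", "rm", f"{pool}/{name}"], check=True)
```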
@@ -244,9 +244,9 @@ def get_parsed_configuration(config_file):
 ]
 ][0]
 
-config_cluster_networks_specific[
-    f"{network_type}_dev_ip"
-] = f"{list(network.hosts())[address_id]}/{network.prefixlen}"
+config_cluster_networks_specific[f"{network_type}_dev_ip"] = (
+    f"{list(network.hosts())[address_id]}/{network.prefixlen}"
+)
 
 config = {**config, **config_cluster_networks_specific}
 
@@ -69,6 +69,8 @@ def getNodeHealthDetails(zkhandler, node_name, node_health_plugins):
 plugin_message,
 plugin_data,
 ) = tuple(all_plugin_data[pos_start:pos_end])
+if plugin_data is None:
+    continue
 plugin_output = {
     "name": plugin,
     "last_run": int(plugin_last_run) if plugin_last_run is not None else None,
@@ -156,9 +158,9 @@ def getNodeInformation(zkhandler, node_name):
 zkhandler, node_name, node_health_plugins
 )
 
-if _node_network_stats is not None:
+try:
     node_network_stats = json.loads(_node_network_stats)
-else:
+except Exception:
     node_network_stats = dict()
 
 # Construct a data structure to represent the data
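This hardens the node network statistics parsing: a `None` check alone still lets `json.loads` blow up on empty or malformed data, whereas the `try`/`except` falls back to an empty dict in every failure mode. A small sketch with a hypothetical helper name:

```python
import json


def parse_network_stats(raw):
    # Falls back to {} whether raw is None, empty, or malformed JSON
    try:
        return json.loads(raw)
    except Exception:
        return dict()


assert parse_network_stats(None) == {}
assert parse_network_stats("") == {}
assert parse_network_stats('{"ens3": {"rx_bps": 1024}}') == {"ens3": {"rx_bps": 1024}}
```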
debian/changelog (24 changed lines, vendored)
@@ -1,3 +1,27 @@
+pvc (0.9.97-0) unstable; urgency=high
+
+  * [Client CLI] Ensures --lines is always an integer value
+  * [Node Daemon] Fixes a bug if d_network changes during iteration
+  * [Node Daemon] Moves to using allocated instead of free memory for node reporting
+  * [API Daemon] Fixes a bug if lingering RBD snapshots exist when removing a volume (#180)
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Fri, 19 Apr 2024 10:32:16 -0400
+
+pvc (0.9.96-0) unstable; urgency=high
+
+  * [API Daemon] Fixes a bug when reporting node stats
+  * [API Daemon] Fixes a bug deleteing successful benchmark results
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Fri, 08 Mar 2024 14:23:06 -0500
+
+pvc (0.9.95-0) unstable; urgency=high
+
+  * [API Daemon/CLI Client] Adds a flag to allow duplicate VNIs in network templates
+  * [API Daemon] Ensures that storage template disks are returned in disk ID order
+  * [Client CLI] Fixes a display bug showing all OSDs as split
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Fri, 09 Feb 2024 12:42:00 -0500
+
 pvc (0.9.94-0) unstable; urgency=high
 
   * [CLI Client] Fixes an incorrect ordering issue with autobackup summary emails
@@ -33,7 +33,7 @@ import os
 import signal
 
 # Daemon version
-version = "0.9.94"
+version = "0.9.97"
 
 
 ##########################################################
@@ -49,7 +49,7 @@ import re
 import json
 
 # Daemon version
-version = "0.9.94"
+version = "0.9.97"
 
 
 ##########################################################
@@ -231,7 +231,7 @@ class NetstatsInstance(object):
 # Get a list of all active interfaces
 net_root_path = "/sys/class/net"
 all_ifaces = list()
-for (_, dirnames, _) in walk(net_root_path):
+for _, dirnames, _ in walk(net_root_path):
     all_ifaces.extend(dirnames)
 all_ifaces.sort()
 
@@ -521,7 +521,7 @@ class NodeInstance(object):
 self.logger.out("Acquired write lock for synchronization phase F", state="o")
 time.sleep(0.2)  # Time fir reader to acquire the lock
 # 4. Add gateway IPs
-for network in self.d_network:
+for network in self.d_network.copy():
     self.d_network[network].createGateways()
 self.logger.out("Releasing write lock for synchronization phase F", state="i")
 self.zkhandler.write([("base.config.primary_node.sync_lock", "")])
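This is the "d_network changes during iteration" fix: `self.d_network` can be mutated by another thread while the gateway loop runs, and mutating a dict during iteration raises `RuntimeError: dictionary changed size during iteration`. Iterating over a shallow copy snapshots the keys first. A minimal illustration with made-up network names:

```python
networks = {"net100": "vni100", "net200": "vni200"}

# Unsafe pattern: adding or removing keys while iterating the live dict
# raises "RuntimeError: dictionary changed size during iteration".
#
# for name in networks:
#     networks.pop("net200")

# Safe pattern: iterate a snapshot of the keys instead
for name in networks.copy():
    networks.pop("net200", None)  # the live dict may now change freely
```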
@@ -743,7 +743,7 @@ def node_keepalive(logger, config, zkhandler, this_node, netstats):
 # Get node performance statistics
 this_node.memtotal = int(psutil.virtual_memory().total / 1024 / 1024)
 this_node.memused = int(psutil.virtual_memory().used / 1024 / 1024)
-this_node.memfree = int(psutil.virtual_memory().free / 1024 / 1024)
+this_node.memfree = int(psutil.virtual_memory().available / 1024 / 1024)
 this_node.cpuload = round(os.getloadavg()[0], 2)
 
 # Get node network statistics via netstats instance
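Part of the node memory reporting change from the changelog: psutil's `free` counts only completely unused RAM, while `available` also includes cache and buffers the kernel can reclaim, so it better reflects what can actually be handed out to VMs. A quick comparison:

```python
import psutil

mem = psutil.virtual_memory()

# "free" excludes page cache and buffers; on a busy hypervisor it can look
# alarmingly small even when plenty of memory is reclaimable.
memfree_mib = int(mem.free / 1024 / 1024)

# "available" is the kernel's estimate of memory that can be allocated
# without swapping, which is what node capacity reporting cares about.
memavail_mib = int(mem.available / 1024 / 1024)

print(f"free={memfree_mib} MiB available={memavail_mib} MiB")
```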
@@ -44,7 +44,7 @@ from daemon_lib.vmbuilder import (
 )
 
 # Daemon version
-version = "0.9.94"
+version = "0.9.97"
 
 
 config = cfg.get_configuration()