Compare commits

10 Commits

| Author | SHA1 | Date |
| --- | --- | --- |
| | 92feeefd26 | |
| | 38d63d9837 | |
| | 095bcb2373 | |
| | 91e450f399 | |
| | 79eb994a5e | |
| | d65f512897 | |
| | 8af7189dd0 | |
| | ea7a4b2b85 | |
| | 59f97ebbfb | |
| | 072337f1f0 | |

CHANGELOG.md: 15 lines changed
```diff
@@ -1,5 +1,20 @@
 ## PVC Changelog
 
+###### [v0.9.59](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.59)
+
+* [API] Flips the mem(prov) and mem(free) selectors making mem(free) the default for "mem" and "memprov" explicit
+
+###### [v0.9.58](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.58)
+
+* [API] Fixes a bug where migration selector could have case-sensitive operational faults
+
+###### [v0.9.57](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.57)
+
+* [CLI] Removes an invalid reference to VXLAN
+* [CLI] Improves the handling of invalid networks in VM lists and on attach
+* [API] Modularizes the benchmarking library so it can be used externally too
+* [Daemon Library] Adds a module tag file so it can be used externally too
+
 ###### [v0.9.56](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.56)
 
 **Breaking Change**: Existing provisioner scripts are no longer valid; new example scripts are provided.
```
```diff
@@ -27,7 +27,7 @@ from ssl import SSLContext, TLSVersion
 from distutils.util import strtobool as dustrtobool
 
 # Daemon version
-version = "0.9.56"
+version = "0.9.59"
 
 # API version
 API_VERSION = 1.0
```
```diff
@@ -32,6 +32,74 @@ import daemon_lib.common as pvc_common
 import daemon_lib.ceph as pvc_ceph
 
 
+# We run a total of 8 tests, to give a generalized idea of performance on the cluster:
+# 1. A sequential read test of 8GB with a 4M block size
+# 2. A sequential write test of 8GB with a 4M block size
+# 3. A random read test of 8GB with a 4M block size
+# 4. A random write test of 8GB with a 4M block size
+# 5. A random read test of 8GB with a 256k block size
+# 6. A random write test of 8GB with a 256k block size
+# 7. A random read test of 8GB with a 4k block size
+# 8. A random write test of 8GB with a 4k block size
+# Taken together, these 8 results should give a very good indication of the overall storage performance
+# for a variety of workloads.
+test_matrix = {
+    "seq_read": {
+        "direction": "read",
+        "iodepth": "64",
+        "bs": "4M",
+        "rw": "read",
+    },
+    "seq_write": {
+        "direction": "write",
+        "iodepth": "64",
+        "bs": "4M",
+        "rw": "write",
+    },
+    "rand_read_4M": {
+        "direction": "read",
+        "iodepth": "64",
+        "bs": "4M",
+        "rw": "randread",
+    },
+    "rand_write_4M": {
+        "direction": "write",
+        "iodepth": "64",
+        "bs": "4M",
+        "rw": "randwrite",
+    },
+    "rand_read_4K": {
+        "direction": "read",
+        "iodepth": "64",
+        "bs": "4K",
+        "rw": "randread",
+    },
+    "rand_write_4K": {
+        "direction": "write",
+        "iodepth": "64",
+        "bs": "4K",
+        "rw": "randwrite",
+    },
+    "rand_read_4K_lowdepth": {
+        "direction": "read",
+        "iodepth": "1",
+        "bs": "4K",
+        "rw": "randread",
+    },
+    "rand_write_4K_lowdepth": {
+        "direction": "write",
+        "iodepth": "1",
+        "bs": "4K",
+        "rw": "randwrite",
+    },
+}
+
+
+# Specify the benchmark volume name and size
+benchmark_volume_name = "pvcbenchmark"
+benchmark_volume_size = "8G"
+
+
 #
 # Exceptions (used by Celery tasks)
 #
@@ -44,7 +112,7 @@ class BenchmarkError(Exception):
         self, message, job_name=None, db_conn=None, db_cur=None, zkhandler=None
     ):
         self.message = message
-        if job_name is not None:
+        if job_name is not None and db_conn is not None and db_cur is not None:
             # Clean up our dangling result
             query = "DELETE FROM storage_benchmarks WHERE job = %s;"
             args = (job_name,)
@@ -52,6 +120,7 @@ class BenchmarkError(Exception):
             db_conn.commit()
             # Close the database connections cleanly
             close_database(db_conn, db_cur)
+        if job_name is not None and zkhandler is not None:
             zkhandler.disconnect()
 
     def __str__(self):
@@ -116,6 +185,90 @@ def list_benchmarks(job=None):
     return {"message": "No benchmark found."}, 404
 
 
+def prepare_benchmark_volume(
+    pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
+):
+    # Create the RBD volume
+    retcode, retmsg = pvc_ceph.add_volume(
+        zkhandler, pool, benchmark_volume_name, benchmark_volume_size
+    )
+    if not retcode:
+        raise BenchmarkError(
+            'Failed to create volume "{}" on pool "{}": {}'.format(
+                benchmark_volume_name, pool, retmsg
+            ),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )
+    else:
+        print(retmsg)
+
+
+def cleanup_benchmark_volume(
+    pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
+):
+    # Remove the RBD volume
+    retcode, retmsg = pvc_ceph.remove_volume(zkhandler, pool, benchmark_volume_name)
+    if not retcode:
+        raise BenchmarkError(
+            'Failed to remove volume "{}" on pool "{}": {}'.format(
+                benchmark_volume_name, pool, retmsg
+            ),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )
+    else:
+        print(retmsg)
+
+
+def run_benchmark_job(
+    test, pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
+):
+    test_spec = test_matrix[test]
+    print("Running test '{}'".format(test))
+    fio_cmd = """
+        fio \
+            --name={test} \
+            --ioengine=rbd \
+            --pool={pool} \
+            --rbdname={volume} \
+            --output-format=json \
+            --direct=1 \
+            --randrepeat=1 \
+            --numjobs=1 \
+            --time_based \
+            --runtime=75 \
+            --group_reporting \
+            --iodepth={iodepth} \
+            --bs={bs} \
+            --readwrite={rw}
+    """.format(
+        test=test,
+        pool=pool,
+        volume=benchmark_volume_name,
+        iodepth=test_spec["iodepth"],
+        bs=test_spec["bs"],
+        rw=test_spec["rw"],
+    )
+
+    print("Running fio job: {}".format(" ".join(fio_cmd.split())))
+    retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
+    if retcode:
+        raise BenchmarkError(
+            "Failed to run fio test: {}".format(stderr),
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
+        )
+
+    return loads(stdout)
+
+
 def run_benchmark(self, pool):
     # Runtime imports
     import time
@@ -172,20 +325,13 @@ def run_benchmark(self, pool):
     )
     time.sleep(1)
 
-    volume = "pvcbenchmark"
-
-    # Create the RBD volume
-    retcode, retmsg = pvc_ceph.add_volume(zkhandler, pool, volume, "8G")
-    if not retcode:
-        raise BenchmarkError(
-            'Failed to create volume "{}": {}'.format(volume, retmsg),
-            job_name=job_name,
-            db_conn=db_conn,
-            db_cur=db_cur,
-            zkhandler=zkhandler,
-        )
-    else:
-        print(retmsg)
+    prepare_benchmark_volume(
+        pool,
+        job_name=job_name,
+        db_conn=db_conn,
+        db_cur=db_cur,
+        zkhandler=zkhandler,
+    )
 
     # Phase 2 - benchmark run
     self.update_state(
@@ -194,99 +340,17 @@ def run_benchmark(self, pool):
     )
     time.sleep(1)
 
-    # We run a total of 8 tests, to give a generalized idea of performance on the cluster:
-    # 1. A sequential read test of 8GB with a 4M block size
-    # 2. A sequential write test of 8GB with a 4M block size
-    # 3. A random read test of 8GB with a 4M block size
-    # 4. A random write test of 8GB with a 4M block size
-    # 5. A random read test of 8GB with a 256k block size
-    # 6. A random write test of 8GB with a 256k block size
-    # 7. A random read test of 8GB with a 4k block size
-    # 8. A random write test of 8GB with a 4k block size
-    # Taken together, these 8 results should give a very good indication of the overall storage performance
-    # for a variety of workloads.
-    test_matrix = {
-        "seq_read": {"direction": "read", "iodepth": "64", "bs": "4M", "rw": "read"},
-        "seq_write": {"direction": "write", "iodepth": "64", "bs": "4M", "rw": "write"},
-        "rand_read_4M": {
-            "direction": "read",
-            "iodepth": "64",
-            "bs": "4M",
-            "rw": "randread",
-        },
-        "rand_write_4M": {
-            "direction": "write",
-            "iodepth": "64",
-            "bs": "4M",
-            "rw": "randwrite",
-        },
-        "rand_read_4K": {
-            "direction": "read",
-            "iodepth": "64",
-            "bs": "4K",
-            "rw": "randread",
-        },
-        "rand_write_4K": {
-            "direction": "write",
-            "iodepth": "64",
-            "bs": "4K",
-            "rw": "randwrite",
-        },
-        "rand_read_4K_lowdepth": {
-            "direction": "read",
-            "iodepth": "1",
-            "bs": "4K",
-            "rw": "randread",
-        },
-        "rand_write_4K_lowdepth": {
-            "direction": "write",
-            "iodepth": "1",
-            "bs": "4K",
-            "rw": "randwrite",
-        },
-    }
-
     results = dict()
     for test in test_matrix:
-        print("Running test '{}'".format(test))
-        fio_cmd = """
-        fio \
-            --name={test} \
-            --ioengine=rbd \
-            --pool={pool} \
-            --rbdname={volume} \
-            --output-format=json \
-            --direct=1 \
-            --randrepeat=1 \
-            --numjobs=1 \
-            --time_based \
-            --runtime=75 \
-            --group_reporting \
-            --iodepth={iodepth} \
-            --bs={bs} \
-            --readwrite={rw}
-        """.format(
-            test=test,
-            pool=pool,
-            volume=volume,
-            iodepth=test_matrix[test]["iodepth"],
-            bs=test_matrix[test]["bs"],
-            rw=test_matrix[test]["rw"],
+        results[test] = run_benchmark_job(
+            test,
+            pool,
+            job_name=job_name,
+            db_conn=db_conn,
+            db_cur=db_cur,
+            zkhandler=zkhandler,
         )
 
-        print("Running fio job: {}".format(" ".join(fio_cmd.split())))
-        retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
-        if retcode:
-            raise BenchmarkError(
-                "Failed to run fio test: {}".format(stderr),
-                job_name=job_name,
-                db_conn=db_conn,
-                db_cur=db_cur,
-                zkhandler=zkhandler,
-            )
-
-        results[test] = loads(stdout)
-
     # Phase 3 - cleanup
     self.update_state(
         state="RUNNING",
@@ -294,18 +358,13 @@ def run_benchmark(self, pool):
     )
     time.sleep(1)
 
-    # Remove the RBD volume
-    retcode, retmsg = pvc_ceph.remove_volume(zkhandler, pool, volume)
-    if not retcode:
-        raise BenchmarkError(
-            'Failed to remove volume "{}": {}'.format(volume, retmsg),
-            job_name=job_name,
-            db_conn=db_conn,
-            db_cur=db_cur,
-            zkhandler=zkhandler,
-        )
-    else:
-        print(retmsg)
+    cleanup_benchmark_volume(
+        pool,
+        job_name=job_name,
+        db_conn=db_conn,
+        db_cur=db_cur,
+        zkhandler=zkhandler,
+    )
 
     print("Storing result of tests for job '{}' in database".format(job_name))
     try:
```
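The modularization above turns volume preparation, fio execution, and cleanup into plain module-level functions, which is what the v0.9.57 changelog means by "can be used externally too". A minimal sketch of such external use, assuming the module is importable as `pvcapid.benchmark` and that an already-connected `zkhandler` is available (neither assumption is shown in this diff); the database arguments can stay `None`, since `BenchmarkError` now guards its cleanup steps against missing handles:

```python
# Hypothetical external driver; "pvcapid.benchmark" as the import path and
# the connected `zkhandler` object are assumptions, not part of this diff.
from pvcapid.benchmark import (
    cleanup_benchmark_volume,
    prepare_benchmark_volume,
    run_benchmark_job,
    test_matrix,
)


def benchmark_pool(zkhandler, pool):
    # Create the throwaway "pvcbenchmark" RBD volume on the target pool
    prepare_benchmark_volume(pool, zkhandler=zkhandler)

    results = dict()
    try:
        for test in test_matrix:
            # Each job returns fio's parsed JSON document (loads(stdout))
            results[test] = run_benchmark_job(test, pool, zkhandler=zkhandler)
    finally:
        # Remove the volume even if a test raised BenchmarkError
        cleanup_benchmark_volume(pool, zkhandler=zkhandler)

    return results
```

Because every job runs fio with `--output-format=json`, each per-test value is fio's parsed JSON document (a top-level `jobs` list carrying the read/write statistics), the same structure `run_benchmark` goes on to store in the `storage_benchmarks` table.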
```diff
@@ -1253,7 +1253,7 @@ class API_VM_Root(Resource):
             {"name": "node"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1302,7 +1302,7 @@ class API_VM_Root(Resource):
             default: none
             enum:
               - mem
-              - memfree
+              - memprov
               - vcpus
               - load
               - vms
@@ -1400,7 +1400,7 @@ class API_VM_Element(Resource):
             {"name": "node"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1451,7 +1451,7 @@ class API_VM_Element(Resource):
             default: none
             enum:
               - mem
-              - memfree
+              - memprov
               - vcpus
               - load
               - vms
@@ -1650,7 +1650,7 @@ class API_VM_Metadata(Resource):
             {"name": "limit"},
             {
                 "name": "selector",
-                "choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
+                "choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
                 "helptext": "A valid selector must be specified",
             },
             {"name": "autostart"},
@@ -1682,7 +1682,7 @@ class API_VM_Metadata(Resource):
             description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
             enum:
               - mem
-              - memfree
+              - memprov
               - vcpus
               - load
               - vms
```
```diff
@@ -539,9 +539,9 @@ def get_vm_meta(zkhandler, vm):
     retdata = {
         "name": vm,
         "node_limit": domain_node_limit,
-        "node_selector": domain_node_selector,
+        "node_selector": domain_node_selector.lower(),
         "node_autostart": domain_node_autostart,
-        "migration_method": domain_migrate_method,
+        "migration_method": domain_migrate_method.lower(),
     }
 
     return retdata, retcode
```
```diff
@@ -679,6 +679,10 @@ def vm_networks_add(
     from random import randint
     import pvc.cli_lib.network as pvc_network
 
+    network_exists, _ = pvc_network.net_info(config, network)
+    if not network_exists:
+        return False, "Network {} not found on the cluster.".format(network)
+
     status, domain_information = vm_info(config, vm)
     if not status:
         return status, domain_information
@@ -2016,7 +2020,8 @@ def format_list(config, vm_list, raw):
         tag_list = getNiceTagName(domain_information)
         if len(tag_list) < 1:
             tag_list = ["N/A"]
-        vm_net_colour = ""
+
+        net_invalid_list = []
         for net_vni in net_list:
             if (
                 net_vni not in ["cluster", "storage", "upstream"]
@@ -2024,13 +2029,33 @@ def format_list(config, vm_list, raw):
                 and not re.match(r"^hostdev:.*", net_vni)
             ):
                 if int(net_vni) not in [net["vni"] for net in cluster_net_list]:
-                    vm_net_colour = ansiprint.red()
+                    net_invalid_list.append(True)
+                else:
+                    net_invalid_list.append(False)
+            else:
+                net_invalid_list.append(False)
 
+        net_string_list = []
+        for net_idx, net_vni in enumerate(net_list):
+            if net_invalid_list[net_idx]:
+                net_string_list.append(
+                    "{}{}{}".format(
+                        ansiprint.red(),
+                        net_vni,
+                        ansiprint.end(),
+                    )
+                )
+                # Fix the length due to the extra fake characters
+                vm_nets_length -= len(net_vni)
+                vm_nets_length += len(net_string_list[net_idx])
+            else:
+                net_string_list.append(net_vni)
+
         vm_list_output.append(
             "{bold}{vm_name: <{vm_name_length}} \
 {vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \
 {vm_tags: <{vm_tags_length}} \
-{vm_net_colour}{vm_networks: <{vm_nets_length}}{end_colour} \
+{vm_networks: <{vm_nets_length}} \
 {vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \
 {vm_node: <{vm_node_length}} \
 {vm_migrated: <{vm_migrated_length}}{end_bold}".format(
@@ -2049,8 +2074,7 @@ def format_list(config, vm_list, raw):
                 vm_name=domain_information["name"],
                 vm_state=domain_information["state"],
                 vm_tags=",".join(tag_list),
-                vm_net_colour=vm_net_colour,
-                vm_networks=",".join(net_list),
+                vm_networks=",".join(net_string_list),
                 vm_memory=domain_information["memory"],
                 vm_vcpu=domain_information["vcpu"],
                 vm_node=domain_information["node"],
```
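The `# Fix the length due to the extra fake characters` compensation above exists because ANSI colour escapes count toward `str.format` field widths even though they render with zero width. A standalone Python illustration of the problem and of the fix; the escape codes here are generic ANSI red, not taken from PVC's `ansiprint`:

```python
# ANSI escapes are invisible on screen but still count as characters
# when str.format pads a field, so coloured cells break column alignment.
RED = "\x1b[91m"
END = "\x1b[0m"

net_vni = "10000"
coloured = "{}{}{}".format(RED, net_vni, END)

width = 10
print(len(net_vni), len(coloured))             # 5 vs 14: nine invisible characters
print("[{: <{w}}]".format(net_vni, w=width))   # pads to 10 visible columns
print("[{: <{w}}]".format(coloured, w=width))  # no padding: already "wider" than 10

# The fix mirrors format_list(): grow the field width by the invisible overhead
width = width - len(net_vni) + len(coloured)
print("[{: <{w}}]".format(coloured, w=width))  # visible alignment restored
```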
```diff
@@ -807,7 +807,7 @@ def cli_vm():
     "node_selector",
     default="none",
     show_default=True,
-    type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
+    type=click.Choice(["mem", "memprov", "load", "vcpus", "vms", "none"]),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -859,15 +859,15 @@ def vm_define(
     Define a new virtual machine from Libvirt XML configuration file VMCONFIG.
 
     The target node selector ("--node-selector"/"-s") can be "none" to use the cluster default, or one of the following values:
-    * "mem": choose the node with the least provisioned VM memory
-    * "memfree": choose the node with the most (real) free memory
+    * "mem": choose the node with the most (real) free memory
+    * "memprov": choose the node with the least provisioned VM memory
     * "vcpus": choose the node with the least allocated VM vCPUs
     * "load": choose the node with the lowest current load average
     * "vms": choose the node with the least number of provisioned VMs
 
     For most clusters, "mem" should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
-    * "mem" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
-    * "memfree" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to "mem" on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
+    * "mem" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected.
+    * "memprov" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
     * "load" looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
     """
 
@@ -914,7 +914,7 @@ def vm_define(
     "node_selector",
     default=None,
     show_default=False,
-    type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
+    type=click.Choice(["mem", "memprov", "load", "vcpus", "vms", "none"]),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -2404,7 +2404,7 @@ def vm_list(target_node, target_state, target_tag, limit, raw, negate):
 )
 def cli_network():
     """
-    Manage the state of a VXLAN network in the PVC cluster.
+    Manage the state of a network in the PVC cluster.
     """
     pass
 
@@ -4134,7 +4134,7 @@ def provisioner_template_system_list(limit):
     "--node-selector",
     "node_selector",
     type=click.Choice(
-        ["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
+        ["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False
     ),
     default="none",
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
@@ -4230,7 +4230,7 @@ def provisioner_template_system_add(
     "--node-selector",
     "node_selector",
     type=click.Choice(
-        ["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
+        ["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False
    ),
     help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
```
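The provisioner-template options above already pass `case_sensitive=False` to `click.Choice`, and the daemon side now lowercases values on write; together these close the case-sensitivity fault noted in the v0.9.58 changelog. A minimal standalone sketch of the pattern, where the option list comes from the diff but the `demo` command itself is hypothetical:

```python
# Hypothetical demo command; only the choice list comes from the diff above.
import click


@click.command()
@click.option(
    "-s",
    "--node-selector",
    "node_selector",
    default="none",
    show_default=True,
    type=click.Choice(
        ["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False
    ),
)
def demo(node_selector):
    # Normalize exactly as define_vm()/modify_vm_metadata() now do on write
    click.echo(str(node_selector).lower())


if __name__ == "__main__":
    demo()  # "--node-selector MemProv" and "--node-selector memprov" both print "memprov"
```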
```diff
@@ -2,7 +2,7 @@ from setuptools import setup
 
 setup(
     name="pvc",
-    version="0.9.56",
+    version="0.9.59",
     packages=["pvc", "pvc.cli_lib"],
     install_requires=[
         "Click",
```
daemon-common/__init__.py: 0 lines changed (new file)
```diff
@@ -633,14 +633,14 @@ def findTargetNode(zkhandler, dom_uuid):
             search_field = None
 
     # If our search field is invalid, use the default
-    if search_field is None or search_field == "None":
+    if search_field is None or search_field in ["None", "none"]:
         search_field = zkhandler.read("base.config.migration_target_selector")
 
     # Execute the search
     if search_field == "mem":
-        return findTargetNodeMem(zkhandler, node_limit, dom_uuid)
-    if search_field == "memfree":
         return findTargetNodeMemFree(zkhandler, node_limit, dom_uuid)
+    if search_field == "memprov":
+        return findTargetNodeMemProv(zkhandler, node_limit, dom_uuid)
     if search_field == "load":
         return findTargetNodeLoad(zkhandler, node_limit, dom_uuid)
     if search_field == "vcpus":
@@ -678,10 +678,28 @@ def getNodes(zkhandler, node_limit, dom_uuid):
     return valid_node_list
 
 
+#
+# via free memory
+#
+def findTargetNodeMemFree(zkhandler, node_limit, dom_uuid):
+    most_memfree = 0
+    target_node = None
+
+    node_list = getNodes(zkhandler, node_limit, dom_uuid)
+    for node in node_list:
+        memfree = int(zkhandler.read(("node.memory.free", node)))
+
+        if memfree > most_memfree:
+            most_memfree = memfree
+            target_node = node
+
+    return target_node
+
+
 #
 # via provisioned memory
 #
-def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
+def findTargetNodeMemProv(zkhandler, node_limit, dom_uuid):
     most_provfree = 0
     target_node = None
 
@@ -700,24 +718,6 @@ def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
     return target_node
 
 
-#
-# via free memory
-#
-def findTargetNodeMemFree(zkhandler, node_limit, dom_uuid):
-    most_memfree = 0
-    target_node = None
-
-    node_list = getNodes(zkhandler, node_limit, dom_uuid)
-    for node in node_list:
-        memfree = int(zkhandler.read(("node.memory.free", node)))
-
-        if memfree > most_memfree:
-            most_memfree = memfree
-            target_node = node
-
-    return target_node
-
-
 #
 # via load average
 #
```
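The loop body of `findTargetNodeMemProv` (the renamed `findTargetNodeMem`) is elided between the two hunks above; only its signature and the `most_provfree` initialization are visible. A plausible reconstruction, mirroring the `findTargetNodeMemFree` pattern; the `node.memory.provisioned` and `node.memory.used` Zookeeper keys are assumptions, not shown in this diff:

```python
# Plausible completion of findTargetNodeMemProv(); the loop body is elided
# in the diff, so the exact Zookeeper memory keys here are assumptions.
def findTargetNodeMemProv(zkhandler, node_limit, dom_uuid):
    most_provfree = 0
    target_node = None

    node_list = getNodes(zkhandler, node_limit, dom_uuid)
    for node in node_list:
        memprov = int(zkhandler.read(("node.memory.provisioned", node)))
        memused = int(zkhandler.read(("node.memory.used", node)))
        memfree = int(zkhandler.read(("node.memory.free", node)))
        memtotal = memused + memfree

        # "Provisioned-free" memory counts stopped/disabled VMs against the
        # node, which is exactly the memprov caveat described in the docs
        provfree = memtotal - memprov
        if provfree > most_provfree:
            most_provfree = provfree
            target_node = node

    return target_node
```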
```diff
@@ -308,9 +308,9 @@ def define_vm(
         (("domain.console.log", dom_uuid), ""),
         (("domain.console.vnc", dom_uuid), ""),
         (("domain.meta.autostart", dom_uuid), node_autostart),
-        (("domain.meta.migrate_method", dom_uuid), migration_method),
+        (("domain.meta.migrate_method", dom_uuid), str(migration_method).lower()),
         (("domain.meta.node_limit", dom_uuid), formatted_node_limit),
-        (("domain.meta.node_selector", dom_uuid), node_selector),
+        (("domain.meta.node_selector", dom_uuid), str(node_selector).lower()),
         (("domain.meta.tags", dom_uuid), ""),
         (("domain.migrate.sync_lock", dom_uuid), ""),
     ]
@@ -447,7 +447,9 @@ def modify_vm_metadata(
         update_list.append((("domain.meta.node_limit", dom_uuid), node_limit))
 
     if node_selector is not None:
-        update_list.append((("domain.meta.node_selector", dom_uuid), node_selector))
+        update_list.append(
+            (("domain.meta.node_selector", dom_uuid), str(node_selector).lower())
+        )
 
     if node_autostart is not None:
         update_list.append((("domain.meta.autostart", dom_uuid), node_autostart))
@@ -456,7 +458,9 @@ def modify_vm_metadata(
         update_list.append((("domain.profile", dom_uuid), provisioner_profile))
 
     if migration_method is not None:
-        update_list.append((("domain.meta.migrate_method", dom_uuid), migration_method))
+        update_list.append(
+            (("domain.meta.migrate_method", dom_uuid), str(migration_method).lower())
+        )
 
     if len(update_list) < 1:
         return False, "ERROR: No updates to apply."
```
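One subtlety of lowercasing on write: when no selector is supplied, `node_selector` can be the Python `None` object, and `str(None).lower()` produces the string `"none"`. This is why `findTargetNode` above now treats both `"None"` (as written by older daemons) and `"none"` as the signal to fall back to the cluster default:

```python
# str(None).lower() yields "none", so legacy "None" strings and newly
# written values both reach the default branch in findTargetNode()
node_selector = None
stored = str(node_selector).lower()
print(stored)                      # none
print(stored in ["None", "none"])  # True -> read base.config.migration_target_selector
```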
debian/changelog: 21 lines changed (vendored)

```diff
@@ -1,3 +1,24 @@
+pvc (0.9.59-0) unstable; urgency=high
+
+  * [API] Flips the mem(prov) and mem(free) selectors making mem(free) the default for "mem" and "memprov" explicit
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Tue, 15 Nov 2022 15:50:15 -0500
+
+pvc (0.9.58-0) unstable; urgency=high
+
+  * [API] Fixes a bug where migration selector could have case-sensitive operational faults
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Mon, 07 Nov 2022 12:27:48 -0500
+
+pvc (0.9.57-0) unstable; urgency=high
+
+  * [CLI] Removes an invalid reference to VXLAN
+  * [CLI] Improves the handling of invalid networks in VM lists and on attach
+  * [API] Modularizes the benchmarking library so it can be used externally too
+  * [Daemon Library] Adds a module tag file so it can be used externally too
+
+ -- Joshua M. Boniface <joshua@boniface.me>  Sun, 06 Nov 2022 01:39:50 -0400
+
 pvc (0.9.56-0) unstable; urgency=high
 
   * [API/Provisioner] Fundamentally revamps the provisioner script framework to provide more extensibility (BREAKING CHANGE)
```
```diff
@@ -356,15 +356,15 @@ The password for the PVC node daemon to log in to the IPMI interface.
 The default selector algorithm to use when migrating VMs away from a node; individual VMs can override this default.
 
 Valid `target_selector` values are:
-* `mem`: choose the node with the least provisioned VM memory
-* `memfree`: choose the node with the most (real) free memory
+* `mem`: choose the node with the most (real) free memory
+* `memprov`: choose the node with the least provisioned VM memory
 * `vcpus`: choose the node with the least allocated VM vCPUs
 * `load`: choose the node with the lowest current load average
 * `vms`: choose the node with the least number of provisioned VMs
 
 For most clusters, `mem` should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
-* `mem` looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
-* `memfree` looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to `mem` on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
+* `mem` looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected.
+* `memprov` looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
 * `load` looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
 
 #### `system` → `configuration` → `directories` → `dynamic_directory`
```
```diff
@@ -122,7 +122,7 @@ pvc:
       pass: Passw0rd
   # migration: Migration option configuration
   migration:
-    # target_selector: Criteria to select the ideal migration target, options: mem, memfree, load, vcpus, vms
+    # target_selector: Criteria to select the ideal migration target, options: mem, memprov, load, vcpus, vms
     target_selector: mem
 # configuration: Local system configurations
 configuration:
```
```diff
@@ -48,7 +48,7 @@ import re
 import json
 
 # Daemon version
-version = "0.9.56"
+version = "0.9.59"
 
 
 ##########################################################
```