Compare commits

10 Commits

SHA1 Message Date
92feeefd26 Bump version to 0.9.59 2022-11-15 15:50:15 -05:00
38d63d9837 Flip behaviour of memory selectors
It didn't make any sense to me for mem(prov) to be the default selector,
since this has too many caveats versus mem(free). Switch to using
mem(free) as the default (i.e. "mem") and make memprov the alternative.
2022-11-15 15:45:59 -05:00
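For context, the selector dispatch changed further down in this compare now resolves the two names as follows; this is a condensed sketch of the modified findTargetNode() logic, not additional code:

    if search_field == "mem":
        # new default: pick the node with the most real free memory
        return findTargetNodeMemFree(zkhandler, node_limit, dom_uuid)
    if search_field == "memprov":
        # explicit alternative: pick the node with the least provisioned VM memory
        return findTargetNodeMemProv(zkhandler, node_limit, dom_uuid)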
095bcb2373 Bump version to 0.9.58 2022-11-07 12:27:48 -05:00
91e450f399 Remove extra lower() call where not needed 2022-11-07 12:26:50 -05:00
79eb994a5e Ensure equality of none and None for selector 2022-11-07 11:59:53 -05:00
d65f512897 Bump version to 0.9.57 2022-11-06 01:39:50 -04:00
8af7189dd0 Add module tag for daemon lib 2022-11-04 03:47:18 -04:00
ea7a4b2b85 Make benchmarker function as a module
1. Move the test_matrix, volume name, and size to module-level variables
so they can be accessed externally if this is imported.
2. Separate the volume creation and volume cleanup into functions.
3. Separate the individual benchmark runs into a function.

This should enable easier calling of the various subcomponents
externally, e.g. for external benchmark scripts.
2022-11-03 21:33:32 -04:00
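As a rough illustration of the external calling this enables (a sketch only: the daemon_lib.benchmark import path, the already-connected zkhandler, and the pool name are assumptions, not part of this change), an external benchmark script could drive the new module-level pieces like so:

    import daemon_lib.benchmark as benchmark  # assumed import path for the benchmarker module

    def run_external_benchmark(zkhandler, pool="vms"):
        # zkhandler: an already-connected PVC Zookeeper handler; "vms" is a hypothetical pool name
        results = dict()
        benchmark.prepare_benchmark_volume(pool, zkhandler=zkhandler)
        for test in benchmark.test_matrix:
            results[test] = benchmark.run_benchmark_job(test, pool, zkhandler=zkhandler)
        benchmark.cleanup_benchmark_volume(pool, zkhandler=zkhandler)
        return results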
59f97ebbfb Better handle invalid nets in VMs
1. Error out when trying to add a new network to a VM if the network
doesn't exist on the cluster.
2. When showing the VM list, only show invalid networks in red, not the
whole list.
2022-11-01 10:24:24 -04:00
072337f1f0 Remove VXLAN ref where it isn't correct 2022-11-01 09:40:13 -04:00
16 changed files with 296 additions and 173 deletions

View File

@ -1 +1 @@
0.9.56
0.9.59

View File

@ -1,5 +1,20 @@
## PVC Changelog
###### [v0.9.59](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.59)
* [API] Flips the mem(prov) and mem(free) selectors, making mem(free) the default for "mem" and "memprov" the explicit alternative
###### [v0.9.58](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.58)
* [API] Fixes a bug where migration selector could have case-sensitive operational faults
###### [v0.9.57](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.57)
* [CLI] Removes an invalid reference to VXLAN
* [CLI] Improves the handling of invalid networks in VM lists and on attach
* [API] Modularizes the benchmarking library so it can be used externally too
* [Daemon Library] Adds a module tag file so it can be used externally too
###### [v0.9.56](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.56)
**Breaking Change**: Existing provisioner scripts are no longer valid; new example scripts are provided.

View File

@ -27,7 +27,7 @@ from ssl import SSLContext, TLSVersion
from distutils.util import strtobool as dustrtobool
# Daemon version
version = "0.9.56"
version = "0.9.59"
# API version
API_VERSION = 1.0

View File

@ -32,6 +32,74 @@ import daemon_lib.common as pvc_common
import daemon_lib.ceph as pvc_ceph
# We run a total of 8 tests, to give a generalized idea of performance on the cluster:
# 1. A sequential read test of 8GB with a 4M block size
# 2. A sequential write test of 8GB with a 4M block size
# 3. A random read test of 8GB with a 4M block size
# 4. A random write test of 8GB with a 4M block size
# 5. A random read test of 8GB with a 4k block size
# 6. A random write test of 8GB with a 4k block size
# 7. A random read test of 8GB with a 4k block size and an I/O depth of 1
# 8. A random write test of 8GB with a 4k block size and an I/O depth of 1
# Taken together, these 8 results should give a very good indication of the overall storage performance
# for a variety of workloads.
test_matrix = {
"seq_read": {
"direction": "read",
"iodepth": "64",
"bs": "4M",
"rw": "read",
},
"seq_write": {
"direction": "write",
"iodepth": "64",
"bs": "4M",
"rw": "write",
},
"rand_read_4M": {
"direction": "read",
"iodepth": "64",
"bs": "4M",
"rw": "randread",
},
"rand_write_4M": {
"direction": "write",
"iodepth": "64",
"bs": "4M",
"rw": "randwrite",
},
"rand_read_4K": {
"direction": "read",
"iodepth": "64",
"bs": "4K",
"rw": "randread",
},
"rand_write_4K": {
"direction": "write",
"iodepth": "64",
"bs": "4K",
"rw": "randwrite",
},
"rand_read_4K_lowdepth": {
"direction": "read",
"iodepth": "1",
"bs": "4K",
"rw": "randread",
},
"rand_write_4K_lowdepth": {
"direction": "write",
"iodepth": "1",
"bs": "4K",
"rw": "randwrite",
},
}
# Specify the benchmark volume name and size
benchmark_volume_name = "pvcbenchmark"
benchmark_volume_size = "8G"
#
# Exceptions (used by Celery tasks)
#
@ -44,7 +112,7 @@ class BenchmarkError(Exception):
self, message, job_name=None, db_conn=None, db_cur=None, zkhandler=None
):
self.message = message
if job_name is not None:
if job_name is not None and db_conn is not None and db_cur is not None:
# Clean up our dangling result
query = "DELETE FROM storage_benchmarks WHERE job = %s;"
args = (job_name,)
@ -52,6 +120,7 @@ class BenchmarkError(Exception):
db_conn.commit()
# Close the database connections cleanly
close_database(db_conn, db_cur)
if job_name is not None and zkhandler is not None:
zkhandler.disconnect()
def __str__(self):
@ -116,6 +185,90 @@ def list_benchmarks(job=None):
return {"message": "No benchmark found."}, 404
def prepare_benchmark_volume(
pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
):
# Create the RBD volume
retcode, retmsg = pvc_ceph.add_volume(
zkhandler, pool, benchmark_volume_name, benchmark_volume_size
)
if not retcode:
raise BenchmarkError(
'Failed to create volume "{}" on pool "{}": {}'.format(
benchmark_volume_name, pool, retmsg
),
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
else:
print(retmsg)
def cleanup_benchmark_volume(
pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
):
# Remove the RBD volume
retcode, retmsg = pvc_ceph.remove_volume(zkhandler, pool, benchmark_volume_name)
if not retcode:
raise BenchmarkError(
'Failed to remove volume "{}" on pool "{}": {}'.format(
benchmark_volume_name, pool, retmsg
),
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
else:
print(retmsg)
def run_benchmark_job(
test, pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
):
test_spec = test_matrix[test]
print("Running test '{}'".format(test))
fio_cmd = """
fio \
--name={test} \
--ioengine=rbd \
--pool={pool} \
--rbdname={volume} \
--output-format=json \
--direct=1 \
--randrepeat=1 \
--numjobs=1 \
--time_based \
--runtime=75 \
--group_reporting \
--iodepth={iodepth} \
--bs={bs} \
--readwrite={rw}
""".format(
test=test,
pool=pool,
volume=benchmark_volume_name,
iodepth=test_spec["iodepth"],
bs=test_spec["bs"],
rw=test_spec["rw"],
)
print("Running fio job: {}".format(" ".join(fio_cmd.split())))
retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
if retcode:
raise BenchmarkError(
"Failed to run fio test: {}".format(stderr),
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
return loads(stdout)
def run_benchmark(self, pool):
# Runtime imports
import time
@ -172,20 +325,13 @@ def run_benchmark(self, pool):
)
time.sleep(1)
volume = "pvcbenchmark"
# Create the RBD volume
retcode, retmsg = pvc_ceph.add_volume(zkhandler, pool, volume, "8G")
if not retcode:
raise BenchmarkError(
'Failed to create volume "{}": {}'.format(volume, retmsg),
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
else:
print(retmsg)
prepare_benchmark_volume(
pool,
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
# Phase 2 - benchmark run
self.update_state(
@ -194,99 +340,17 @@ def run_benchmark(self, pool):
)
time.sleep(1)
# We run a total of 8 tests, to give a generalized idea of performance on the cluster:
# 1. A sequential read test of 8GB with a 4M block size
# 2. A sequential write test of 8GB with a 4M block size
# 3. A random read test of 8GB with a 4M block size
# 4. A random write test of 8GB with a 4M block size
# 5. A random read test of 8GB with a 4k block size
# 6. A random write test of 8GB with a 4k block size
# 7. A random read test of 8GB with a 4k block size and an I/O depth of 1
# 8. A random write test of 8GB with a 4k block size and an I/O depth of 1
# Taken together, these 8 results should give a very good indication of the overall storage performance
# for a variety of workloads.
test_matrix = {
"seq_read": {"direction": "read", "iodepth": "64", "bs": "4M", "rw": "read"},
"seq_write": {"direction": "write", "iodepth": "64", "bs": "4M", "rw": "write"},
"rand_read_4M": {
"direction": "read",
"iodepth": "64",
"bs": "4M",
"rw": "randread",
},
"rand_write_4M": {
"direction": "write",
"iodepth": "64",
"bs": "4M",
"rw": "randwrite",
},
"rand_read_4K": {
"direction": "read",
"iodepth": "64",
"bs": "4K",
"rw": "randread",
},
"rand_write_4K": {
"direction": "write",
"iodepth": "64",
"bs": "4K",
"rw": "randwrite",
},
"rand_read_4K_lowdepth": {
"direction": "read",
"iodepth": "1",
"bs": "4K",
"rw": "randread",
},
"rand_write_4K_lowdepth": {
"direction": "write",
"iodepth": "1",
"bs": "4K",
"rw": "randwrite",
},
}
results = dict()
for test in test_matrix:
print("Running test '{}'".format(test))
fio_cmd = """
fio \
--name={test} \
--ioengine=rbd \
--pool={pool} \
--rbdname={volume} \
--output-format=json \
--direct=1 \
--randrepeat=1 \
--numjobs=1 \
--time_based \
--runtime=75 \
--group_reporting \
--iodepth={iodepth} \
--bs={bs} \
--readwrite={rw}
""".format(
test=test,
pool=pool,
volume=volume,
iodepth=test_matrix[test]["iodepth"],
bs=test_matrix[test]["bs"],
rw=test_matrix[test]["rw"],
results[test] = run_benchmark_job(
test,
pool,
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
print("Running fio job: {}".format(" ".join(fio_cmd.split())))
retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
if retcode:
raise BenchmarkError(
"Failed to run fio test: {}".format(stderr),
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
results[test] = loads(stdout)
# Phase 3 - cleanup
self.update_state(
state="RUNNING",
@ -294,18 +358,13 @@ def run_benchmark(self, pool):
)
time.sleep(1)
# Remove the RBD volume
retcode, retmsg = pvc_ceph.remove_volume(zkhandler, pool, volume)
if not retcode:
raise BenchmarkError(
'Failed to remove volume "{}": {}'.format(volume, retmsg),
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
else:
print(retmsg)
cleanup_benchmark_volume(
pool,
job_name=job_name,
db_conn=db_conn,
db_cur=db_cur,
zkhandler=zkhandler,
)
print("Storing result of tests for job '{}' in database".format(job_name))
try:

View File

@ -1253,7 +1253,7 @@ class API_VM_Root(Resource):
{"name": "node"},
{
"name": "selector",
"choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
"choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
"helptext": "A valid selector must be specified",
},
{"name": "autostart"},
@ -1302,7 +1302,7 @@ class API_VM_Root(Resource):
default: none
enum:
- mem
- memfree
- memprov
- vcpus
- load
- vms
@ -1400,7 +1400,7 @@ class API_VM_Element(Resource):
{"name": "node"},
{
"name": "selector",
"choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
"choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
"helptext": "A valid selector must be specified",
},
{"name": "autostart"},
@ -1451,7 +1451,7 @@ class API_VM_Element(Resource):
default: none
enum:
- mem
- memfree
- memprov
- vcpus
- load
- vms
@ -1650,7 +1650,7 @@ class API_VM_Metadata(Resource):
{"name": "limit"},
{
"name": "selector",
"choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
"choices": ("mem", "memprov", "vcpus", "load", "vms", "none"),
"helptext": "A valid selector must be specified",
},
{"name": "autostart"},
@ -1682,7 +1682,7 @@ class API_VM_Metadata(Resource):
description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
enum:
- mem
- memfree
- memprov
- vcpus
- load
- vms

View File

@ -539,9 +539,9 @@ def get_vm_meta(zkhandler, vm):
retdata = {
"name": vm,
"node_limit": domain_node_limit,
"node_selector": domain_node_selector,
"node_selector": domain_node_selector.lower(),
"node_autostart": domain_node_autostart,
"migration_method": domain_migrate_method,
"migration_method": domain_migrate_method.lower(),
}
return retdata, retcode

View File

@ -679,6 +679,10 @@ def vm_networks_add(
from random import randint
import pvc.cli_lib.network as pvc_network
network_exists, _ = pvc_network.net_info(config, network)
if not network_exists:
return False, "Network {} not found on the cluster.".format(network)
status, domain_information = vm_info(config, vm)
if not status:
return status, domain_information
@ -2016,7 +2020,8 @@ def format_list(config, vm_list, raw):
tag_list = getNiceTagName(domain_information)
if len(tag_list) < 1:
tag_list = ["N/A"]
vm_net_colour = ""
net_invalid_list = []
for net_vni in net_list:
if (
net_vni not in ["cluster", "storage", "upstream"]
@ -2024,13 +2029,33 @@ def format_list(config, vm_list, raw):
and not re.match(r"^hostdev:.*", net_vni)
):
if int(net_vni) not in [net["vni"] for net in cluster_net_list]:
vm_net_colour = ansiprint.red()
net_invalid_list.append(True)
else:
net_invalid_list.append(False)
else:
net_invalid_list.append(False)
net_string_list = []
for net_idx, net_vni in enumerate(net_list):
if net_invalid_list[net_idx]:
net_string_list.append(
"{}{}{}".format(
ansiprint.red(),
net_vni,
ansiprint.end(),
)
)
# Fix the length due to the extra fake characters
vm_nets_length -= len(net_vni)
vm_nets_length += len(net_string_list[net_idx])
else:
net_string_list.append(net_vni)
vm_list_output.append(
"{bold}{vm_name: <{vm_name_length}} \
{vm_state_colour}{vm_state: <{vm_state_length}}{end_colour} \
{vm_tags: <{vm_tags_length}} \
{vm_net_colour}{vm_networks: <{vm_nets_length}}{end_colour} \
{vm_networks: <{vm_nets_length}} \
{vm_memory: <{vm_ram_length}} {vm_vcpu: <{vm_vcpu_length}} \
{vm_node: <{vm_node_length}} \
{vm_migrated: <{vm_migrated_length}}{end_bold}".format(
@ -2049,8 +2074,7 @@ def format_list(config, vm_list, raw):
vm_name=domain_information["name"],
vm_state=domain_information["state"],
vm_tags=",".join(tag_list),
vm_net_colour=vm_net_colour,
vm_networks=",".join(net_list),
vm_networks=",".join(net_string_list),
vm_memory=domain_information["memory"],
vm_vcpu=domain_information["vcpu"],
vm_node=domain_information["node"],
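To illustrate the width compensation in the hunk above (illustrative only, not part of the change; the literal escape sequences stand in for ansiprint.red() and ansiprint.end()):

    plain = "1000"
    coloured = "\033[91m" + plain + "\033[0m"  # a red VNI string as built in the loop above
    extra = len(coloured) - len(plain)         # 9 invisible characters for these example codes
    # vm_nets_length must grow by this difference so the padded column still lines up visually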

View File

@ -807,7 +807,7 @@ def cli_vm():
"node_selector",
default="none",
show_default=True,
type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
type=click.Choice(["mem", "memprov", "load", "vcpus", "vms", "none"]),
help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
)
@click.option(
@ -859,15 +859,15 @@ def vm_define(
Define a new virtual machine from Libvirt XML configuration file VMCONFIG.
The target node selector ("--node-selector"/"-s") can be "none" to use the cluster default, or one of the following values:
* "mem": choose the node with the least provisioned VM memory
* "memfree": choose the node with the most (real) free memory
* "mem": choose the node with the most (real) free memory
* "memprov": choose the node with the least provisioned VM memory
* "vcpus": choose the node with the least allocated VM vCPUs
* "load": choose the node with the lowest current load average
* "vms": choose the node with the least number of provisioned VMs
For most clusters, "mem" should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
* "mem" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
* "memfree" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to "mem" on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
* "mem" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected.
* "memprov" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
* "load" looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
"""
@ -914,7 +914,7 @@ def vm_define(
"node_selector",
default=None,
show_default=False,
type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
type=click.Choice(["mem", "memprov", "load", "vcpus", "vms", "none"]),
help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
)
@click.option(
@ -2404,7 +2404,7 @@ def vm_list(target_node, target_state, target_tag, limit, raw, negate):
)
def cli_network():
"""
Manage the state of a VXLAN network in the PVC cluster.
Manage the state of a network in the PVC cluster.
"""
pass
@ -4134,7 +4134,7 @@ def provisioner_template_system_list(limit):
"--node-selector",
"node_selector",
type=click.Choice(
["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False
),
default="none",
help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
@ -4230,7 +4230,7 @@ def provisioner_template_system_add(
"--node-selector",
"node_selector",
type=click.Choice(
["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
["mem", "memprov", "vcpus", "vms", "load", "none"], case_sensitive=False
),
help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
)

View File

@ -2,7 +2,7 @@ from setuptools import setup
setup(
name="pvc",
version="0.9.56",
version="0.9.59",
packages=["pvc", "pvc.cli_lib"],
install_requires=[
"Click",

View File

View File

@ -633,14 +633,14 @@ def findTargetNode(zkhandler, dom_uuid):
search_field = None
# If our search field is invalid, use the default
if search_field is None or search_field == "None":
if search_field is None or search_field in ["None", "none"]:
search_field = zkhandler.read("base.config.migration_target_selector")
# Execute the search
if search_field == "mem":
return findTargetNodeMem(zkhandler, node_limit, dom_uuid)
if search_field == "memfree":
return findTargetNodeMemFree(zkhandler, node_limit, dom_uuid)
if search_field == "memprov":
return findTargetNodeMemProv(zkhandler, node_limit, dom_uuid)
if search_field == "load":
return findTargetNodeLoad(zkhandler, node_limit, dom_uuid)
if search_field == "vcpus":
@ -678,10 +678,28 @@ def getNodes(zkhandler, node_limit, dom_uuid):
return valid_node_list
#
# via free memory
#
def findTargetNodeMemFree(zkhandler, node_limit, dom_uuid):
most_memfree = 0
target_node = None
node_list = getNodes(zkhandler, node_limit, dom_uuid)
for node in node_list:
memfree = int(zkhandler.read(("node.memory.free", node)))
if memfree > most_memfree:
most_memfree = memfree
target_node = node
return target_node
#
# via provisioned memory
#
def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
def findTargetNodeMemProv(zkhandler, node_limit, dom_uuid):
most_provfree = 0
target_node = None
@ -700,24 +718,6 @@ def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
return target_node
#
# via free memory
#
def findTargetNodeMemFree(zkhandler, node_limit, dom_uuid):
most_memfree = 0
target_node = None
node_list = getNodes(zkhandler, node_limit, dom_uuid)
for node in node_list:
memfree = int(zkhandler.read(("node.memory.free", node)))
if memfree > most_memfree:
most_memfree = memfree
target_node = node
return target_node
#
# via load average
#

View File

@ -308,9 +308,9 @@ def define_vm(
(("domain.console.log", dom_uuid), ""),
(("domain.console.vnc", dom_uuid), ""),
(("domain.meta.autostart", dom_uuid), node_autostart),
(("domain.meta.migrate_method", dom_uuid), migration_method),
(("domain.meta.migrate_method", dom_uuid), str(migration_method).lower()),
(("domain.meta.node_limit", dom_uuid), formatted_node_limit),
(("domain.meta.node_selector", dom_uuid), node_selector),
(("domain.meta.node_selector", dom_uuid), str(node_selector).lower()),
(("domain.meta.tags", dom_uuid), ""),
(("domain.migrate.sync_lock", dom_uuid), ""),
]
@ -447,7 +447,9 @@ def modify_vm_metadata(
update_list.append((("domain.meta.node_limit", dom_uuid), node_limit))
if node_selector is not None:
update_list.append((("domain.meta.node_selector", dom_uuid), node_selector))
update_list.append(
(("domain.meta.node_selector", dom_uuid), str(node_selector).lower())
)
if node_autostart is not None:
update_list.append((("domain.meta.autostart", dom_uuid), node_autostart))
@ -456,7 +458,9 @@ def modify_vm_metadata(
update_list.append((("domain.profile", dom_uuid), provisioner_profile))
if migration_method is not None:
update_list.append((("domain.meta.migrate_method", dom_uuid), migration_method))
update_list.append(
(("domain.meta.migrate_method", dom_uuid), str(migration_method).lower())
)
if len(update_list) < 1:
return False, "ERROR: No updates to apply."

debian/changelog (vendored): 21 changed lines
View File

@ -1,3 +1,24 @@
pvc (0.9.59-0) unstable; urgency=high
* [API] Flips the mem(prov) and mem(free) selectors, making mem(free) the default for "mem" and "memprov" the explicit alternative
-- Joshua M. Boniface <joshua@boniface.me> Tue, 15 Nov 2022 15:50:15 -0500
pvc (0.9.58-0) unstable; urgency=high
* [API] Fixes a bug where migration selector could have case-sensitive operational faults
-- Joshua M. Boniface <joshua@boniface.me> Mon, 07 Nov 2022 12:27:48 -0500
pvc (0.9.57-0) unstable; urgency=high
* [CLI] Removes an invalid reference to VXLAN
* [CLI] Improves the handling of invalid networks in VM lists and on attach
* [API] Modularizes the benchmarking library so it can be used externally too
* [Daemon Library] Adds a module tag file so it can be used externally too
-- Joshua M. Boniface <joshua@boniface.me> Sun, 06 Nov 2022 01:39:50 -0400
pvc (0.9.56-0) unstable; urgency=high
* [API/Provisioner] Fundamentally revamps the provisioner script framework to provide more extensibility (BREAKING CHANGE)

View File

@ -356,15 +356,15 @@ The password for the PVC node daemon to log in to the IPMI interface.
The default selector algorithm to use when migrating VMs away from a node; individual VMs can override this default.
Valid `target_selector` values are:
* `mem`: choose the node with the least provisioned VM memory
* `memfree`: choose the node with the most (real) free memory
* `mem`: choose the node with the most (real) free memory
* `memprov`: choose the node with the least provisioned VM memory
* `vcpus`: choose the node with the least allocated VM vCPUs
* `load`: choose the node with the lowest current load average
* `vms`: choose the node with the least number of provisioned VMs
For most clusters, `mem` should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
* `mem` looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
* `memfree` looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to `mem` on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
* `mem` looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected.
* `memprov` looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
* `load` looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
#### `system` → `configuration` → `directories` → `dynamic_directory`

View File

@ -122,7 +122,7 @@ pvc:
pass: Passw0rd
# migration: Migration option configuration
migration:
# target_selector: Criteria to select the ideal migration target, options: mem, memfree, load, vcpus, vms
# target_selector: Criteria to select the ideal migration target, options: mem, memprov, load, vcpus, vms
target_selector: mem
# configuration: Local system configurations
configuration:

View File

@ -48,7 +48,7 @@ import re
import json
# Daemon version
version = "0.9.56"
version = "0.9.59"
##########################################################