Compare commits

21 commits:

  * 239c392892
  * 172d0a86e4
  * d8e57a26c5
  * 9b499b9f48
  * 881550b610
  * 2a21d48128
  * 8d0f26ff7a
  * bcabd7d079
  * 05a316cdd6
  * 4b36753f27
  * 171f6ac9ed
  * 645b525ad7
  * ec559aec0d
  * 71ffd5a191
  * 2739c27299
  * 56129a3636
  * 932b3c55a3
  * 92e2ff7449
  * d8d3feee22
  * b1357cafdb
  * f8cdcb30ba
CHANGELOG.md (27 changed lines)
@@ -1,5 +1,32 @@
 ## PVC Changelog
 
+###### [v0.9.54](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.54)
+
+[CLI Client] Fixes a bad variable reference from the previous change
+[API Daemon] Enables TLSv1 with an SSLContext object for maximum compatibility
+
+###### [v0.9.53](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.53)
+
+* [API] Fixes sort order of VM list (for real this time)
+
+###### [v0.9.52](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.52)
+
+* [CLI] Fixes a bug with vm modify not requiring a cluster
+* [Docs] Adds a reference to the bootstrap daemon
+* [API] Adds sorting to node and VM lists for consistency
+* [Node Daemon/API] Adds kb_ stats values for OSD stats
+
+###### [v0.9.51](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.51)
+
+* [CLI Client] Fixes a faulty literal_eval when viewing task status
+* [CLI Client] Adds a confirmation flag to the vm disable command
+* [Node Daemon] Removes the pvc-flush service
+
+###### [v0.9.50](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.50)
+
+* [Node Daemon/API/CLI] Adds free memory node selector
+* [Node Daemon] Fixes bug sending space-containing detect disk strings
+
 ###### [v0.9.49](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.49)
 
 * [Node Daemon] Fixes bugs with OSD stat population on creation
@@ -19,7 +19,7 @@ As a consequence of its features, PVC makes administrating very high-uptime VMs
 
 PVC also features an optional, fully customizable VM provisioning framework, designed to automate and simplify VM deployments using custom provisioning profiles, scripts, and CloudInit userdata API support.
 
-Installation of PVC is accomplished by two main components: a [Node installer ISO](https://github.com/parallelvirtualcluster/pvc-installer) which creates on-demand installer ISOs, and an [Ansible role framework](https://github.com/parallelvirtualcluster/pvc-ansible) to configure, bootstrap, and administrate the nodes. Once up, the cluster is managed via an HTTP REST API, accessible via a Python Click CLI client or WebUI.
+Installation of PVC is accomplished by two main components: a [Node installer ISO](https://github.com/parallelvirtualcluster/pvc-installer) which creates on-demand installer ISOs, and an [Ansible role framework](https://github.com/parallelvirtualcluster/pvc-ansible) to configure, bootstrap, and administrate the nodes. Installation can also be fully automated with a companion [cluster bootstrapping system](https://github.com/parallelvirtualcluster/pvc-bootstrap). Once up, the cluster is managed via an HTTP REST API, accessible via a Python Click CLI client or WebUI.
 
 Just give it physical servers, and it will run your VMs without you having to think about it, all in just an hour or two of setup time.
 
@@ -22,10 +22,12 @@
 import os
 import yaml
 
+from ssl import SSLContext, TLSVersion
+
 from distutils.util import strtobool as dustrtobool
 
 # Daemon version
-version = "0.9.49"
+version = "0.9.54"
 
 # API version
 API_VERSION = 1.0
@@ -123,7 +125,10 @@ def entrypoint():
 import pvcapid.flaskapi as pvc_api # noqa: E402
 
 if config["ssl_enabled"]:
-context = (config["ssl_cert_file"], config["ssl_key_file"])
+context = SSLContext()
+context.minimum_version = TLSVersion.TLSv1
+context.get_ca_certs()
+context.load_cert_chain(config["ssl_cert_file"], keyfile=config["ssl_key_file"])
 else:
 context = None
 
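For orientation (an editorial aside, not part of the diff): the API daemon change above replaces a plain (certfile, keyfile) pair with an explicit SSLContext whose minimum version is TLSv1, so that older clients can still negotiate TLS. A minimal, self-contained sketch of the same idea follows, assuming a Flask-style server that accepts an ssl_context argument; the file paths and port are invented for illustration only.

```python
from ssl import SSLContext, TLSVersion

from flask import Flask

app = Flask(__name__)


def build_ssl_context(cert_file, key_file):
    # Build a server-side TLS context that still accepts TLSv1 clients,
    # mirroring the intent of the change above. SSLContext() with no
    # protocol argument matches the form used in the diff.
    context = SSLContext()
    context.minimum_version = TLSVersion.TLSv1
    context.load_cert_chain(cert_file, keyfile=key_file)
    return context


if __name__ == "__main__":
    # Hypothetical paths and port, for illustration only.
    ssl_context = build_ssl_context("/etc/pvc/api-cert.pem", "/etc/pvc/api-key.pem")
    app.run(host="0.0.0.0", port=7370, ssl_context=ssl_context)
```

Setting minimum_version = TLSVersion.TLSv1 explicitly permits clients as old as TLSv1; raising it later (for example to TLSVersion.TLSv1_2) is a one-line change.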
@@ -1002,7 +1002,7 @@ class API_VM_Root(Resource):
 type: string
 node_selector:
 type: string
-description: The selector used to determine candidate nodes during migration
+description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
 node_autostart:
 type: boolean
 description: Whether to autostart the VM when its node returns to ready domain state
@@ -1252,7 +1252,7 @@ class API_VM_Root(Resource):
 {"name": "node"},
 {
 "name": "selector",
-"choices": ("mem", "vcpus", "load", "vms", "none"),
+"choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
 "helptext": "A valid selector must be specified",
 },
 {"name": "autostart"},
@@ -1297,13 +1297,15 @@ class API_VM_Root(Resource):
 name: selector
 type: string
 required: false
-description: The selector used to determine candidate nodes during migration
-default: mem
+description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
+default: none
 enum:
 - mem
+- memfree
 - vcpus
 - load
 - vms
+- none (cluster default)
 - in: query
 name: autostart
 type: boolean
@@ -1397,7 +1399,7 @@ class API_VM_Element(Resource):
 {"name": "node"},
 {
 "name": "selector",
-"choices": ("mem", "vcpus", "load", "vms", "none"),
+"choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
 "helptext": "A valid selector must be specified",
 },
 {"name": "autostart"},
@@ -1444,10 +1446,11 @@ class API_VM_Element(Resource):
 name: selector
 type: string
 required: false
-description: The selector used to determine candidate nodes during migration
-default: mem
+description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
+default: none
 enum:
 - mem
+- memfree
 - vcpus
 - load
 - vms
@@ -1626,7 +1629,7 @@ class API_VM_Metadata(Resource):
 type: string
 node_selector:
 type: string
-description: The selector used to determine candidate nodes during migration
+description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
 node_autostart:
 type: string
 description: Whether to autostart the VM when its node returns to ready domain state
@@ -1646,7 +1649,7 @@ class API_VM_Metadata(Resource):
 {"name": "limit"},
 {
 "name": "selector",
-"choices": ("mem", "vcpus", "load", "vms", "none"),
+"choices": ("mem", "memfree", "vcpus", "load", "vms", "none"),
 "helptext": "A valid selector must be specified",
 },
 {"name": "autostart"},
@@ -1675,12 +1678,14 @@ class API_VM_Metadata(Resource):
 name: selector
 type: string
 required: false
-description: The selector used to determine candidate nodes during migration
+description: The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference
 enum:
 - mem
+- memfree
 - vcpus
 - load
 - vms
+- none (cluster default)
 - in: query
 name: autostart
 type: boolean
@@ -23,7 +23,6 @@ from requests_toolbelt.multipart.encoder import (
 MultipartEncoder,
 MultipartEncoderMonitor,
 )
-from ast import literal_eval
 
 import pvc.cli_lib.ansiprint as ansiprint
 from pvc.cli_lib.common import UploadProgressBar, call_api
@@ -793,10 +792,10 @@ def task_status(config, task_id=None, is_watching=False):
 task["type"] = task_type
 task["worker"] = task_host
 task["id"] = task_job.get("id")
-task_args = literal_eval(task_job.get("args"))
+task_args = task_job.get("args")
 task["vm_name"] = task_args[0]
 task["vm_profile"] = task_args[1]
-task_kwargs = literal_eval(task_job.get("kwargs"))
+task_kwargs = task_job.get("kwargs")
 task["vm_define"] = str(bool(task_kwargs["define_vm"]))
 task["vm_start"] = str(bool(task_kwargs["start_vm"]))
 task_data.append(task)
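For context (an editorial aside, not part of the diff): the task's args and kwargs evidently arrive at the CLI already decoded into lists and dicts, so passing them through ast.literal_eval, which expects a string containing a Python literal, raised an error. A tiny illustration with invented values:

```python
from ast import literal_eval

# literal_eval() parses a *string* containing a Python literal...
print(literal_eval("['myvm', 'myprofile']"))  # -> ['myvm', 'myprofile']

# ...but raises ValueError when handed an already-decoded list, which is
# why the two literal_eval() calls were dropped in the hunk above.
try:
    literal_eval(["myvm", "myprofile"])
except ValueError as err:
    print(f"literal_eval on a list fails: {err}")
```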
@@ -803,11 +803,11 @@ def cli_vm():
 )
 @click.option(
 "-s",
-"--selector",
+"--node-selector",
 "node_selector",
-default="mem",
+default="none",
 show_default=True,
-type=click.Choice(["mem", "load", "vcpus", "vms", "none"]),
+type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
 help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
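As an aside (not part of the diff), the renamed flag follows Click's pattern of declaring a short name, a long name, and an explicit target variable on the same option. A self-contained sketch of that pattern, using a hypothetical command name:

```python
import click


@click.command()
@click.option(
    "-s",
    "--node-selector",
    "node_selector",  # explicit parameter name the callback receives
    default="none",
    show_default=True,
    type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
    help='Method to determine optimal target node during autoselect; "none" uses the cluster default.',
)
def define(node_selector):
    # "none" is simply passed through; the cluster-wide target_selector
    # configured on the node daemons then applies.
    click.echo(f"selector = {node_selector}")


if __name__ == "__main__":
    define()
```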
@@ -857,6 +857,18 @@ def vm_define(
 ):
 """
 Define a new virtual machine from Libvirt XML configuration file VMCONFIG.
+
+The target node selector ("--node-selector"/"-s") can be "none" to use the cluster default, or one of the following values:
+* "mem": choose the node with the least provisioned VM memory
+* "memfree": choose the node with the most (real) free memory
+* "vcpus": choose the node with the least allocated VM vCPUs
+* "load": choose the node with the lowest current load average
+* "vms": choose the node with the least number of provisioned VMs
+
+For most clusters, "mem" should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
+* "mem" looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
+* "memfree" looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to "mem" on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
+* "load" looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
 """
 
 # Open the XML file
@@ -898,11 +910,11 @@ def vm_define(
 )
 @click.option(
 "-s",
-"--selector",
+"--node-selector",
 "node_selector",
 default=None,
 show_default=False,
-type=click.Choice(["mem", "load", "vcpus", "vms", "none"]),
+type=click.Choice(["mem", "memfree", "load", "vcpus", "vms", "none"]),
 help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -942,6 +954,8 @@ def vm_meta(
 ):
 """
 Modify the PVC metadata of existing virtual machine DOMAIN. At least one option to update must be specified. DOMAIN may be a UUID or name.
+
+For details on the "--node-selector"/"-s" values, please see help for the command "pvc vm define".
 """
 
 if (
@@ -1009,6 +1023,7 @@ def vm_meta(
 )
 @click.argument("domain")
 @click.argument("cfgfile", type=click.File(), default=None, required=False)
+@cluster_req
 def vm_modify(
 domain,
 cfgfile,
@@ -1338,20 +1353,36 @@ def vm_stop(domain, confirm_flag):
 @click.argument("domain")
 @click.option(
 "--force",
-"force",
+"force_flag",
 is_flag=True,
 default=False,
 help="Forcibly stop the VM instead of waiting for shutdown.",
 )
+@click.option(
+"-y",
+"--yes",
+"confirm_flag",
+is_flag=True,
+default=False,
+help="Confirm the disable",
+)
 @cluster_req
-def vm_disable(domain, force):
+def vm_disable(domain, force_flag, confirm_flag):
 """
 Shut down virtual machine DOMAIN and mark it as disabled. DOMAIN may be a UUID or name.
 
 Disabled VMs will not be counted towards a degraded cluster health status, unlike stopped VMs. Use this option for a VM that will remain off for an extended period.
 """
 
-retcode, retmsg = pvc_vm.vm_state(config, domain, "disable", force=force)
+if not confirm_flag and not config["unsafe"]:
+try:
+click.confirm(
+"Disable VM {}".format(domain), prompt_suffix="? ", abort=True
+)
+except Exception:
+exit(0)
+
+retcode, retmsg = pvc_vm.vm_state(config, domain, "disable", force=force_flag)
 cleanup(retcode, retmsg)
 
 
@@ -4102,7 +4133,9 @@ def provisioner_template_system_list(limit):
 @click.option(
 "--node-selector",
 "node_selector",
-type=click.Choice(["mem", "vcpus", "vms", "load", "none"], case_sensitive=False),
+type=click.Choice(
+["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
+),
 default="none",
 help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
@@ -4135,6 +4168,8 @@ def provisioner_template_system_add(
 ):
 """
 Add a new system template NAME to the PVC cluster provisioner.
+
+For details on the possible "--node-selector" values, please see help for the command "pvc vm define".
 """
 params = dict()
 params["name"] = name
@@ -4194,7 +4229,9 @@ def provisioner_template_system_add(
 @click.option(
 "--node-selector",
 "node_selector",
-type=click.Choice(["mem", "vcpus", "vms", "load", "none"], case_sensitive=False),
+type=click.Choice(
+["mem", "memfree", "vcpus", "vms", "load", "none"], case_sensitive=False
+),
 help='Method to determine optimal target node during autoselect; "none" will use the default for the cluster.',
 )
 @click.option(
@@ -4226,6 +4263,8 @@ def provisioner_template_system_modify(
 ):
 """
 Add a new system template NAME to the PVC cluster provisioner.
+
+For details on the possible "--node-selector" values, please see help for the command "pvc vm define".
 """
 params = dict()
 params["vcpus"] = vcpus
@@ -2,7 +2,7 @@ from setuptools import setup
 
 setup(
 name="pvc",
-version="0.9.49",
+version="0.9.54",
 packages=["pvc", "pvc.cli_lib"],
 install_requires=[
 "Click",
@@ -639,6 +639,8 @@ def findTargetNode(zkhandler, dom_uuid):
 # Execute the search
 if search_field == "mem":
 return findTargetNodeMem(zkhandler, node_limit, dom_uuid)
+if search_field == "memfree":
+return findTargetNodeMemFree(zkhandler, node_limit, dom_uuid)
 if search_field == "load":
 return findTargetNodeLoad(zkhandler, node_limit, dom_uuid)
 if search_field == "vcpus":
@@ -677,7 +679,7 @@ def getNodes(zkhandler, node_limit, dom_uuid):
 
 
 #
-# via free memory (relative to allocated memory)
+# via provisioned memory
 #
 def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
 most_provfree = 0
@@ -698,6 +700,24 @@ def findTargetNodeMem(zkhandler, node_limit, dom_uuid):
 return target_node
 
 
+#
+# via free memory
+#
+def findTargetNodeMemFree(zkhandler, node_limit, dom_uuid):
+most_memfree = 0
+target_node = None
+
+node_list = getNodes(zkhandler, node_limit, dom_uuid)
+for node in node_list:
+memfree = int(zkhandler.read(("node.memory.free", node)))
+
+if memfree > most_memfree:
+most_memfree = memfree
+target_node = node
+
+return target_node
+
+
 #
 # via load average
 #
@@ -236,6 +236,7 @@ def get_list(
 ):
 node_list = []
 full_node_list = zkhandler.children("base.node")
+full_node_list.sort()
 
 if is_fuzzy and limit:
 # Implicitly assume fuzzy limits
@@ -1193,6 +1193,7 @@ def get_list(zkhandler, node, state, tag, limit, is_fuzzy=True, negate=False):
 return False, 'VM state "{}" is not valid.'.format(state)
 
 full_vm_list = zkhandler.children("base.domain")
+full_vm_list.sort()
 
 # Set our limit to a sensible regex
 if limit:
@@ -1291,4 +1292,4 @@ def get_list(zkhandler, node, state, tag, limit, is_fuzzy=True, negate=False):
 except Exception:
 pass
 
-return True, vm_data_list
+return True, sorted(vm_data_list, key=lambda d: d["name"])
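A brief aside (not part of the diff): if the per-VM details are gathered and appended out of order — for example, when the lookups are parallelized — sorting the input list of names alone does not fix the order of the assembled result, so the final list of dicts is sorted by name as well. A small, purely illustrative sketch with invented names and a hypothetical gather function:

```python
from concurrent.futures import ThreadPoolExecutor, as_completed


def gather(name):
    # Hypothetical stand-in for fetching one VM's details.
    return {"name": name}


vm_names = sorted(["vmb", "vmc", "vma"])

with ThreadPoolExecutor() as pool:
    futures = [pool.submit(gather, n) for n in vm_names]
    # as_completed() yields futures in completion order, which need not
    # match submission order, so the pre-sorted input does not guarantee
    # a sorted result.
    vm_data_list = [f.result() for f in as_completed(futures)]

# The final sort by name makes the output order deterministic.
print(sorted(vm_data_list, key=lambda d: d["name"]))
```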
debian/changelog (vendored, 37 changed lines)
@@ -1,3 +1,40 @@
+pvc (0.9.54-0) unstable; urgency=high
+
+[CLI Client] Fixes a bad variable reference from the previous change
+[API Daemon] Enables TLSv1 with an SSLContext object for maximum compatibility
+
+-- Joshua M. Boniface <joshua@boniface.me> Tue, 23 Aug 2022 11:01:05 -0400
+
+pvc (0.9.53-0) unstable; urgency=high
+
+* [API] Fixes sort order of VM list (for real this time)
+
+-- Joshua M. Boniface <joshua@boniface.me> Fri, 12 Aug 2022 17:47:11 -0400
+
+pvc (0.9.52-0) unstable; urgency=high
+
+* [CLI] Fixes a bug with vm modify not requiring a cluster
+* [Docs] Adds a reference to the bootstrap daemon
+* [API] Adds sorting to node and VM lists for consistency
+* [Node Daemon/API] Adds kb_ stats values for OSD stats
+
+-- Joshua M. Boniface <joshua@boniface.me> Fri, 12 Aug 2022 11:09:25 -0400
+
+pvc (0.9.51-0) unstable; urgency=high
+
+* [CLI Client] Fixes a faulty literal_eval when viewing task status
+* [CLI Client] Adds a confirmation flag to the vm disable command
+* [Node Daemon] Removes the pvc-flush service
+
+-- Joshua M. Boniface <joshua@boniface.me> Mon, 25 Jul 2022 23:25:41 -0400
+
+pvc (0.9.50-0) unstable; urgency=high
+
+* [Node Daemon/API/CLI] Adds free memory node selector
+* [Node Daemon] Fixes bug sending space-containing detect disk strings
+
+-- Joshua M. Boniface <joshua@boniface.me> Wed, 06 Jul 2022 16:01:14 -0400
+
 pvc (0.9.49-0) unstable; urgency=high
 
 * [Node Daemon] Fixes bugs with OSD stat population on creation
debian/pvc-daemon-node.install (vendored, 1 changed line)
@@ -3,5 +3,4 @@ node-daemon/pvcnoded.sample.yaml etc/pvc
 node-daemon/pvcnoded usr/share/pvc
 node-daemon/pvcnoded.service lib/systemd/system
 node-daemon/pvc.target lib/systemd/system
-node-daemon/pvc-flush.service lib/systemd/system
 node-daemon/monitoring usr/share/pvc
debian/pvc-daemon-node.postinst (vendored, 5 changed lines)
@@ -7,11 +7,6 @@ systemctl daemon-reload
 systemctl enable /lib/systemd/system/pvcnoded.service
 systemctl enable /lib/systemd/system/pvc.target
 
-# Inform administrator of the autoflush daemon if it is not enabled
-if ! systemctl is-active --quiet pvc-flush.service; then
-echo "NOTE: The PVC autoflush daemon (pvc-flush.service) is not enabled by default; enable it to perform automatic flush/unflush actions on host shutdown/startup."
-fi
-
 # Inform administrator of the service restart/startup not occurring automatically
 if systemctl is-active --quiet pvcnoded.service; then
 echo "NOTE: The PVC node daemon (pvcnoded.service) has not been restarted; this is up to the administrator."
@@ -18,7 +18,7 @@ As a consequence of its features, PVC makes administrating very high-uptime VMs
 
 PVC also features an optional, fully customizable VM provisioning framework, designed to automate and simplify VM deployments using custom provisioning profiles, scripts, and CloudInit userdata API support.
 
-Installation of PVC is accomplished by two main components: a [Node installer ISO](https://github.com/parallelvirtualcluster/pvc-installer) which creates on-demand installer ISOs, and an [Ansible role framework](https://github.com/parallelvirtualcluster/pvc-ansible) to configure, bootstrap, and administrate the nodes. Once up, the cluster is managed via an HTTP REST API, accessible via a Python Click CLI client or WebUI.
+Installation of PVC is accomplished by two main components: a [Node installer ISO](https://github.com/parallelvirtualcluster/pvc-installer) which creates on-demand installer ISOs, and an [Ansible role framework](https://github.com/parallelvirtualcluster/pvc-ansible) to configure, bootstrap, and administrate the nodes. Installation can also be fully automated with a companion [cluster bootstrapping system](https://github.com/parallelvirtualcluster/pvc-bootstrap). Once up, the cluster is managed via an HTTP REST API, accessible via a Python Click CLI client or WebUI.
 
 Just give it physical servers, and it will run your VMs without you having to think about it, all in just an hour or two of setup time.
 
@@ -353,7 +353,19 @@ The password for the PVC node daemon to log in to the IPMI interface.
 
 * *required*
 
-The selector algorithm to use when migrating hosts away from the node. Valid `selector` values are: `mem`: the node with the least allocated VM memory; `vcpus`: the node with the least allocated VM vCPUs; `load`: the node with the least current load average; `vms`: the node with the least number of provisioned VMs.
+The default selector algorithm to use when migrating VMs away from a node; individual VMs can override this default.
+
+Valid `target_selector` values are:
+* `mem`: choose the node with the least provisioned VM memory
+* `memfree`: choose the node with the most (real) free memory
+* `vcpus`: choose the node with the least allocated VM vCPUs
+* `load`: choose the node with the lowest current load average
+* `vms`: choose the node with the least number of provisioned VMs
+
+For most clusters, `mem` should be sufficient, but others may be used based on the cluster workload and available resources. The following caveats should be considered:
+* `mem` looks at the provisioned memory, not the allocated memory; thus, stopped or disabled VMs are counted towards a node's memory for this selector, even though their memory is not actively in use.
+* `memfree` looks at the free memory of the node in general, ignoring the amount provisioned to VMs; if any VM's internal memory usage changes, this value would be affected. This might be preferable to `mem` on clusters with very high memory utilization versus total capacity or if many VMs are stopped/disabled.
+* `load` looks at the system load of the node in general, ignoring load in any particular VMs; if any VM's CPU usage changes, this value would be affected. This might be preferable on clusters with some very CPU intensive VMs.
 
 #### `system` → `configuration` → `directories` → `dynamic_directory`
 
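To make the `mem` versus `memfree` caveat above concrete, here is a small, purely illustrative Python sketch (not part of this diff; the node names and numbers are invented) of how the two selectors can pick different target nodes:

```python
# Hypothetical node stats, in MiB.
nodes = {
    "hv1": {"mem_total": 131072, "mem_provisioned": 98304, "mem_free": 60000},
    "hv2": {"mem_total": 131072, "mem_provisioned": 65536, "mem_free": 40000},
}


def pick_mem(nodes):
    # "mem": most unprovisioned memory wins (total minus memory promised to
    # VMs, whether or not those VMs are currently running).
    return max(nodes, key=lambda n: nodes[n]["mem_total"] - nodes[n]["mem_provisioned"])


def pick_memfree(nodes):
    # "memfree": most actually-free memory wins, regardless of what is provisioned.
    return max(nodes, key=lambda n: nodes[n]["mem_free"])


print(pick_mem(nodes))      # hv2 -- fewer MiB promised to VMs
print(pick_memfree(nodes))  # hv1 -- more RAM actually free right now
```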
@@ -192,7 +192,7 @@
 "type": "array"
 },
 "node_selector": {
-"description": "The selector used to determine candidate nodes during migration",
+"description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
 "type": "string"
 }
 },
@@ -1414,7 +1414,7 @@
 "type": "array"
 },
 "node_selector": {
-"description": "The selector used to determine candidate nodes during migration",
+"description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
 "type": "string"
 },
 "profile": {
@@ -6173,13 +6173,15 @@
 "type": "string"
 },
 {
-"default": "mem",
-"description": "The selector used to determine candidate nodes during migration",
+"default": "none",
+"description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
 "enum": [
 "mem",
+"memfree",
 "vcpus",
 "load",
-"vms"
+"vms",
+"none (cluster default)"
 ],
 "in": "query",
 "name": "selector",
@@ -6330,10 +6332,11 @@
 "type": "string"
 },
 {
-"default": "mem",
-"description": "The selector used to determine candidate nodes during migration",
+"default": "none",
+"description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
 "enum": [
 "mem",
+"memfree",
 "vcpus",
 "load",
 "vms",
@@ -6591,12 +6594,14 @@
 "type": "string"
 },
 {
-"description": "The selector used to determine candidate nodes during migration",
+"description": "The selector used to determine candidate nodes during migration; see 'target_selector' in the node daemon configuration reference",
 "enum": [
 "mem",
+"memfree",
 "vcpus",
 "load",
-"vms"
+"vms",
+"none (cluster default)"
 ],
 "in": "query",
 "name": "selector",
@@ -1,20 +0,0 @@
-# Parallel Virtual Cluster autoflush daemon
-
-[Unit]
-Description = Parallel Virtual Cluster autoflush daemon
-After = pvcnoded.service pvcapid.service zookeeper.service libvirtd.service ssh.service ceph.target network-online.target
-Wants = pvcnoded.service
-PartOf = pvc.target
-
-[Service]
-Type = oneshot
-RemainAfterExit = true
-WorkingDirectory = /usr/share/pvc
-TimeoutSec = 30min
-ExecStartPre = /bin/sleep 30
-ExecStart = /usr/bin/pvc -c local node unflush --wait
-ExecStop = /usr/bin/pvc -c local node flush --wait
-ExecStopPost = /bin/sleep 5
-
-[Install]
-WantedBy = pvc.target
@@ -122,7 +122,7 @@ pvc:
 pass: Passw0rd
 # migration: Migration option configuration
 migration:
-# target_selector: Criteria to select the ideal migration target, options: mem, load, vcpus, vms
+# target_selector: Criteria to select the ideal migration target, options: mem, memfree, load, vcpus, vms
 target_selector: mem
 # configuration: Local system configurations
 configuration:
@@ -48,7 +48,7 @@ import re
 import json
 
 # Daemon version
-version = "0.9.49"
+version = "0.9.54"
 
 
 ##########################################################
@@ -1239,8 +1239,9 @@ class CephSnapshotInstance(object):
 # Primary command function
 # This command pipe is only used for OSD adds and removes
 def ceph_command(zkhandler, logger, this_node, data, d_osd):
-# Get the command and args
-command, args = data.split()
+# Get the command and args; the * + join ensures arguments with spaces (e.g. detect strings) are recombined right
+command, *args = data.split()
+args = " ".join(args)
 
 # Adding a new OSD
 if command == "osd_add":
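A quick illustration of the fix above (an editorial aside, not part of the diff; the command string is invented): with exactly two names on the left, tuple unpacking fails as soon as the argument itself contains spaces, while star-unpacking keeps the first field as the command and rejoins the rest.

```python
# Hypothetical command-pipe payload whose argument contains spaces.
data = "osd_add detect:Intel DC S4600:800GB:0"

try:
    command, args = data.split()  # old form: expects exactly two fields
except ValueError as err:
    print(f"pair unpacking fails: {err}")

command, *args = data.split()  # new form: first field, then the rest
args = " ".join(args)          # recombine the space-containing argument
print(command)  # osd_add
print(args)     # detect:Intel DC S4600:800GB:0
```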
@@ -307,8 +307,14 @@ def collect_ceph_stats(logger, config, zkhandler, this_node, queue):
 "var": osd["var"],
 "pgs": osd["pgs"],
+"kb": osd["kb"],
+"kb_used": osd["kb_used"],
+"kb_used_data": osd["kb_used_data"],
+"kb_used_omap": osd["kb_used_omap"],
+"kb_used_meta": osd["kb_used_meta"],
+"kb_avail": osd["kb_avail"],
 "weight": osd["crush_weight"],
 "reweight": osd["reweight"],
 "class": osd["device_class"],
 }
 }
 )