Compare commits: v0.9.45 ... 1cf8706a52

154 commits (SHA1):

1cf8706a52  dd8f07526f  5a5e5da663  739b60b91e  16544227eb  73e3746885
66230ce971  fbfbd70461  2506098223  83e887c4ee  4eb0f3bb8a  adc767e32f
2083fd824a  3aa74a3940  71d94bbeab  718f689df9  268b5c0b86  b016b9bf3d
7604b9611f  b21278fd80  3b02034b70  c7a5b41b1e  48b0091d3e  2e94516ee2
d7f26b27ea  872f35a7ee  52c3e8ced3  1d7acf62bf  c790c331a7  23165482df
057071a7b7  554fa9f412  5a5f924268  cc309fc021  5f783f1663  bc89bb5b68
eb233ef588  d3efb54cb4  da15357c8a  b6939a28c0  a1da479a4c  ace4082820
4036af6045  f96de97861  04cad46305  e9dea4d2d1  39fd85fcc3  cbbab46b55
d1f2ce0b0a  2f01edca14  12a3a3a6a6  c44732be83  a8b68e0968  e59152afee
56021c443a  ebdea165f1  fb0651fb05  35e7e11403  b7555468eb  f1b4ee02ba
4698edc98e  40e7e04aad  7f074847c4  b0b0b75605  89f62318bd  925141ed65
f7a826bf52  e176f3b2f6  b339d5e641  d476b13cc0  ce8b2c22cc  feab5d3479
ee348593c9  e403146bcf  bde684dd3a  992e003500  eaeb860a83  1198ca9f5c
e79d200244  5b3bb9f306  5501586a47  c160648c5c  fa37227127  2cac98963c
8e50428707  a4953bc6ef  3c10d57148  26d8551388  57342541dd  50f8afd749
3449069e3d  cb66b16045  8edce74b85  e9b69c4124  3948206225  a09578fcf5
73be807b84  4a9805578e  f70f052df1  1e8841ce69  9c7d39d523  011490bcca
8de63b2785  8f8f00b2e9  1daab49b50  9f6041b9cf  5b27e438a9  3e8a85b029
19ac1e17c3  252175fb6f  f39b041471  3b41759262  e514eed414  b81e70ec18
c2a473ed8b  5355f6ff48  bf7823deb5  8ba371723e  e10ac52116  341073521b
16c38da5ef  c8134d3a1c  9f41373324  8e62d5b30b  7a8eee244a  7df5b8e52e
6f96219023  51967e164b  7a3a44d47c  44491dd988  eba142f470  6cef68d157
e8caf3369e  3e3776a25b  6e0d0e264e  1855d03a36  1a286dc8dd  1b6d10e03a
73c96d1e93  5841c98a59  bc6395c959  d582f87472  e9735113af  722fd0a65d
3b41beb0f3  d3392c0282  560c013e95  384c6320ef  445dec1c38  534c7cd7f0
4014ef7714  180f0445ac  074664d4c1  418ac23d40
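This comparison should be reproducible locally with plain git, using the two refs from the page header above (v0.9.45 as the base, 1cf8706a52 as the target); the clone URL assumes the repository's usual GitHub location:

```bash
# Reproduce this comparison locally; refs taken from the page header above.
git clone https://github.com/parallelvirtualcluster/pvc.git
cd pvc
git log --oneline v0.9.45...1cf8706a52   # the commits listed above
git diff v0.9.45 1cf8706a52              # the file changes shown below
```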
@@ -4,4 +4,4 @@ bbuilder:
 published:
   - git submodule update --init
   - /bin/bash build-stable-deb.sh
-  - sudo /usr/local/bin/deploy-package -C pvc
+  - /usr/local/bin/deploy-package -C pvc
.github/FUNDING.yml (vendored): 2 lines changed

@@ -1,2 +0,0 @@
-github: [joshuaboniface]
-patreon: [joshuaboniface]
@@ -4,16 +4,9 @@ pushd $( git rev-parse --show-toplevel ) &>/dev/null
 
 ex=0
 
-./format check
+./prepare
 if [[ $? -ne 0 ]]; then
-    ./format
-    echo "Black formatting change detected; review and recommit"
-    ex=1
-fi
-
-./lint
-if [[ $? -ne 0 ]]; then
-    echo "Linting error detected; correct and recommit"
+    echo "Aborting commit due to formatting or linting errors."
     ex=1
 fi
 
CHANGELOG.md: 27 lines changed

@@ -1,32 +1,5 @@
 ## PVC Changelog
 
-###### [v0.9.45](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.45)
-
-* [Node Daemon] Fixes an ordering issue with pvcnoded.service
-* [CLI Client] Fixes bad calls to echo() without argument
-
-###### [v0.9.44](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.44)
-
-* [Node Daemon] Adds a Munin plugin for Ceph utilization
-* [CLI] Fixes timeouts for long-running API commands
-
-###### [v0.9.44](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.44)
-
-* [CLI] Fixes timeout issues with long-running API commands
-
-###### [v0.9.43](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.43)
-
-* [Packaging] Fixes a bad test in postinst
-* [CLI] Adds support for removing VM interfaces by MAC address
-* [CLI] Modifies the default restart + live behaviour to prefer the explicit restart
-* [CLI] Adds support for adding additional VM interfaces in the same network
-* [CLI] Various ordering and message fixes
-* [Node Daemon] Adds additional delays and retries to fencing actions
-* [All] Adds Black formatting for Python code and various script/hook cleanups
-* [CLI/API] Adds automatic shutdown or stop when disabling a VM
-* [CLI] Adds support for forcing colourized output
-* [Docs] Remove obsolete Ansible and Testing manuals
-
 ###### [v0.9.42](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.42)
 
 * [Documentation] Reworks and updates various documentation sections
@@ -25,7 +25,7 @@ import yaml
 from distutils.util import strtobool as dustrtobool
 
 # Daemon version
-version = "0.9.45"
+version = "0.9.42"
 
 # API version
 API_VERSION = 1.0
@@ -32,8 +32,7 @@ pushd $( git rev-parse --show-toplevel ) &>/dev/null
 
 # Prepare code
 echo "Preparing code (format and lint)..."
-./format || exit 1
-./lint || exit 1
+./prepare || exit
 
 # Build the packages
 echo -n "Building packages... "
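One side of this comparison calls a `./prepare` helper whose contents are not shown here. A hedged sketch only, assuming `prepare` simply chains the repository's `format` and `lint` helpers (both script names appear in the hunks above) and exits non-zero on the first failure, which is what `./prepare || exit` relies on:

```bash
#!/usr/bin/env bash
# Hypothetical sketch only: the real ./prepare script is not part of this
# comparison. Assumes it chains the format and lint helpers, failing fast.
set -o errexit

pushd "$( git rev-parse --show-toplevel )" &>/dev/null

./format
./lint

popd &>/dev/null
```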
@@ -15,7 +15,7 @@ cp -a debian/changelog client-cli/setup.py ${tmpdir}/
 cp -a node-daemon/pvcnoded/Daemon.py ${tmpdir}/node-Daemon.py
 cp -a api-daemon/pvcapid/Daemon.py ${tmpdir}/api-Daemon.py
 # Replace the "base" version with the git revision version
-sed -i "s/version = \"${base_ver}\"/version = \"${new_ver}\"/" node-daemon/pvcnoded/Daemon.py api-daemon/pvcapid/Daemon.py client-cli/setup.py
+sed -i "s/version = '${base_ver}'/version = '${new_ver}'/" node-daemon/pvcnoded/Daemon.py api-daemon/pvcapid/Daemon.py client-cli/setup.py
 sed -i "s/${base_ver}-0/${new_ver}/" debian/changelog
 cat <<EOF > debian/changelog
 pvc (${new_ver}) unstable; urgency=medium
@@ -19,9 +19,9 @@ $EDITOR ${changelog_file}
 changelog="$( cat ${changelog_file} | grep -v '^#' | sed 's/^*/ */' )"
 rm ${changelog_file}
 
-sed -i "s,version = \"${current_version}\",version = \"${new_version}\"," node-daemon/pvcnoded/Daemon.py
-sed -i "s,version = \"${current_version}\",version = \"${new_version}\"," api-daemon/pvcapid/Daemon.py
-sed -i "s,version=\"${current_version}\",version=\"${new_version}\"," client-cli/setup.py
+sed -i "s,version = '${current_version}',version = '${new_version}'," node-daemon/pvcnoded/Daemon.py
+sed -i "s,version = '${current_version}',version = '${new_version}'," api-daemon/pvcapid/Daemon.py
+sed -i "s,version='${current_version}',version='${new_version}'," client-cli/setup.py
 echo ${new_version} > .version
 
 changelog_tmpdir=$( mktemp -d )
@@ -52,7 +52,7 @@ git commit -v
 popd &>/dev/null
 
 echo
-echo "Release message:"
+echo "GitLab release message:"
 echo
 echo "# Parallel Virtual Cluster version ${new_version}"
 echo
@@ -123,10 +123,8 @@ def call_api(
     params=None,
     data=None,
     files=None,
+    timeout=3,
 ):
-    # Set the connect timeout to 3 seconds but extremely long (48 hour) data timeout
-    timeout = (3.05, 172800)
-
     # Craft the URI
     uri = "{}://{}{}{}".format(
         config["api_scheme"], config["api_host"], config["api_prefix"], request_uri
@@ -382,7 +382,9 @@ def vm_state(config, vm, target_state, force=False, wait=False):
         "force": str(force).lower(),
         "wait": str(wait).lower(),
     }
-    response = call_api(config, "post", "/vm/{vm}/state".format(vm=vm), params=params)
+    response = call_api(
+        config, "post", "/vm/{vm}/state".format(vm=vm), params=params, timeout=120
+    )
 
     if response.status_code == 200:
         retstatus = True
@@ -42,13 +42,11 @@ import pvc.cli_lib.network as pvc_network
 import pvc.cli_lib.ceph as pvc_ceph
 import pvc.cli_lib.provisioner as pvc_provisioner
 
 
 myhostname = socket.gethostname().split(".")[0]
 zk_host = ""
 is_completion = True if os.environ.get("_PVC_COMPLETE", "") == "complete" else False
 
 default_store_data = {"cfgfile": "/etc/pvc/pvcapid.yaml"}
-config = dict()
-
 
 #
@@ -60,7 +58,7 @@ def print_version(ctx, param, value):
     from pkg_resources import get_distribution
 
     version = get_distribution("pvc").version
-    echo(f"Parallel Virtual Cluster version {version}")
+    click.echo(f"Parallel Virtual Cluster version {version}")
     ctx.exit()
 
 
@@ -168,18 +166,9 @@ if not is_completion:
 CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"], max_content_width=120)
 
 
-def echo(msg, nl=True, err=False):
-    if config.get("colour", False):
-        colour = True
-    else:
-        colour = None
-
-    click.echo(message=msg, color=colour, nl=nl, err=err)
-
-
 def cleanup(retcode, retmsg):
     if retmsg != "":
-        echo(retmsg)
+        click.echo(retmsg)
     if retcode is True:
         exit(0)
     else:
@@ -268,7 +257,9 @@ def cluster_add(description, address, port, ssl, name, api_key):
     }
     # Update the store
     update_store(store_path, existing_config)
-    echo('Added new cluster "{}" at host "{}" to local database'.format(name, address))
+    click.echo(
+        'Added new cluster "{}" at host "{}" to local database'.format(name, address)
+    )
 
 
 ###############################################################################
@@ -289,7 +280,7 @@ def cluster_remove(name):
         print('No cluster with name "{}" found'.format(name))
     # Update the store
     update_store(store_path, existing_config)
-    echo('Removed cluster "{}" from local database'.format(name))
+    click.echo('Removed cluster "{}" from local database'.format(name))
 
 
 ###############################################################################
@@ -363,9 +354,9 @@ def cluster_list(raw):
 
     if not raw:
         # Display the data nicely
-        echo("Available clusters:")
-        echo("")
-        echo(
+        click.echo("Available clusters:")
+        click.echo()
+        click.echo(
             "{bold}{name: <{name_length}} {description: <{description_length}} {address: <{address_length}} {port: <{port_length}} {scheme: <{scheme_length}} {api_key: <{api_key_length}}{end_bold}".format(
                 bold=ansiprint.bold(),
                 end_bold=ansiprint.end(),
@@ -402,7 +393,7 @@ def cluster_list(raw):
             api_key = "N/A"
 
         if not raw:
-            echo(
+            click.echo(
                 "{bold}{name: <{name_length}} {description: <{description_length}} {address: <{address_length}} {port: <{port_length}} {scheme: <{scheme_length}} {api_key: <{api_key_length}}{end_bold}".format(
                     bold="",
                     end_bold="",
@@ -421,7 +412,7 @@ def cluster_list(raw):
                 )
             )
         else:
-            echo(cluster)
+            click.echo(cluster)
 
 
 # Validate that the cluster is set for a given command
@@ -429,7 +420,7 @@ def cluster_req(function):
     @wraps(function)
     def validate_cluster(*args, **kwargs):
        if config.get("badcfg", None):
-            echo(
+            click.echo(
                'No cluster specified and no local pvcapid.yaml configuration found. Use "pvc cluster" to add a cluster API to connect to.'
            )
            exit(1)
@@ -472,24 +463,24 @@ def node_secondary(node, wait):
 
     task_retcode, task_retdata = pvc_provisioner.task_status(config, None)
     if len(task_retdata) > 0:
-        echo(
+        click.echo(
             "Note: There are currently {} active or queued provisioner jobs on the current primary node.".format(
                 len(task_retdata)
             )
         )
-        echo(
+        click.echo(
             " These jobs will continue executing, but status will not be visible until the current"
         )
-        echo(" node returns to primary state.")
-        echo("")
+        click.echo(" node returns to primary state.")
+        click.echo()
 
     retcode, retmsg = pvc_node.node_coordinator_state(config, node, "secondary")
     if not retcode:
         cleanup(retcode, retmsg)
     else:
         if wait:
-            echo(retmsg)
-            echo("Waiting for state transition... ", nl=False)
+            click.echo(retmsg)
+            click.echo("Waiting for state transition... ", nl=False)
             # Every half-second, check if the API is reachable and the node is in secondary state
             while True:
                 try:
@@ -525,24 +516,24 @@ def node_primary(node, wait):
 
     task_retcode, task_retdata = pvc_provisioner.task_status(config, None)
     if len(task_retdata) > 0:
-        echo(
+        click.echo(
             "Note: There are currently {} active or queued provisioner jobs on the current primary node.".format(
                 len(task_retdata)
             )
         )
-        echo(
+        click.echo(
             " These jobs will continue executing, but status will not be visible until the current"
         )
-        echo(" node returns to primary state.")
-        echo("")
+        click.echo(" node returns to primary state.")
+        click.echo()
 
     retcode, retmsg = pvc_node.node_coordinator_state(config, node, "primary")
     if not retcode:
         cleanup(retcode, retmsg)
     else:
         if wait:
-            echo(retmsg)
-            echo("Waiting for state transition... ", nl=False)
+            click.echo(retmsg)
+            click.echo("Waiting for state transition... ", nl=False)
             # Every half-second, check if the API is reachable and the node is in secondary state
             while True:
                 try:
@@ -1027,7 +1018,7 @@ def vm_modify(
             text=current_vm_cfgfile, require_save=True, extension=".xml"
         )
         if new_vm_cfgfile is None:
-            echo("Aborting with no modifications.")
+            click.echo("Aborting with no modifications.")
             exit(0)
         else:
             new_vm_cfgfile = new_vm_cfgfile.strip()
@@ -1038,15 +1029,15 @@ def vm_modify(
             new_vm_cfgfile = cfgfile.read()
             cfgfile.close()
 
-        echo(
+        click.echo(
             'Replacing configuration of VM "{}" with file "{}".'.format(
                 dom_name, cfgfile.name
             )
         )
 
     # Show a diff and confirm
-    echo("Pending modifications:")
-    echo("")
+    click.echo("Pending modifications:")
+    click.echo("")
     diff = list(
         difflib.unified_diff(
             current_vm_cfgfile.split("\n"),
@@ -1061,14 +1052,14 @@ def vm_modify(
     )
     for line in diff:
         if re.match(r"^\+", line) is not None:
-            echo(colorama.Fore.GREEN + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.GREEN + line + colorama.Fore.RESET)
         elif re.match(r"^\-", line) is not None:
-            echo(colorama.Fore.RED + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.RED + line + colorama.Fore.RESET)
         elif re.match(r"^\^", line) is not None:
-            echo(colorama.Fore.BLUE + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.BLUE + line + colorama.Fore.RESET)
         else:
-            echo(line)
-    echo("")
+            click.echo(line)
+    click.echo("")
 
     # Verify our XML is sensible
     try:
@@ -3606,7 +3597,7 @@ def ceph_volume_upload(pool, name, image_format, image_file):
     """
 
     if not os.path.exists(image_file):
-        echo("ERROR: File '{}' does not exist!".format(image_file))
+        click.echo("ERROR: File '{}' does not exist!".format(image_file))
         exit(1)
 
     retcode, retmsg = pvc_ceph.ceph_volume_upload(
@@ -4478,7 +4469,7 @@ def provisioner_template_storage_disk_add(
     """
 
     if source_volume and (size or filesystem or mountpoint):
-        echo(
+        click.echo(
             'The "--source-volume" option is not compatible with the "--size", "--filesystem", or "--mountpoint" options.'
         )
         exit(1)
@@ -4619,7 +4610,7 @@ def provisioner_userdata_add(name, filename):
     try:
         yaml.load(userdata, Loader=yaml.SafeLoader)
     except Exception as e:
-        echo("Error: Userdata document is malformed")
+        click.echo("Error: Userdata document is malformed")
         cleanup(False, e)
 
     params = dict()
@@ -4656,7 +4647,7 @@ def provisioner_userdata_modify(name, filename, editor):
     # Grab the current config
     retcode, retdata = pvc_provisioner.userdata_info(config, name)
     if not retcode:
-        echo(retdata)
+        click.echo(retdata)
         exit(1)
     current_userdata = retdata["userdata"].strip()
 
@@ -4664,14 +4655,14 @@ def provisioner_userdata_modify(name, filename, editor):
         text=current_userdata, require_save=True, extension=".yaml"
     )
     if new_userdata is None:
-        echo("Aborting with no modifications.")
+        click.echo("Aborting with no modifications.")
         exit(0)
     else:
         new_userdata = new_userdata.strip()
 
     # Show a diff and confirm
-    echo("Pending modifications:")
-    echo("")
+    click.echo("Pending modifications:")
+    click.echo("")
     diff = list(
         difflib.unified_diff(
             current_userdata.split("\n"),
@@ -4686,14 +4677,14 @@ def provisioner_userdata_modify(name, filename, editor):
     )
     for line in diff:
         if re.match(r"^\+", line) is not None:
-            echo(colorama.Fore.GREEN + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.GREEN + line + colorama.Fore.RESET)
         elif re.match(r"^\-", line) is not None:
-            echo(colorama.Fore.RED + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.RED + line + colorama.Fore.RESET)
         elif re.match(r"^\^", line) is not None:
-            echo(colorama.Fore.BLUE + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.BLUE + line + colorama.Fore.RESET)
         else:
-            echo(line)
-    echo("")
+            click.echo(line)
+    click.echo("")
 
     click.confirm("Write modifications to cluster?", abort=True)
 
@@ -4708,7 +4699,7 @@ def provisioner_userdata_modify(name, filename, editor):
     try:
         yaml.load(userdata, Loader=yaml.SafeLoader)
     except Exception as e:
-        echo("Error: Userdata document is malformed")
+        click.echo("Error: Userdata document is malformed")
         cleanup(False, e)
 
     params = dict()
@@ -4857,20 +4848,20 @@ def provisioner_script_modify(name, filename, editor):
     # Grab the current config
     retcode, retdata = pvc_provisioner.script_info(config, name)
     if not retcode:
-        echo(retdata)
+        click.echo(retdata)
         exit(1)
     current_script = retdata["script"].strip()
 
     new_script = click.edit(text=current_script, require_save=True, extension=".py")
     if new_script is None:
-        echo("Aborting with no modifications.")
+        click.echo("Aborting with no modifications.")
         exit(0)
     else:
         new_script = new_script.strip()
 
     # Show a diff and confirm
-    echo("Pending modifications:")
-    echo("")
+    click.echo("Pending modifications:")
+    click.echo("")
     diff = list(
         difflib.unified_diff(
             current_script.split("\n"),
@@ -4885,14 +4876,14 @@ def provisioner_script_modify(name, filename, editor):
     )
     for line in diff:
         if re.match(r"^\+", line) is not None:
-            echo(colorama.Fore.GREEN + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.GREEN + line + colorama.Fore.RESET)
         elif re.match(r"^\-", line) is not None:
-            echo(colorama.Fore.RED + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.RED + line + colorama.Fore.RESET)
         elif re.match(r"^\^", line) is not None:
-            echo(colorama.Fore.BLUE + line + colorama.Fore.RESET)
+            click.echo(colorama.Fore.BLUE + line + colorama.Fore.RESET)
         else:
-            echo(line)
-    echo("")
+            click.echo(line)
+    click.echo("")
 
     click.confirm("Write modifications to cluster?", abort=True)
 
@@ -4997,7 +4988,7 @@ def provisioner_ova_upload(name, filename, pool):
     Storage templates, provisioning scripts, and arguments for OVA-type profiles will be ignored and should not be set.
     """
     if not os.path.exists(filename):
-        echo("ERROR: File '{}' does not exist!".format(filename))
+        click.echo("ERROR: File '{}' does not exist!".format(filename))
         exit(1)
 
     params = dict()
@@ -5328,19 +5319,19 @@ def provisioner_create(name, profile, wait_flag, define_flag, start_flag, script
     if retcode and wait_flag:
         task_id = retdata
 
-        echo("Task ID: {}".format(task_id))
-        echo("")
+        click.echo("Task ID: {}".format(task_id))
+        click.echo()
 
         # Wait for the task to start
-        echo("Waiting for task to start...", nl=False)
+        click.echo("Waiting for task to start...", nl=False)
         while True:
             time.sleep(1)
             task_status = pvc_provisioner.task_status(config, task_id, is_watching=True)
             if task_status.get("state") != "PENDING":
                 break
-            echo(".", nl=False)
-        echo(" done.")
-        echo("")
+            click.echo(".", nl=False)
+        click.echo(" done.")
+        click.echo()
 
         # Start following the task state, updating progress as we go
         total_task = task_status.get("total")
@@ -5361,7 +5352,7 @@ def provisioner_create(name, profile, wait_flag, define_flag, start_flag, script
                     maxlen = curlen
                 lendiff = maxlen - curlen
                 overwrite_whitespace = " " * lendiff
-                echo(
+                click.echo(
                     " " + task_status.get("status") + overwrite_whitespace,
                     nl=False,
                 )
@@ -5371,7 +5362,7 @@ def provisioner_create(name, profile, wait_flag, define_flag, start_flag, script
             if task_status.get("state") == "SUCCESS":
                 bar.update(total_task - last_task)
 
-        echo("")
+        click.echo()
         retdata = task_status.get("state") + ": " + task_status.get("status")
 
     cleanup(retcode, retdata)
@@ -5600,7 +5591,7 @@ def task_init(confirm_flag, overwrite_flag):
         exit(0)
 
     # Easter-egg
-    echo("Some music while we're Layin' Pipe? https://youtu.be/sw8S_Kv89IU")
+    click.echo("Some music while we're Layin' Pipe? https://youtu.be/sw8S_Kv89IU")
 
     retcode, retmsg = pvc_cluster.initialize(config, overwrite_flag)
     cleanup(retcode, retmsg)
@@ -5645,19 +5636,10 @@ def task_init(confirm_flag, overwrite_flag):
     default=False,
     help='Allow unsafe operations without confirmation/"--yes" argument.',
 )
-@click.option(
-    "--colour",
-    "--color",
-    "_colour",
-    envvar="PVC_COLOUR",
-    is_flag=True,
-    default=False,
-    help="Force colourized output.",
-)
 @click.option(
     "--version", is_flag=True, callback=print_version, expose_value=False, is_eager=True
 )
-def cli(_cluster, _debug, _quiet, _unsafe, _colour):
+def cli(_cluster, _debug, _quiet, _unsafe):
     """
     Parallel Virtual Cluster CLI management tool
 
@@ -5669,9 +5651,7 @@ def cli(_cluster, _debug, _quiet, _unsafe, _colour):
 
     "PVC_QUIET": Suppress stderr connection output from client instead of using --quiet/-q
 
-    "PVC_UNSAFE": Always suppress confirmations instead of needing --unsafe/-u or --yes/-y; USE WITH EXTREME CARE
+    "PVC_UNSAFE": Suppress confirmation requirements instead of using --unsafe/-u or --yes/-y; USE WITH EXTREME CARE
 
-    "PVC_COLOUR": Force colour on the output even if Click determines it is not a console (e.g. with 'watch')
-
     If no PVC_CLUSTER/--cluster is specified, attempts first to load the "local" cluster, checking
     for an API configuration in "/etc/pvc/pvcapid.yaml". If this is also not found, abort.
@@ -5683,14 +5663,13 @@ def cli(_cluster, _debug, _quiet, _unsafe, _colour):
     if not config.get("badcfg", None):
         config["debug"] = _debug
         config["unsafe"] = _unsafe
-        config["colour"] = _colour
 
         if not _quiet:
            if config["api_scheme"] == "https" and not config["verify_ssl"]:
                ssl_unverified_msg = " (unverified)"
            else:
                ssl_unverified_msg = ""
-            echo(
+            click.echo(
                'Using cluster "{}" - Host: "{}" Scheme: "{}{}" Prefix: "{}"'.format(
                    config["cluster"],
                    config["api_host"],
@@ -5700,9 +5679,11 @@ def cli(_cluster, _debug, _quiet, _unsafe, _colour):
                 ),
                 err=True,
             )
-            echo("", err=True)
+            click.echo("", err=True)
 
 
+config = dict()
+
 #
 # Click command tree
 #
@@ -2,7 +2,7 @@ from setuptools import setup
 
 setup(
     name="pvc",
-    version="0.9.45",
+    version="0.9.42",
     packages=["pvc", "pvc.cli_lib"],
     install_requires=[
         "Click",
debian/changelog (vendored): 35 lines changed

@@ -1,38 +1,3 @@
-pvc (0.9.45-0) unstable; urgency=high
-
-  * [Node Daemon] Fixes an ordering issue with pvcnoded.service
-  * [CLI Client] Fixes bad calls to echo() without argument
-
- -- Joshua M. Boniface <joshua@boniface.me>  Thu, 25 Nov 2021 09:34:20 -0500
-
-pvc (0.9.44-0) unstable; urgency=high
-
-  * [Node Daemon] Adds a Munin plugin for Ceph utilization
-  * [CLI] Fixes timeouts for long-running API commands
-
- -- Joshua M. Boniface <joshua@boniface.me>  Thu, 11 Nov 2021 16:20:38 -0500
-
-pvc (0.9.44-0) unstable; urgency=high
-
-  * [CLI] Fixes timeout issues with long-running API commands
-
- -- Joshua M. Boniface <joshua@boniface.me>  Thu, 11 Nov 2021 16:19:32 -0500
-
-pvc (0.9.43-0) unstable; urgency=high
-
-  * [Packaging] Fixes a bad test in postinst
-  * [CLI] Adds support for removing VM interfaces by MAC address
-  * [CLI] Modifies the default restart + live behaviour to prefer the explicit restart
-  * [CLI] Adds support for adding additional VM interfaces in the same network
-  * [CLI] Various ordering and message fixes
-  * [Node Daemon] Adds additional delays and retries to fencing actions
-  * [All] Adds Black formatting for Python code and various script/hook cleanups
-  * [CLI/API] Adds automatic shutdown or stop when disabling a VM
-  * [CLI] Adds support for forcing colourized output
-  * [Docs] Remove obsolete Ansible and Testing manuals
-
- -- Joshua M. Boniface <joshua@boniface.me>  Mon, 08 Nov 2021 02:27:38 -0500
-
 pvc (0.9.42-0) unstable; urgency=high
 
   * [Documentation] Reworks and updates various documentation sections
@@ -95,7 +95,7 @@ The CLI client is self-documenting using the `-h`/`--help` arguments throughout,
 
 The overall management, deployment, bootstrapping, and configuring of nodes is accomplished via a set of Ansible roles and playbooks, found in the [`pvc-ansible` repository](https://github.com/parallelvirtualcluster/pvc-ansible), and nodes are installed via a custom installer ISO generated by the [`pvc-installer` repository](https://github.com/parallelvirtualcluster/pvc-installer). Once the cluster is set up, nodes can be added, replaced, updated, or reconfigured using this Ansible framework.
 
-Details about the Ansible setup and node installer can be found in those repositories.
+The Ansible configuration and architecture manual can be found at the [Ansible manual page](/manuals/ansible).
 
 The [getting started documentation](/getting-started) provides a walk-through of using these tools to bootstrap a new cluster.
 
@@ -210,7 +210,7 @@ The upstream network functions as the main upstream for the cluster nodes, provi
 
 The floating IP address in the cluster network can be used as a single point of communication with the active primary node, for instance to access the DNS aggregator instance or the management API. PVC provides only limited access control mechanisms to the API interface, so the upstream network should always be protected by a firewall; running PVC directly accessible on the Internet is strongly discouraged and may post a serious security risk, and all access should be restricted to the smallest possible set of remote systems.
 
-Nodes in this network are generally assigned static IP addresses which are configured at node install time in the [Ansible deployment configuration](https://github.com/parallelvirtualcluster/pvc-ansible).
+Nodes in this network are generally assigned static IP addresses which are configured at node install time and in the [Ansible deployment configuration](/manuals/ansible).
 
 The upstream router should be able to handle static routes to the PVC cluster, or form a BGP neighbour relationship with the coordinator nodes and/or floating IP address to learn routes to the managed client networks.
 
@@ -14,7 +14,7 @@ This guide will walk you through setting up a simple 3-node PVC cluster from scr
 
 0. Create an initial `hosts` inventory, using `hosts.default` in the `pvc-ansible` repo as a template. You can manage multiple PVC clusters ("sites") from the Ansible repository easily, however for simplicity you can use the simple name `cluster` for your initial site. Define the 3 hostnames you will use under the site group; usually the provided names of `pvchv1`, `pvchv2`, and `pvchv3` are sufficient, though you may use any hostname pattern you wish. It is *very important* that the names all contain a sequential number, however, as this is used by various components.
 
-0. Create an initial set of `group_vars` for your cluster at `group_vars/<cluster>`, using the `group_vars/default` in the `pvc-ansible` repo as a template. Inside these group vars are two main files: `base.yml` and `pvc.yml`. These example files are well-documented; read them carefully and specify all required options before proceeding, and reference the [Ansible setup examples](https://github.com/parallelvirtualcluster/pvc-ansible) for more detailed descriptions of the options.
+0. Create an initial set of `group_vars` for your cluster at `group_vars/<cluster>`, using the `group_vars/default` in the `pvc-ansible` repo as a template. Inside these group vars are two main files: `base.yml` and `pvc.yml`. These example files are well-documented; read them carefully and specify all required options before proceeding, and reference the [Ansible manual](/manuals/ansible) for more detailed descriptions of the options.
 
    * `base.yml` configures the `base` role and some common per-cluster configurations such as an upstream domain, a root password, a set of administrative users, various hardware configuration items, as well as and most importantly, the basic network configuration of the nodes. Make special note of the various items that must be generated such as passwords; these should all be cluster-unique.
 
docs/manuals/ansible.md (new file): 949 lines added. The added file begins:

# PVC Ansible architecture

The PVC Ansible setup and management framework is written in Ansible. It consists of two roles: `base` and `pvc`.

## Base role

The Base role configures a node to a specific, standard base Debian system, with a number of PVC-specific tweaks. Some examples include:

* Installing the custom PVC repository hosted at Boniface Labs.

* Removing several unnecessary packages and installing numerous additional packages.

* Automatically configuring network interfaces based on the `group_vars` configuration.

* Configuring several general `sysctl` settings for optimal performance.

* Installing and configuring rsyslog, postfix, ntpd, ssh, and fail2ban.

* Creating the users specified in the `group_vars` configuration.

* Installing custom MOTDs, bashrc files, vimrc files, and other useful configurations for each user.

The end result is a standardized "PVC node" system ready to have the daemons installed by the PVC role.

The Base role is optional: if an administrator so chooses, they can bypass this role and configure things manually. That said, for the proper functioning of the PVC role, the Base role should always be applied first.

## PVC role

The PVC role configures all the dependencies of PVC, including storage, networking, and databases, then installs the PVC daemon itself. Specifically, it will, in order:

* Install Ceph, configure and bootstrap a new cluster if `bootstrap=yes` is set, configure the monitor and manager daemons, and start up the cluster ready for the addition of OSDs via the client interface (coordinators only).

* Install, configure, and if `bootstrap=yes` is set, bootstrap a Zookeeper cluster (coordinators only).

* Install, configure, and if `bootstrap=yes` is set, bootstrap a Patroni PostgreSQL cluster for the PowerDNS aggregator (coordinators only).

* Install and configure Libvirt.

* Install and configure FRRouting.

* Install and configure the main PVC daemon and API client.

* If `bootstrap=yes` is set, initialize the PVC cluster (`pvc task init`).

## Completion

Once the entire playbook has run for the first time against a given host, the host will be rebooted to apply all the configured services. On startup, the system should immediately launch the PVC daemon, check in to the Zookeeper cluster, and become ready. The node will be in `flushed` state on its first boot; the administrator will need to run `pvc node unflush <node>` to set the node into active state ready to handle virtual machines. On the first bootstrap run, the administrator will also have to configure storage block devices (OSDs), networks, etc. For full details, see [the main getting started page](/getting-started).

## General usage

### Initial setup

After cloning the `pvc-ansible` repo, set up a set of configurations for your cluster. One copy of the `pvc-ansible` repository can manage an unlimited number of clusters with differing configurations.

All files created during initial setup should be stored outside the `pvc-ansible` repository, as they will be ignored by the main Git repository by default. It is recommended to set up a separate folder, either standalone or as its own Git repository, to contain your files, then symlink them back into the main repository at the appropriate places outlined below.

Create a `hosts` file containing the clusters as groups, then the list of hosts within each cluster group. The `hosts.default` file can be used as a template.

Create a `files/<cluster>` folder to hold the cluster-created static configuration files. Until the first bootstrap run, this directory will be empty.

Create a `group_vars/<cluster>` folder to hold the cluster configuration variables. The `group_vars/default` directory can be used as an example.
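As an illustrative aside (not part of the original manual), the setup steps above might look like the following shell session; the `~/pvc-configs` directory and `mycluster` cluster name are placeholders, not conventions mandated by the repository:

```bash
# Illustrative only: directory and cluster names are placeholders.
git clone https://github.com/parallelvirtualcluster/pvc-ansible.git
mkdir -p ~/pvc-configs/files/mycluster ~/pvc-configs/group_vars/mycluster

# Start the inventory and group_vars from the provided templates
cp pvc-ansible/hosts.default ~/pvc-configs/hosts
cp -r pvc-ansible/group_vars/default/* ~/pvc-configs/group_vars/mycluster/

# Symlink the external configuration back into the repository
ln -s ~/pvc-configs/hosts pvc-ansible/hosts
ln -s ~/pvc-configs/files/mycluster pvc-ansible/files/mycluster
ln -s ~/pvc-configs/group_vars/mycluster pvc-ansible/group_vars/mycluster
```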
### Bootstrapping a cluster

Before bootstrapping a cluster, see the section on [PVC Ansible configuration variables](/manuals/ansible/#pvc-ansible-configuration-variables) to configure the cluster.

Bootstrapping a cluster can be done using the main `pvc.yml` playbook. Generally, a bootstrap run should be limited to the coordinators of the cluster to avoid potential race conditions or strange bootstrap behaviour. The special variable `bootstrap=yes` must be set to indicate that a cluster bootstrap is to be requested.

**WARNING:** Do not run the playbook with `bootstrap=yes` *except during the very first run against a freshly-installed set of coordinator nodes*. Running it against an existing cluster will result in the complete failure of the cluster, the destruction of all data, or worse.
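As an illustrative aside (not part of the original manual), a first bootstrap run limited to the coordinators might look like this; the coordinator hostnames are the placeholder names used in the getting-started guide above:

```bash
# FIRST run against freshly-installed coordinators ONLY; see the WARNING above.
# Hostnames are placeholders from the getting-started example.
ansible-playbook -v -i hosts pvc.yml -l pvchv1,pvchv2,pvchv3 -e bootstrap=yes
```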
### Adding new nodes

Adding new nodes to an existing cluster can be done using the main `pvc.yml` playbook. The new node(s) should be added to the `group_vars` configuration `node_list`, then the playbook run against all hosts in the cluster with no special flags or limits. This will ensure the entire cluster is updated with the new information, while simultaneously configuring the new node.
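A sketch of such a run (illustrative aside, with a placeholder cluster group name); note the absence of any `bootstrap` flag:

```bash
# After adding the new node(s) to node_list in group_vars, run against the
# whole cluster group. "mycluster" is a placeholder group name.
ansible-playbook -v -i hosts pvc.yml -l mycluster
```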
### Reconfiguration and software updates

For general, day-to-day software updates such as base system updates or upgrading to newer PVC versions, a special playbook, `oneshot/update-pvc-cluster.yml`, is provided. This playbook will gracefully update and upgrade all PVC nodes in the cluster, flush them, reboot them, and then unflush them. This operation should be completely transparent to VMs on the cluster.

For more advanced updates, such as changing configurations in the `group_vars`, the main `pvc.yml` playbook can be used to deploy the changes across all hosts. Note that this may cause downtime due to node reboots if certain configurations change, and it is not recommended to use this process frequently.
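For example (illustrative aside; the playbook path is the one named above, the group name is a placeholder):

```bash
# Graceful rolling update of every PVC node in the cluster group
ansible-playbook -v -i hosts oneshot/update-pvc-cluster.yml -l mycluster
```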
# PVC Ansible configuration manual
|
||||||
|
|
||||||
|
This manual documents the various `group_vars` configuration options for the `pvc-ansible` framework. We assume that the administrator is generally familiar with Ansible and its operation.
|
||||||
|
|
||||||
|
## PVC Ansible configuration variables
|
||||||
|
|
||||||
|
The `group_vars` folder contains configuration variables for all clusters managed by your local copy of `pvc-ansible`. Each cluster has a distinct set of `group_vars` to allow different configurations for each cluster.
|
||||||
|
|
||||||
|
This section outlines the various configuration options available in the `group_vars` configuration; the `group_vars/default` directory contains an example set of variables, split into two files (`base.yml` and `pvc.yml`), that set every listed configuration option.
|
||||||
|
|
||||||
|
### Conventions
|
||||||
|
|
||||||
|
* Settings may be `required`, `optional`, or `ignored`. Ignored settings are used for human-readability in the configuration but are ignored by the actual role.
|
||||||
|
|
||||||
|
* Settings may `depends` on other settings. This indicates that, if one setting is enabled, the other setting is very likely `required` by that setting.
|
||||||
|
|
||||||
|
* If a particular `<setting>` is marked `optional`, and a latter setting is marked `depends on <setting>`, the latter is ignored unless the `<setting>` is specified.
|
||||||
|
|
||||||
|
### `base.yml`
|
||||||
|
|
||||||
|
Example configuration:
|
||||||
|
|
||||||
|
```
|
||||||
|
---
|
||||||
|
cluster_group: mycluster
|
||||||
|
timezone_location: Canada/Eastern
|
||||||
|
local_domain: upstream.local
|
||||||
|
recursive_dns_servers:
|
||||||
|
- 8.8.8.8
|
||||||
|
- 8.8.4.4
|
||||||
|
recursive_dns_search_domains:
|
||||||
|
- "{{ local_domain }}"
|
||||||
|
|
||||||
|
username_ipmi_host: "pvc"
|
||||||
|
passwd_ipmi_host: "MyPassword2019"
|
||||||
|
|
||||||
|
passwd_root: MySuperSecretPassword # Not actually used by the playbook, but good for reference
|
||||||
|
passwdhash_root: "$6$shadowencryptedpassword"
|
||||||
|
|
||||||
|
logrotate_keepcount: 7
|
||||||
|
logrotate_interval: daily
|
||||||
|
|
||||||
|
username_email_root: root
|
||||||
|
|
||||||
|
hosts:
|
||||||
|
- name: testhost
|
||||||
|
ip: 127.0.0.1
|
||||||
|
|
||||||
|
admin_users:
|
||||||
|
- name: "myuser"
|
||||||
|
uid: 500
|
||||||
|
keys:
|
||||||
|
- "ssh-ed25519 MyKey 2019-06"
|
||||||
|
|
||||||
|
networks:
|
||||||
|
"bondU":
|
||||||
|
device: "bondU"
|
||||||
|
type: "bond"
|
||||||
|
bond_mode: "802.3ad"
|
||||||
|
bond_devices:
|
||||||
|
- "enp1s0f0"
|
||||||
|
- "enp1s0f1"
|
||||||
|
mtu: 9000
|
||||||
|
|
||||||
|
"upstream":
|
||||||
|
device: "vlan1000"
|
||||||
|
type: "vlan"
|
||||||
|
raw_device: "bondU"
|
||||||
|
mtu: 1500
|
||||||
|
domain: "{{ local_domain }}"
|
||||||
|
subnet: "192.168.100.0/24"
|
||||||
|
floating_ip: "192.168.100.10/24"
|
||||||
|
gateway_ip: "192.168.100.1"
|
||||||
|
|
||||||
|
"cluster":
|
||||||
|
device: "vlan1001"
|
||||||
|
type: "vlan"
|
||||||
|
raw_device: "bondU"
|
||||||
|
mtu: 1500
|
||||||
|
domain: "pvc-cluster.local"
|
||||||
|
subnet: "10.0.0.0/24"
|
||||||
|
floating_ip: "10.0.0.254/24"
|
||||||
|
|
||||||
|
"storage":
|
||||||
|
device: "vlan1002"
|
||||||
|
type: "vlan"
|
||||||
|
raw_device: "bondU"
|
||||||
|
mtu: 9000
|
||||||
|
domain: "pvc-storage.local"
|
||||||
|
subnet: "10.0.1.0/24"
|
||||||
|
floating_ip: "10.0.1.254/24"
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `cluster_group`
|
||||||
|
|
||||||
|
* *required*
|
||||||
|
|
||||||
|
The name of the Ansible PVC cluster group in the `hosts` inventory.
|
||||||
|
|
||||||
|
#### `timezone_location`
|
||||||
|
|
||||||
|
* *required*
|
||||||
|
|
||||||
|
The TZ database format name of the local timezone, e.g. `America/Toronto` or `Canada/Eastern`.
|
||||||
|
|
||||||
|
#### `local_domain`
|
||||||
|
|
||||||
|
* *required*
|
||||||
|
|
||||||
|
The domain name of the PVC cluster nodes. This is the domain portion of the FQDN of each node, and should usually be the domain of the `upstream` network.
|
||||||
|
|
||||||
|
#### `recursive_dns_servers`
|
||||||
|
|
||||||
|
* *optional*
|
||||||
|
|
||||||
|
A list of recursive DNS servers to be used by cluster nodes. Defaults to Google Public DNS if unspecified.
|
||||||
|
|
||||||
|
#### `recursive_dns_search_domains`
|
||||||
|
|
||||||
|
* *optional*
|
||||||
|
|
||||||
|
A list of domain names (must explicitly include `local_domain` if desired) to be used for shortname DNS lookups.
|
||||||
|
|
||||||
|
#### `username_ipmi_host`
|
||||||
|
|
||||||
|
* *optional*
|
||||||
|
* *requires* `passwd_ipmi_host`
|
||||||
|
|
||||||
|
The IPMI username used by PVC to communicate with the node management controllers. This user should be created on each node's IPMI before deploying the cluster, and should have, at minimum, permission to read and alter the node's power state.
|
||||||
|
|
||||||
|
#### `passwd_ipmi_host`
|
||||||
|
|
||||||
|
* *optional*
|
||||||
|
* *requires* `username_ipmi_host`
|
||||||
|
|
||||||
|
The IPMI password, in plain text, used by PVC to communicate with the node management controllers.
|
||||||
|
|
||||||
|
Generate using `pwgen -s 16` and adjusting length as required.
|
||||||
|
|
||||||
|
#### `passwd_root`
|
||||||
|
|
||||||
|
* *ignored*
|
||||||
|
|
||||||
|
Used only for reference, the plain-text root password for `passwdhash_root`.
|
||||||
|
|
||||||
|
#### `passwdhash_root`
|
||||||
|
|
||||||
|
* *required*
|
||||||
|
|
||||||
|
The `/etc/shadow`-encoded root password for all nodes.
|
||||||
|
|
||||||
|
Generate using `pwgen -s 16`, adjusting length as required, and encrypt using `mkpasswd -m sha-512 <password> $( pwgen -s 8 )`.
|
||||||
|
|
||||||
|

#### `logrotate_keepcount`

* *required*

The number of `logrotate_interval` periods for which to keep system logs.

#### `logrotate_interval`

* *required*

The interval for rotating system logs. Must be one of: `hourly`, `daily`, `weekly`, `monthly`.

#### `username_email_root`

* *required*

The email address of the root user, at the `local_domain`. Usually `root`, but can be something like `admin` if needed.

#### `hosts`

* *optional*

A list of additional entries for the `/etc/hosts` files on the nodes. Each list element contains the following sub-elements:

##### `name`

The hostname of the entry.

##### `ip`

The IP address of the entry.
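
For instance, a single `hosts` entry might look like the following sketch (the name and address are illustrative only):

```
hosts:
  - name: "buildserver"
    ip: "192.168.100.50"
```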

#### `admin_users`

* *required*

A list of non-root users, their UIDs, and SSH public keys, that are able to access the server. At least one non-root user should be specified to administer the nodes. These users will not have a password set; only key-based login is supported. Each list element contains the following sub-elements:

##### `name`

* *required*

The name of the user.

##### `uid`

* *required*

The Linux UID of the user. Should usually start at 500 and increment for each user.

##### `keys`

* *required*

A list of SSH public key strings, in `authorized_keys` line format, for the user.
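
Putting the sub-elements together, one `admin_users` entry might look like the following sketch (the user, UID, and key are illustrative):

```
admin_users:
  - name: "deploy"
    uid: 500
    keys:
      - "ssh-ed25519 AAAAC3Nza... deploy@workstation"
```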

#### `networks`

* *required*

A dictionary of networks to configure on the nodes.

The key will be used to "name" the interface file under `/etc/network/interfaces.d`, but otherwise the `device` is the real name of the device (e.g. `iface [device] inet ...`).

The three required networks are: `upstream`, `cluster`, `storage`. If `storage` is configured identically to `cluster`, the two networks will be collapsed into one; for details on this, please see the [documentation about the storage network](/cluster-architecture/#storage-connecting-ceph-daemons-with-each-other-and-with-osds).

Additional networks can also be specified here to automate their configuration. In the above example, a "bondU" interface is configured, which the remaining required networks use as their `raw_device`.

Within each `network` element, the following options may be specified:

##### `device`

* *required*

The real network device name.

##### `type`

* *required*

The type of network device. Must be one of: `nic`, `bond`, `vlan`.

##### `bond_mode`

* *required* if `type` is `bond`

The Linux bonding/`ifenslave` mode for the bond. Must be a valid Linux bonding mode.

##### `bond_devices`

* *required* if `type` is `bond`

The list of physical (`nic`) interfaces to bond.

##### `raw_device`

* *required* if `type` is `vlan`

The underlying interface for the vLAN.

##### `mtu`

* *required*

The MTU of the interface. Ensure that the underlying network infrastructure can support the configured MTU.

##### `domain`

* *required*

The domain name for the network. For the "upstream" network, this should usually be `local_domain`.

##### `subnet`

* *required*

The CIDR-formatted subnet of the network. Individual nodes will be configured with specific IPs in this network in a later setting.

##### `floating_ip`

* *required*

A CIDR-formatted IP address in the network to act as the cluster floating IP address. This IP address will follow the primary coordinator.

##### `gateway_ip`

* *optional*

A non-CIDR gateway IP address for the network.

### `pvc.yml`

Example configuration:

```
---
pvc_log_to_file: False
pvc_log_to_stdout: True
pvc_log_colours: False
pvc_log_dates: False
pvc_log_keepalives: True
pvc_log_keepalive_cluster_details: True
pvc_log_keepalive_storage_details: True
pvc_log_console_lines: 1000

pvc_vm_shutdown_timeout: 180
pvc_keepalive_interval: 5
pvc_fence_intervals: 6
pvc_suicide_intervals: 0
pvc_fence_successful_action: migrate
pvc_fence_failed_action: None

pvc_osd_memory_limit: 4294967296
pvc_zookeeper_heap_limit: 256M
pvc_zookeeper_stack_limit: 512M

pvc_api_listen_address: "0.0.0.0"
pvc_api_listen_port: "7370"
pvc_api_secret_key: ""

pvc_api_enable_authentication: False
pvc_api_tokens:
  - description: "myuser"
    token: ""

pvc_api_enable_ssl: False
pvc_api_ssl_cert_path: /etc/ssl/pvc/cert.pem
pvc_api_ssl_cert: >
  -----BEGIN CERTIFICATE-----
  MIIxxx
  -----END CERTIFICATE-----
pvc_api_ssl_key_path: /etc/ssl/pvc/key.pem
pvc_api_ssl_key: >
  -----BEGIN PRIVATE KEY-----
  MIIxxx
  -----END PRIVATE KEY-----

pvc_ceph_storage_secret_uuid: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"

pvc_dns_database_name: "pvcdns"
pvc_dns_database_user: "pvcdns"
pvc_dns_database_password: "xxxxxxxx"
pvc_api_database_name: "pvcapi"
pvc_api_database_user: "pvcapi"
pvc_api_database_password: "xxxxxxxx"
pvc_replication_database_user: "replicator"
pvc_replication_database_password: "xxxxxxxx"
pvc_superuser_database_user: "postgres"
pvc_superuser_database_password: "xxxxxxxx"

pvc_asn: "65500"
pvc_routers:
  - "192.168.100.1"

pvc_nodes:
  - hostname: "pvchv1"
    is_coordinator: yes
    node_id: 1
    router_id: "192.168.100.11"
    upstream_ip: "192.168.100.11"
    upstream_cidr: 24
    cluster_ip: "10.0.0.1"
    cluster_cidr: 24
    storage_ip: "10.0.1.1"
    storage_cidr: 24
    ipmi_host: "pvchv1-lom.{{ local_domain }}"
    ipmi_user: "{{ username_ipmi_host }}"
    ipmi_password: "{{ passwd_ipmi_host }}"
  - hostname: "pvchv2"
    is_coordinator: yes
    node_id: 2
    router_id: "192.168.100.12"
    upstream_ip: "192.168.100.12"
    upstream_cidr: 24
    cluster_ip: "10.0.0.2"
    cluster_cidr: 24
    storage_ip: "10.0.1.2"
    storage_cidr: 24
    ipmi_host: "pvchv2-lom.{{ local_domain }}"
    ipmi_user: "{{ username_ipmi_host }}"
    ipmi_password: "{{ passwd_ipmi_host }}"
  - hostname: "pvchv3"
    is_coordinator: yes
    node_id: 3
    router_id: "192.168.100.13"
    upstream_ip: "192.168.100.13"
    upstream_cidr: 24
    cluster_ip: "10.0.0.3"
    cluster_cidr: 24
    storage_ip: "10.0.1.3"
    storage_cidr: 24
    ipmi_host: "pvchv3-lom.{{ local_domain }}"
    ipmi_user: "{{ username_ipmi_host }}"
    ipmi_password: "{{ passwd_ipmi_host }}"

pvc_bridge_device: bondU
pvc_bridge_mtu: 1500

pvc_sriov_enable: True
pvc_sriov_device:
  - phy: ens1f0
    mtu: 9000
    vfcount: 6

pvc_upstream_device: "{{ networks['upstream']['device'] }}"
pvc_upstream_mtu: "{{ networks['upstream']['mtu'] }}"
pvc_upstream_domain: "{{ networks['upstream']['domain'] }}"
pvc_upstream_subnet: "{{ networks['upstream']['subnet'] }}"
pvc_upstream_floatingip: "{{ networks['upstream']['floating_ip'] }}"
pvc_upstream_gatewayip: "{{ networks['upstream']['gateway_ip'] }}"
pvc_cluster_device: "{{ networks['cluster']['device'] }}"
pvc_cluster_mtu: "{{ networks['cluster']['mtu'] }}"
pvc_cluster_domain: "{{ networks['cluster']['domain'] }}"
pvc_cluster_subnet: "{{ networks['cluster']['subnet'] }}"
pvc_cluster_floatingip: "{{ networks['cluster']['floating_ip'] }}"
pvc_storage_device: "{{ networks['storage']['device'] }}"
pvc_storage_mtu: "{{ networks['storage']['mtu'] }}"
pvc_storage_domain: "{{ networks['storage']['domain'] }}"
pvc_storage_subnet: "{{ networks['storage']['subnet'] }}"
pvc_storage_floatingip: "{{ networks['storage']['floating_ip'] }}"
```

#### `pvc_log_to_file`

* *optional*

Whether to log PVC output to the file `/var/log/pvc/pvc.log`. Must be one of, unquoted: `True`, `False`.

If unset, a default value of "False" is set in the role defaults.

#### `pvc_log_to_stdout`

* *optional*

Whether to log PVC output to stdout, i.e. `journald`. Must be one of, unquoted: `True`, `False`.

If unset, a default value of "True" is set in the role defaults.

#### `pvc_log_colours`

* *optional*

Whether to include ANSI coloured prompts (`>>>`) for status in the log output. Must be one of, unquoted: `True`, `False`.

Requires `journalctl -o cat` or file logging in order to be visible and useful.

If set to False, the prompts will instead be text values.

If unset, a default value of "True" is set in the role defaults.

#### `pvc_log_dates`

* *optional*

Whether to include dates in the log output. Must be one of, unquoted: `True`, `False`.

Requires `journalctl -o cat` or file logging in order to be visible and useful (and not clutter the logs with duplicate dates).

If unset, a default value of "False" is set in the role defaults.

#### `pvc_log_keepalives`

* *optional*

Whether to log the regular keepalive messages. Must be one of, unquoted: `True`, `False`.

If unset, a default value of "True" is set in the role defaults.

#### `pvc_log_keepalive_cluster_details`

* *optional*
* *ignored* if `pvc_log_keepalives` is `False`

Whether to log cluster and node details during keepalive messages. Must be one of, unquoted: `True`, `False`.

If unset, a default value of "True" is set in the role defaults.

#### `pvc_log_keepalive_storage_details`

* *optional*
* *ignored* if `pvc_log_keepalives` is `False`

Whether to log storage cluster details during keepalive messages. Must be one of, unquoted: `True`, `False`.

If unset, a default value of "True" is set in the role defaults.

#### `pvc_log_console_lines`

* *optional*

The number of output console lines to log for each VM, to be used by the console log endpoints (`pvc vm log`).

If unset, a default value of "1000" is set in the role defaults.

#### `pvc_vm_shutdown_timeout`

* *optional*

The number of seconds to wait for a VM to `shutdown` before it is forced off.

A value of "0" disables this functionality.

If unset, a default value of "180" is set in the role defaults.

#### `pvc_keepalive_interval`

* *optional*

The number of seconds between node keepalives.

If unset, a default value of "5" is set in the role defaults.

**WARNING**: Changing this value is not recommended except in exceptional circumstances.

#### `pvc_fence_intervals`

* *optional*

The number of keepalive intervals to be missed before other nodes consider a node `dead` and trigger the fencing process. The total time elapsed will be `pvc_keepalive_interval * pvc_fence_intervals`.

If unset, a default value of "6" is set in the role defaults.

**NOTE**: This is not the total time until a node is fenced. A node has a further 6 (hardcoded) `pvc_keepalive_interval`s ("saving throw" attempts) to try to send a keepalive before it is actually fenced. Thus, with the default values, this works out to a total of 60 +/- 5 seconds between a node crashing and it being fenced. An administrator of a very important cluster may want to set this lower, perhaps to 2 or even 1, leaving only the "saving throws", though this is not recommended for most clusters, due to timing overhead from various other subsystems.
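
To make the arithmetic concrete, a quick shell check using the default values above (6 missed intervals to detect the failure, plus the 6 hardcoded "saving throw" intervals):

```
$ echo $(( (6 + 6) * 5 ))
60
```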

#### `pvc_suicide_intervals`

* *optional*

The number of keepalive intervals without the ability to send a keepalive before a node considers *itself* to be dead and reboots itself.

A value of "0" disables this functionality.

If unset, a default value of "0" is set in the role defaults.

**WARNING**: This option is provided to allow additional flexibility in fencing behaviour. Normally, it is not safe to set a `pvc_fence_failed_action` of `migrate`, since if the other nodes cannot fence a node its VMs cannot be safely started on other nodes. This would also apply to nodes without IPMI-over-LAN which could not be fenced normally. This option provides an alternative way to guarantee this safety, at least in situations where the node can still reliably shut itself down (i.e. it is not hard-locked). The administrator should however take special care and thoroughly test their system before using these alternative fencing options in production, as the results could be disastrous.

#### `pvc_fence_successful_action`

* *optional*

The action the cluster should take upon a successful node fence with respect to running VMs. Must be one of, unquoted: `migrate`, `None`.

If unset, a default value of "migrate" is set in the role defaults.

An administrator can set the value "None" to disable automatic VM recovery migrations after a node fence.

#### `pvc_fence_failed_action`

* *optional*

The action the cluster should take upon a failed node fence with respect to running VMs. Must be one of, unquoted: `migrate`, `None`.

If unset, a default value of "None" is set in the role defaults.

**WARNING**: See the warning in the above `pvc_suicide_intervals` section for details on the purpose of this option. Do not set this option to "migrate" unless you have also set `pvc_suicide_intervals` to a non-"0" value and understand the caveats and risks.

#### `pvc_fence_migrate_target_selector`

* *optional*

The migration selector to use when running a `migrate` command after a node fence. Must be one of, unquoted: `mem`, `load`, `vcpu`, `vms`.

If unset, a default value of "mem" is set in the role defaults.

**NOTE**: These values map to the standard VM meta `selector` options, and determine how nodes select where to run the migrated VMs.

#### `pvc_osd_memory_limit`

* *optional*

The memory limit, in bytes, to pass to the Ceph OSD processes. Only set once, during cluster bootstrap; subsequent changes to this value must be manually made in the `files/*/ceph.conf` static configuration for the cluster in question.

If unset, a default value of "4294967296" (i.e. 4GB) is set in the role defaults.

As per the Ceph documentation, the minimum value possible is "939524096" (i.e. ~1GB), and the default matches the Ceph system default. Setting a lower value is only recommended for systems with relatively low memory availability, where the default of 4GB per OSD is too large; it is recommended to increase the total system memory first before tweaking this setting, to ensure optimal storage performance across all workloads.
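
For post-bootstrap changes, the underlying Ceph option is `osd_memory_target`; assuming the generated `ceph.conf` layout, the manual edit would look something like this sketch:

```
[osd]
osd_memory_target = 4294967296
```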

#### `pvc_zookeeper_heap_limit`

* *optional*

The memory limit to pass to the Zookeeper Java process for its heap.

If unset, a default value of "256M" is set in the role defaults.

The administrator may set this to a lower value on memory-constrained systems, or if the memory usage of the Zookeeper process becomes excessive.

#### `pvc_zookeeper_stack_limit`

* *optional*

The memory limit to pass to the Zookeeper Java process for its stack.

If unset, a default value of "512M" is set in the role defaults.

The administrator may set this to a lower value on memory-constrained systems, or if the memory usage of the Zookeeper process becomes excessive.
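
For orientation, these two limits correspond to the standard JVM heap (`-Xmx`) and stack (`-Xss`) size flags; a sketch of the resulting Java flags, assuming the role passes them through via the usual Zookeeper `JVMFLAGS` mechanism and using the defaults above:

```
JVMFLAGS="-Xmx256M -Xss512M"
```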

#### `pvc_api_listen_address`

* *required*

Address for the API to listen on; `0.0.0.0` indicates all interfaces.

#### `pvc_api_listen_port`

* *required*

Port for the API to listen on.

#### `pvc_api_enable_authentication`

* *required*

Whether to enable authentication on the API. Must be one of, unquoted: `True`, `False`.

#### `pvc_api_secret_key`

* *required*

A secret key used to sign and encrypt API Flask cookies.

Generate using `uuidgen` or `pwgen -s 32`, adjusting length as required.

#### `pvc_api_tokens`

* *required*

A list of API tokens that are allowed to access the PVC API. At least one should be specified. Each list element contains the following sub-elements:

##### `description`

* *required*

A human-readable description of the token. Not parsed anywhere, but used to make this list human-readable and to identify individual tokens by their use.

##### `token`

* *required*

The API token.

Generate using `uuidgen` or `pwgen -s 32`, adjusting length as required.
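
For example, a filled-in token entry might look like the following sketch (the description and UUID are illustrative):

```
pvc_api_tokens:
  - description: "ansible-automation"
    token: "4ab13eb0-3e8a-4a4e-8bd8-2c5ac7e8d1f0"
```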

#### `pvc_api_enable_ssl`

* *required*

Whether to enable SSL for the PVC API. Must be one of, unquoted: `True`, `False`.

#### `pvc_api_ssl_cert_path`

* *optional*
* *required* if `pvc_api_enable_ssl` is `True` and `pvc_api_ssl_cert` is not set.

The path to an (existing) SSL certificate on the node system for the PVC API to use.

#### `pvc_api_ssl_cert`

* *optional*
* *required* if `pvc_api_enable_ssl` is `True` and `pvc_api_ssl_cert_path` is not set.

The SSL certificate, in text form, for the PVC API to use. Will be installed to `/etc/pvc/api-cert.pem` on the node system.

#### `pvc_api_ssl_key_path`

* *optional*
* *required* if `pvc_api_enable_ssl` is `True` and `pvc_api_ssl_key` is not set.

The path to an (existing) SSL private key on the node system for the PVC API to use.

#### `pvc_api_ssl_key`

* *optional*
* *required* if `pvc_api_enable_ssl` is `True` and `pvc_api_ssl_key_path` is not set.

The SSL private key, in text form, for the PVC API to use. Will be installed to `/etc/pvc/api-key.pem` on the node system.

#### `pvc_ceph_storage_secret_uuid`

* *required*

The UUID for Libvirt to communicate with the Ceph storage cluster. This UUID will be used in all VM configurations for the block device.

Generate using `uuidgen`.

#### `pvc_dns_database_name`

* *required*

The name of the PVC DNS aggregator database.

#### `pvc_dns_database_user`

* *required*

The username of the PVC DNS aggregator database user.

#### `pvc_dns_database_password`

* *required*

The password of the PVC DNS aggregator database user.

Generate using `pwgen -s 16`, adjusting length as required.

#### `pvc_api_database_name`

* *required*

The name of the PVC API database.

#### `pvc_api_database_user`

* *required*

The username of the PVC API database user.

#### `pvc_api_database_password`

* *required*

The password of the PVC API database user.

Generate using `pwgen -s 16`, adjusting length as required.

#### `pvc_replication_database_user`

* *required*

The username of the PVC DNS aggregator database replication user.

#### `pvc_replication_database_password`

* *required*

The password of the PVC DNS aggregator database replication user.

Generate using `pwgen -s 16`, adjusting length as required.

#### `pvc_superuser_database_user`

* *required*

The username of the PVC DNS aggregator database superuser.

#### `pvc_superuser_database_password`

* *required*

The password of the PVC DNS aggregator database superuser.

Generate using `pwgen -s 16`, adjusting length as required.

#### `pvc_asn`

* *optional*

The private autonomous system number used for BGP updates to upstream routers.

If unset, a default value of "65001" is set in the role defaults.

#### `pvc_routers`

A list of upstream routers to communicate BGP routes to.

#### `pvc_nodes`

* *required*

A list of all nodes in the PVC cluster and their node-specific configurations. Each node must be present in this list. Each list element contains the following sub-elements:

##### `hostname`

* *required*

The (short) hostname of the node.

##### `is_coordinator`

* *required*

Whether the node is a coordinator. Must be one of, unquoted: `yes`, `no`.

##### `node_id`

* *required*

The ID number of the node. Should normally match the number suffix of the `hostname`.

##### `router_id`

* *required*

The BGP router-id value for upstream route exchange. Should normally match the `upstream_ip`.

##### `upstream_ip`

* *required*

The non-CIDR IP address of the node in the `upstream` network.

##### `upstream_cidr`

* *required*

The CIDR bit mask of the node `upstream_ip` address. Must match the `upstream` network.

##### `cluster_ip`

* *required*

The non-CIDR IP address of the node in the `cluster` network.

##### `cluster_cidr`

* *required*

The CIDR bit mask of the node `cluster_ip` address. Must match the `cluster` network.

##### `storage_ip`

* *required*

The non-CIDR IP address of the node in the `storage` network.

##### `storage_cidr`

* *required*

The CIDR bit mask of the node `storage_ip` address. Must match the `storage` network.

##### `ipmi_host`

* *required*

The IPMI hostname or non-CIDR IP address of the node management controller. Must be reachable by all nodes.

##### `ipmi_user`

* *required*

The IPMI username for the node management controller. Unless a per-host override is required, should usually use the previously-configured global `username_ipmi_host`. All notes from that entry apply.

##### `ipmi_password`

* *required*

The IPMI password for the node management controller. Unless a per-host override is required, should usually use the previously-configured global `passwd_ipmi_host`. All notes from that entry apply.

#### `pvc_bridge_device`

* *required*

The device name of the underlying network interface to be used for "bridged"-type client networks. For each "bridged"-type network, an IEEE 802.1q vLAN and bridge will be created on top of this device to pass these networks. In most cases, using the reflexive `networks['cluster']['raw_device']` or `networks['upstream']['raw_device']` from the Base role is sufficient.

#### `pvc_bridge_mtu`

* *required*

The MTU of the underlying network interface to be used for "bridged"-type client networks. This is the maximum MTU such networks can use.

#### `pvc_sriov_enable`

* *optional*

Whether to enable or disable SR-IOV functionality.

#### `pvc_sriov_device`

* *optional*

A list of SR-IOV devices. See the Daemon manual for details.

#### `pvc_<network>_*`

The next set of entries is hard-coded to use the values from the global `networks` list. It should not need to be changed under most circumstances. Refer to the previous sections for specific notes about each entry.
@@ -4,7 +4,7 @@ The PVC Node Daemon is the heart of the PVC system and runs on each node to mana
 
 The node daemon is built using Python 3.X and is packaged in the Debian package `pvc-daemon`.
 
-Configuration of the daemon is documented in [the manual](/manuals/daemon), however it is recommended to use the [Ansible configuration system](https://github.com/parallelvirtualcluster/pvc-ansible) to configure the PVC cluster for you from scratch.
+Configuration of the daemon is documented in [the manual](/manuals/daemon), however it is recommended to use the [Ansible configuration interface](/manuals/ansible) to configure the PVC system for you from scratch.
 
 ## Overall architecture
 
@@ -60,7 +60,7 @@ The PVC node daemon is built with Python 3 and is run directly on nodes. For de
 
 The Daemon is configured using a YAML configuration file which is passed in to the API process by the environment variable `PVCD_CONFIG_FILE`. When running with the default package and SystemD unit, this file is located at `/etc/pvc/pvcnoded.yaml`.
 
-For most deployments, the management of the configuration file is handled entirely by the [PVC Ansible framework](https://github.com/parallelvirtualcluster/pvc-ansible) and should not be modified directly. Many options from the Ansible framework map directly into the configuration options in this file.
+For most deployments, the management of the configuration file is handled entirely by the [PVC Ansible framework](/manuals/ansible) and should not be modified directly. Many options from the Ansible framework map directly into the configuration options in this file.
 
 ### Conventions
docs/manuals/testing.md (new file)
@@ -0,0 +1,69 @@
# Testing procedures

This manual documents the standard procedures used to test PVC before release. This is a living document and will change frequently as new features are added and new corner cases are found.

As PVC does not currently feature any sort of automated tests, this is the primary way of ensuring functionality is as expected and the various components are operating correctly.

## Basic Tests

### Hypervisors

0. Stop then start all PVC node daemons sequentially, ensure they start up successfully.

0. Observe primary coordinator migration between nodes during startup sequence.

0. Verify reachability of floating IPs on each node across primary coordinator migrations.

0. Manually shuffle primary coordinator between nodes and verify as above (`pvc node primary`).

0. Automatically shuffle primary coordinator between nodes and verify as above (`pvc node secondary`).

### Virtual Machines

0. Deploy a new virtual machine using `vminstall`, with managed networking and storage.

0. Start the VM on the first node, verify reachability over managed network (`pvc vm start`).

0. Verify console logs are operating (`pvc vm log -f`).

0. Migrate VM to another node via auto-selection and back again (`pvc vm migrate` and `pvc vm unmigrate`).

0. Manually shuffle VM between nodes and verify reachability on each node (`pvc vm move`).

0. Kill the VM and ensure restart occurs (`virsh destroy`).

0. Restart the VM (`pvc vm restart`).

0. Shutdown the VM (`pvc vm shutdown`).

0. Forcibly stop the VM (`pvc vm stop`).

### Virtual Networking

0. Create a new managed virtual network (`pvc network add`).

0. Verify network is present on all nodes.

0. Verify network gateway is reachable across all nodes (`pvc node primary`).

## Advanced Tests

### Fencing

0. Trigger node kernel panic and observe fencing behaviour (`echo c | sudo tee /proc/sysrq-trigger`).

0. Verify node is fenced successfully.

0. Verify primary coordinator status transfers successfully.

0. Verify VMs are migrated away from node successfully.

### Ceph Storage

0. Create an RBD volume.

0. Create an RBD snapshot.

0. Remove an RBD snapshot.

0. Remove an RBD volume.
format
@@ -5,14 +5,10 @@ if ! which black &>/dev/null; then
     exit 1
 fi
 
-if [[ $1 == "check" ]]; then
-    check="--check"
-fi
-
 pushd $( git rev-parse --show-toplevel ) &>/dev/null
 
-echo ">>> Formatting..."
-black --safe ${check} --exclude api-daemon/migrations .
+echo "Formatting..."
+black --safe --exclude api-daemon/migrations .
 ret=$?
 if [[ $ret -eq 0 ]]; then
     echo "Successfully formatted project!"
lint
@@ -7,7 +7,7 @@ fi
 
 pushd $( git rev-parse --show-toplevel ) &>/dev/null
 
-echo ">>> Linting..."
+echo "Linting..."
 flake8
 ret=$?
 if [[ $ret -eq 0 ]]; then
@@ -1,325 +0,0 @@
#!/bin/bash
# -*- sh -*-

: << =cut

=head1 NAME

ceph_utilization - Plugin to monitor a Ceph cluster's utilization

=head1 CONFIGURATION

Defaults (no config required) for the total utilization thresholds:

[ceph_utilization]
env.warning 80
env.critical 90

=head1 AUTHOR

Joshua Boniface <joshua@boniface.me>

=head1 LICENSE

GPLv3

=head1 BUGS

=back

=head1 MAGIC MARKERS

#%# family=auto
#%# capabilities=autoconf

=cut

. "$MUNIN_LIBDIR/plugins/plugin.sh"

is_multigraph

warning=80
critical=90

RADOSDF_CMD="/usr/bin/sudo /usr/bin/rados df --format json"
OSDDF_CMD="/usr/bin/sudo /usr/bin/ceph osd df --format json"
JQ_CMD="/usr/bin/jq"

output_usage() {
    echo "This plugin outputs information about a Ceph cluster"
    exit 0
}

output_autoconf() {
    $RADOSDF_CMD &>/dev/null
    radosdf_ret=$?
    $OSDDF_CMD &>/dev/null
    osddf_ret=$?
    $JQ_CMD --version &>/dev/null
    jq_ret=$?

    if [[ ${radosdf_ret} -eq 0 && ${osddf_ret} -eq 0 && ${jq_ret} -eq 0 ]]; then
        echo "yes"
    elif [[ ${radosdf_ret} -ne 0 || ${osddf_ret} -ne 0 ]]; then
        echo "no (no 'rados' or 'ceph' command found)"
    elif [[ ${jq_ret} -ne 0 ]]; then
        echo "no (no 'jq' command found)"
    else
        echo "no (general failure)"
    fi
}

output_config() {
    # Graph set 1 - Ceph cluster utilization
    echo 'multigraph cluster_utilization'
    echo 'graph_title Cluster Utilization'
    echo 'graph_args --base 1000'
    echo 'graph_vlabel % Utilization'
    echo 'graph_category ceph'
    echo 'graph_info This graph shows the cluster utilization.'

    echo 'cluster_utilization.label Cluster Utilization'
    echo 'cluster_utilization.type GAUGE'
    echo 'cluster_utilization.max 100'
    echo 'cluster_utilization.info Percentage utilization of the cluster.'
    print_warning cluster_utilization
    print_critical cluster_utilization

    # Graph set 2 - Ceph cluster objects
    echo 'multigraph cluster_objects'
    echo 'graph_title Cluster Objects'
    echo 'graph_args --base 1000'
    echo 'graph_vlabel Objects'
    echo 'graph_category ceph'
    echo 'graph_info This graph shows the cluster object count.'

    echo 'cluster_objects.label Cluster Objects'
    echo 'cluster_objects.type GAUGE'
    echo 'cluster_objects.min 0'
    echo 'cluster_objects.info Total objects in the cluster.'

    POOL_LIST="$( $RADOSDF_CMD | jq -r '.pools[].name' )"

    # Graph set 3 - Cluster I/O Bytes Lifetime
    echo 'multigraph pool_rdbytes'
    echo "graph_title IO Bytes (Lifetime)"
    echo "graph_args --base 1000"
    echo "graph_vlabel bytes read (-) / write (+)"
    echo "graph_category ceph"
    echo "graph_info This graph shows the lifetime cluster bytes."
    for pool in ${POOL_LIST}; do
        # Graph set 3 - Cluster I/O Bytes Lifetime
        echo "pool_rdbytes_${pool}.label Pool ${pool} IO (Bytes)"
        echo "pool_rdbytes_${pool}.type GAUGE"
        echo "pool_rdbytes_${pool}.min 0"
        echo "pool_rdbytes_${pool}.draw LINE1"
        echo "pool_rdbytes_${pool}.graph no"
        echo "pool_wrbytes_${pool}.label Pool ${pool} IO (Bytes)"
        echo "pool_wrbytes_${pool}.type GAUGE"
        echo "pool_wrbytes_${pool}.min 0"
        echo "pool_wrbytes_${pool}.draw LINE1"
        echo "pool_wrbytes_${pool}.negative pool_rdbytes_${pool}"
    done

    # Graph set 4 - Cluster I/O Operations Lifetime
    echo 'multigraph pool_rdops'
    echo "graph_title IO Operations (Lifetime)"
    echo "graph_args --base 1000"
    echo "graph_vlabel IOs read (-) / write (+)"
    echo "graph_category ceph"
    echo "graph_info This graph shows the lifetime cluster IOs."
    for pool in ${POOL_LIST}; do
        # Graph set 4 - Cluster I/O Operations Lifetime
        echo "pool_rdops_${pool}.label Pool ${pool} IO (Ops)"
        echo "pool_rdops_${pool}.type GAUGE"
        echo "pool_rdops_${pool}.min 0"
        echo "pool_rdops_${pool}.draw LINE1"
        echo "pool_rdops_${pool}.graph no"
        echo "pool_wrops_${pool}.label Pool ${pool} IO (Ops)"
        echo "pool_wrops_${pool}.type GAUGE"
        echo "pool_wrops_${pool}.min 0"
        echo "pool_wrops_${pool}.draw LINE1"
        echo "pool_wrops_${pool}.negative pool_rdops_${pool}"
    done

    # Graph set 5 - Ceph pool objects
    echo 'multigraph pool_objects_total'
    echo "graph_title Objects"
    echo "graph_args --base 1000"
    echo "graph_vlabel Objects"
    echo "graph_category ceph"
    echo "graph_info This graph shows the cluster object count."
    for pool in ${POOL_LIST}; do
        # Graph set 5 - Ceph pool objects
        echo "pool_objects_total_${pool}.label Pool ${pool} Objects"
        echo "pool_objects_total_${pool}.type GAUGE"
        echo "pool_objects_total_${pool}.min 0"
        echo "pool_objects_total_${pool}.info Total objects in the pool."
    done

    # Graph set 6 - Ceph pool objects copies
    echo 'multigraph pool_objects_copies'
    echo "graph_title Objects Copies"
    echo "graph_args --base 1000"
    echo "graph_vlabel Objects"
    echo "graph_category ceph"
    echo "graph_info This graph shows the cluster object copy count."
    for pool in ${POOL_LIST}; do
        # Graph set 6 - Ceph pool objects copies
        echo "pool_objects_copies_${pool}.label Pool ${pool} Objects Copies"
        echo "pool_objects_copies_${pool}.type GAUGE"
        echo "pool_objects_copies_${pool}.min 0"
        echo "pool_objects_copies_${pool}.info Total object copies in the pool."
    done

    # Graph set 7 - Ceph pool objects degraded
    echo 'multigraph pool_objects_degraded'
    echo "graph_title Objects Degraded"
    echo "graph_args --base 1000"
    echo "graph_vlabel Objects"
    echo "graph_category ceph"
    echo "graph_info This graph shows the cluster object degraded count."
    for pool in ${POOL_LIST}; do
        # Graph set 7 - Ceph pool objects degraded
        echo "pool_objects_degraded_${pool}.label Pool ${pool} Objects Degraded"
        echo "pool_objects_degraded_${pool}.type GAUGE"
        echo "pool_objects_degraded_${pool}.min 0"
        echo "pool_objects_degraded_${pool}.info Total degraded objects in the pool."
    done

    OSD_LIST="$( $OSDDF_CMD | jq -r '.nodes[].id' | sort -n )"

    # Graph set 8 - Ceph OSD status
    echo 'multigraph osd_status'
    echo "graph_title OSD Status"
    echo "graph_args --base 1000"
    echo "graph_vlabel Status Up (1) / Down (0)"
    echo "graph_category ceph"
    echo "graph_info This graph shows the OSD status."
    for osd in ${OSD_LIST}; do
        # Graph set 8 - Ceph OSD status
        echo "osd_status_${osd}.label osd.${osd} Status"
        echo "osd_status_${osd}.type GAUGE"
        echo "osd_status_${osd}.min 0"
        echo "osd_status_${osd}.max 1"
        echo "osd_status_${osd}.info Status of the OSD."
    done

    # Graph set 9 - Ceph OSD utilization
    echo 'multigraph osd_utilization'
    echo "graph_title OSD Utilization"
    echo "graph_args --base 1000"
    echo "graph_vlabel % Utilization"
    echo "graph_category ceph"
    echo "graph_info This graph shows the OSD utilization."
    for osd in ${OSD_LIST}; do
        # Graph set 9 - Ceph OSD utilization
        echo "osd_utilization_${osd}.label osd.${osd} Utilization"
        echo "osd_utilization_${osd}.type GAUGE"
        echo "osd_utilization_${osd}.max 100"
        echo "osd_utilization_${osd}.info Utilization of the OSD."
    done

    exit 0
}

output_values() {
    RADOS_JSON_OUTPUT="$( $RADOSDF_CMD )"
    OSD_JSON_OUTPUT="$( $OSDDF_CMD )"

    cluster_utilization="$( $JQ_CMD -r '.total_used' <<<"${RADOS_JSON_OUTPUT}" )"
    cluster_size="$( $JQ_CMD -r '.total_space' <<<"${RADOS_JSON_OUTPUT}" )"
    pct_utilization="$( echo "scale=4; ${cluster_utilization} / ${cluster_size} * 100" | bc -l )"
    cluster_objects="$( $JQ_CMD -r '.total_objects' <<<"${RADOS_JSON_OUTPUT}" )"

    echo "multigraph cluster_utilization"
    echo "cluster_utilization.value ${pct_utilization}"
    echo "multigraph cluster_objects"
    echo "cluster_objects.value ${cluster_objects}"

    cluster_pool_count="$( $JQ_CMD -r '.pools[].name' <<<"${RADOS_JSON_OUTPUT}" | wc -l )"
    echo "multigraph pool_rdbytes"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_rdbytes="$( $JQ_CMD -r ".pools[$id].read_bytes" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_wrbytes="$( $JQ_CMD -r ".pools[$id].write_bytes" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_rdbytes_${pool}.value ${pool_rdbytes}"
        echo "pool_wrbytes_${pool}.value ${pool_wrbytes}"
    done

    echo "multigraph pool_rdops"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_rdops="$( $JQ_CMD -r ".pools[$id].read_ops" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_wrops="$( $JQ_CMD -r ".pools[$id].write_ops" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_rdops_${pool}.value ${pool_rdops}"
        echo "pool_wrops_${pool}.value ${pool_wrops}"
    done

    echo "multigraph pool_objects_total"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_objects="$( $JQ_CMD -r ".pools[$id].num_objects" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_objects_total_${pool}.value ${pool_objects}"
    done

    echo "multigraph pool_objects_copies"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_copies="$( $JQ_CMD -r ".pools[$id].num_object_copies" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_objects_copies_${pool}.value ${pool_copies}"
    done

    echo "multigraph pool_objects_degraded"
    for id in $( seq 0 $(( ${cluster_pool_count} - 1 )) ); do
        pool="$( $JQ_CMD -r ".pools[$id].name" <<<"${RADOS_JSON_OUTPUT}" )"
        pool_degraded="$( $JQ_CMD -r ".pools[$id].num_objects_degraded" <<<"${RADOS_JSON_OUTPUT}" )"
        echo "pool_objects_degraded_${pool}.value ${pool_degraded}"
    done

    cluster_osd_count="$( $JQ_CMD -r '.nodes[].id' <<<"${OSD_JSON_OUTPUT}" | wc -l )"
    echo "multigraph osd_status"
    for id in $( seq 0 $(( ${cluster_osd_count} - 1 )) ); do
        osd="$( $JQ_CMD -r ".nodes[$id].id" <<<"${OSD_JSON_OUTPUT}" )"
        osd_status="$( $JQ_CMD -r ".nodes[$id].status" <<<"${OSD_JSON_OUTPUT}" )"
        case ${osd_status} in
            up)
                osd_status="1"
                ;;
            *)
                osd_status="0"
                ;;
        esac
        echo "osd_status_${osd}.value ${osd_status}"
    done

    echo "multigraph osd_utilization"
    for id in $( seq 0 $(( ${cluster_osd_count} - 1 )) ); do
        osd="$( $JQ_CMD -r ".nodes[$id].id" <<<"${OSD_JSON_OUTPUT}" )"
        osd_utilization="$( $JQ_CMD -r ".nodes[$id].utilization" <<<"${OSD_JSON_OUTPUT}" )"
        echo "osd_utilization_${osd}.value ${osd_utilization}"
    done
}

case $# in
    0)
        output_values
        ;;
    1)
        case $1 in
            autoconf)
                output_autoconf
                ;;
            config)
                output_config
                ;;
            *)
                output_usage
                exit 1
                ;;
        esac
        ;;
    *)
        output_usage
        exit 1
esac
@@ -2,8 +2,7 @@
 
 [Unit]
 Description = Parallel Virtual Cluster node daemon
-After = network.target
-Wants = network-online.target
+After = network-online.target
 PartOf = pvc.target
 
 [Service]
@@ -48,7 +48,7 @@ import re
 import json
 
 # Daemon version
-version = "0.9.45"
+version = "0.9.42"
 
 ##########################################################