Compare commits
430 Commits
eca726f7a5
...
v0.9.72
Author | SHA1 | Date | |
---|---|---|---|
fdda47e8a2 | |||
a5ffe373cd | |||
bb2aac145d | |||
a7c1b91f60 | |||
ec6d3351b2 | |||
22faaa9bbc | |||
6c407d54c3 | |||
9ba7aa5b08 | |||
cb413e5ce6 | |||
123499f75f | |||
83b8ce7b62 | |||
5e43f9bd7c | |||
ed087d83c2 | |||
83d475bd15 | |||
3d5cdf2b23 | |||
705ec802a3 | |||
47d7b23763 | |||
0bae729a18 | |||
b968110e9f | |||
4898ae5082 | |||
249e1568a1 | |||
0b90f37518 | |||
33205273dc | |||
1e083d7652 | |||
65d2b7869c | |||
66aee73f1d | |||
075dbe7cc9 | |||
2ff7a6865b | |||
2002394a51 | |||
0e8bdfad15 | |||
b5f996febd | |||
3a4914fa5e | |||
dcda7b5748 | |||
ae7950e9b7 | |||
d769071799 | |||
e298d10561 | |||
fc8cf9ed44 | |||
4ccdd6347e | |||
b32f478633 | |||
cf442fcc2d | |||
b753f85410 | |||
d2bcaec28f | |||
a70273dbae | |||
30ebd6b42c | |||
b2e6feeba3 | |||
c9b06ffdb2 | |||
a032dcc5c8 | |||
01122415f6 | |||
bd3e3829b3 | |||
e01bbe9764 | |||
3e7953531c | |||
c7b7ad0cf7 | |||
776daac267 | |||
653b95ee25 | |||
59c9d89986 | |||
e294e1c087 | |||
4685ba1ec4 | |||
969091ed22 | |||
148f04b256 | |||
dc9e43fbee | |||
d8dcec254d | |||
3a90fda109 | |||
78322f4de4 | |||
c1782c5004 | |||
9114255af5 | |||
b26bb5cb65 | |||
74c4ce3ec7 | |||
2c3a3cdf52 | |||
0b583bfdaf | |||
7c07fbefff | |||
202dc3ed59 | |||
8667f4d03b | |||
4c2d99f8a6 | |||
bcff6650d0 | |||
a11206253d | |||
7f57c6dbf7 | |||
6865979e08 | |||
5126bc3272 | |||
765f0ef13d | |||
fe258d9d56 | |||
93d89a2414 | |||
a49f3810d3 | |||
45ad3b9a17 | |||
07623fad1a | |||
8331b7ecd8 | |||
94d4ee5b9b | |||
e773211293 | |||
32c36c866b | |||
dc4e56db4b | |||
e45b3108a2 | |||
118237a53b | |||
9805681f94 | |||
6c9abb2abe | |||
a1122c6e71 | |||
3696f81597 | |||
5ca0d903b6 | |||
6ddbde763e | |||
626424b74a | |||
b3d99827f5 | |||
c9ceb3159b | |||
6525a2568b | |||
09a005d3d7 | |||
96defebd0b | |||
d00b8aa6cd | |||
e9aa545e9b | |||
fb0fcc0597 | |||
3009f24910 | |||
5ae836f1c5 | |||
70ba364f1d | |||
eda1b95d5f | |||
3bd93563e6 | |||
1f8561d59a | |||
a2efc83953 | |||
f2d2537e1c | |||
1093ca6264 | |||
15ff729f83 | |||
29584e5636 | |||
f4e8449356 | |||
388f6556c0 | |||
ec79acf061 | |||
6c7be492b8 | |||
00586074cf | |||
f4eef30770 | |||
8565cf26b3 | |||
0ecf219910 | |||
0f4edc54d1 | |||
ca91be51e1 | |||
e29d0e89eb | |||
14d29f2986 | |||
bc88d764b0 | |||
a3c31564ca | |||
b07396c39a | |||
71139fa66d | |||
e6f9e6e0e8 | |||
1ea4800212 | |||
9c14d84bfc | |||
d8f346abdd | |||
2ee52e44d3 | |||
3c742a827b | |||
aeb238f43c | |||
671a907236 | |||
e945fd8590 | |||
a49510ecc8 | |||
6d7730ab52 | |||
8135426973 | |||
20d436a745 | |||
28f6819726 | |||
35c07f0384 | |||
6127387be4 | |||
343d66875b | |||
92feeefd26 | |||
38d63d9837 | |||
095bcb2373 | |||
91e450f399 | |||
79eb994a5e | |||
d65f512897 | |||
8af7189dd0 | |||
ea7a4b2b85 | |||
59f97ebbfb | |||
072337f1f0 | |||
c3bc55eff8 | |||
6c58d52fa1 | |||
666e02fbfd | |||
46dde055c4 | |||
ef437c3dbf | |||
bd2208e8f6 | |||
62d5ff11df | |||
0019881cfa | |||
d46133802b | |||
fcadde057e | |||
2608f38d64 | |||
89f05ced3f | |||
729481126c | |||
41eccb9c7d | |||
e550e39a5a | |||
dff156b2b0 | |||
1c4fb80d1f | |||
ec7beb08cc | |||
3a180193ee | |||
e26ff8a975 | |||
6276414702 | |||
a34d64a71b | |||
71297e0179 | |||
45c9909428 | |||
7268592c87 | |||
726d0a562b | |||
39e1fc50ed | |||
7a3870fc44 | |||
bffab7a5a1 | |||
6cbaeb5dc8 | |||
58ce133c8d | |||
43feb33caa | |||
3a5d8c61da | |||
1e0b502250 | |||
fe17d28385 | |||
8aaac33056 | |||
cc7952c232 | |||
16915ed507 | |||
2c624ceb2c | |||
da85480488 | |||
47b0704555 | |||
7c49967586 | |||
e3f96ac87e | |||
4df70cf086 | |||
f1df1cfe93 | |||
5942aa50fc | |||
096bcdfd75 | |||
239c392892 | |||
172d0a86e4 | |||
d8e57a26c5 | |||
9b499b9f48 | |||
881550b610 | |||
2a21d48128 | |||
8d0f26ff7a | |||
bcabd7d079 | |||
05a316cdd6 | |||
4b36753f27 | |||
171f6ac9ed | |||
645b525ad7 | |||
ec559aec0d | |||
71ffd5a191 | |||
2739c27299 | |||
56129a3636 | |||
932b3c55a3 | |||
92e2ff7449 | |||
d8d3feee22 | |||
b1357cafdb | |||
f8cdcb30ba | |||
51ad2058ed | |||
c401a1f655 | |||
7a40c7a55b | |||
8027a6efdc | |||
3801fcc07b | |||
c741900baf | |||
464f0e0356 | |||
cea8832f90 | |||
5807351405 | |||
d6ca74376a | |||
413100a147 | |||
4d698be34b | |||
53aed0a735 | |||
ea709f573f | |||
1142454934 | |||
bbfad340a1 | |||
c73939e1c5 | |||
25fe45dd28 | |||
58d57d7037 | |||
00d2c67c41 | |||
67131de4f6 | |||
abc23ebb18 | |||
9f122e916f | |||
3ce4d90693 | |||
6ccd19e636 | |||
d8689e6eaa | |||
bc49b5eca2 | |||
8470dfaa29 | |||
f164d898c1 | |||
195f31501c | |||
a8899a1d66 | |||
817dffcf30 | |||
eda2a57a73 | |||
135d28e60b | |||
e7d7378bae | |||
799c3e8d5d | |||
d0ec24f690 | |||
6e9fcd38a3 | |||
f51f9fc4c8 | |||
a6dcffc737 | |||
364c190106 | |||
ea19af6494 | |||
7069d3237c | |||
619c3f7ff5 | |||
8a75bb3011 | |||
a817c3e678 | |||
0cc3f2deab | |||
21b4bbe51a | |||
87ec31c023 | |||
0d857d5ab8 | |||
006f40f195 | |||
5f193a6134 | |||
78faa90139 | |||
23b1501f40 | |||
66bfad3109 | |||
eee5c25d6f | |||
ff4fc18a60 | |||
ac885b855a | |||
b9c30baf80 | |||
9b12cc0236 | |||
c41664d2da | |||
3779bc960e | |||
5c620262e9 | |||
6b88fbd1e3 | |||
a50c8e6a4d | |||
7d6e4353f1 | |||
bf30b31db6 | |||
70bd601dc1 | |||
2e7b9b28b3 | |||
12eef58d42 | |||
f2e6892fd2 | |||
91fb9e1241 | |||
d87bea4159 | |||
3a6f442856 | |||
dfca998adf | |||
55f397a347 | |||
dfebb2d3e5 | |||
e88147db4a | |||
b8204d89ac | |||
fe73dfbdc9 | |||
8f906c1f81 | |||
2d9fb9688d | |||
fb84685c2a | |||
032ba44d9c | |||
b7761877e7 | |||
1fe07640b3 | |||
b8d843ebe4 | |||
95d983ddff | |||
4c5da1b6a8 | |||
be6b1e02e3 | |||
ec2a72ed4b | |||
b06e327add | |||
d1f32d2b9c | |||
3f78ca1cc9 | |||
e866335918 | |||
221494ed1b | |||
f13cc04b89 | |||
4ed537ee3b | |||
95e01f38d5 | |||
3122d73bf5 | |||
7ed8ef179c | |||
caead02b2a | |||
87bc5f93e6 | |||
203893559e | |||
2c51bb0705 | |||
46d3daf686 | |||
e9d05aa24e | |||
d2c18d7b46 | |||
6ce28c43af | |||
87cda72ca9 | |||
8f71a6d2f6 | |||
c45f8f5bd5 | |||
24de0f4189 | |||
3690a2c1e0 | |||
50d8aa0586 | |||
db6e65712d | |||
cf8e16543c | |||
1a4fcdcc2d | |||
9a71db0800 | |||
6ee4c55071 | |||
c27359c4bf | |||
46078932c3 | |||
c89699bc6f | |||
1b9507e4f5 | |||
3db7ac48f4 | |||
1830ec6465 | |||
bdb9db8375 | |||
c61d7bc313 | |||
c0f7ba0125 | |||
761032b321 | |||
3566e13e79 | |||
6b324029cf | |||
13eeabf44b | |||
d86768d3d0 | |||
a167757600 | |||
a95d9680ac | |||
63962f10ba | |||
a7a681d92a | |||
da9248cfa2 | |||
aa035a61a7 | |||
7c8ba56561 | |||
bba73980de | |||
32b3af697c | |||
7c122ac921 | |||
0dbf139706 | |||
c909beaf6d | |||
2da49297d2 | |||
0ff9a6b8c4 | |||
28377178d2 | |||
e06b114c48 | |||
0058f19d88 | |||
056cf3740d | |||
58f174b87b | |||
37b98fd54f | |||
f83a345bfe | |||
ce06e4d81b | |||
23977b04fc | |||
bb1cca522f | |||
9a4dce4e4c | |||
f6f6f07488 | |||
142c999ce8 | |||
1de069298c | |||
55221b3d97 | |||
0d72798814 | |||
3638efc77e | |||
c2c888d684 | |||
febef2e406 | |||
2a4f38e933 | |||
3b805cdc34 | |||
06f0f7ed91 | |||
fd040ab45a | |||
e23e2dd9bf | |||
ee4266f8ca | |||
0f02c5eaef | |||
075abec5fe | |||
3a1cbf8d01 | |||
a438a4155a | |||
65df807b09 | |||
d0f3e9e285 | |||
adc8a5a3bc | |||
df277edf1c | |||
772807deb3 | |||
58db537093 | |||
e71a6c90bf | |||
a8e9a56924 | |||
f3fb492633 | |||
e962743e51 | |||
46f1d761f6 | |||
be954c1625 | |||
fb46f5f9e9 | |||
694b8e85a0 | |||
eb321497ee | |||
5b81e59481 | |||
a4c0e0befd | |||
a18cef5f25 | |||
f6c5aa9992 | |||
ffa3dd5edb | |||
afb0359c20 | |||
afdf254297 | |||
42e776fac1 | |||
dae67a1b7b | |||
b86f8c1e09 |
4
.flake8
4
.flake8
@ -3,7 +3,9 @@
|
||||
# * W503 (line break before binary operator): Black moves these to new lines
|
||||
# * E501 (line too long): Long lines are a fact of life in comment blocks; Black handles active instances of this
|
||||
# * E203 (whitespace before ':'): Black recommends this as disabled
|
||||
ignore = W503, E501
|
||||
# * F403 (import * used; unable to detect undefined names): We use a wildcard for helpers
|
||||
# * F405 (possibly undefined name): We use a wildcard for helpers
|
||||
ignore = W503, E501, F403, F405
|
||||
extend-ignore = E203
|
||||
# We exclude the Debian, migrations, and provisioner examples
|
||||
exclude = debian,api-daemon/migrations/versions,api-daemon/provisioner/examples,node-daemon/monitoring
|
||||
|
56
CHANGELOG.md
56
CHANGELOG.md
@ -1,5 +1,61 @@
|
||||
## PVC Changelog
|
||||
|
||||
###### [v0.9.72](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.72)
|
||||
|
||||
* [CLI] Restores old functionality for default node value
|
||||
|
||||
###### [v0.9.71](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.71)
|
||||
|
||||
* [API] Adds API support for Debian Bookworm
|
||||
|
||||
###### [v0.9.70](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.70)
|
||||
|
||||
* [Node Daemon] Fixes several compatibility issues for Debian 12 "Bookworm"
|
||||
|
||||
###### [v0.9.69](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.69)
|
||||
|
||||
* [Node Daemon] Ensures that system load is always 2 decimal places on Bookworm
|
||||
* [Node Daemon] Fixes bug blocking primary takeover at DNS Aggregator start if Patroni is down
|
||||
|
||||
###### [v0.9.68](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.68)
|
||||
|
||||
* [CLI] Fixes another bug with network info view
|
||||
|
||||
###### [v0.9.67](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.67)
|
||||
|
||||
* [CLI] Fixes several more bugs in the refactored CLI
|
||||
|
||||
###### [v0.9.66](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.66)
|
||||
|
||||
* [CLI] Fixes a missing YAML import in CLI
|
||||
|
||||
###### [v0.9.65](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.65)
|
||||
|
||||
* [CLI] Fixes a bug in the node list filtering command
|
||||
* [CLI] Fixes a bug/default when no connection is specified
|
||||
|
||||
###### [v0.9.64](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.64)
|
||||
|
||||
**Breaking Change [CLI]**: The CLI client root commands have been reorganized. The following commands have changed:
|
||||
|
||||
* `pvc cluster` -> `pvc connection` (all subcommands)
|
||||
* `pvc task` -> `pvc cluster` (all subcommands)
|
||||
* `pvc maintenance` -> `pvc cluster maintenance`
|
||||
* `pvc status` -> `pvc cluster status`
|
||||
|
||||
Ensure you have updated to the latest version of the PVC Ansible repository before deploying this version or using PVC Ansible oneshot playbooks for management.
|
||||
|
||||
**Breaking Change [CLI]**: The `--restart` option for VM configuration changes now has an explicit `--no-restart` to disable restarting, or a prompt if neither is specified; `--unsafe` no longer bypasses this prompt which was a bug. Applies to most `vm <cmd> set` commands like `vm vcpu set`, `vm memory set`, etc. All instances also feature restart confirmation afterwards, which, if `--restart` is provided, will prompt for confirmation unless `--yes` or `--unsafe` is specified.
|
||||
|
||||
**Breaking Change [CLI]**: The `--long` option previously on some `info` commands no longer exists; use `-f long`/`--format long` instead.
|
||||
|
||||
* [CLI] Significantly refactors the CLI client code for consistency and cleanliness
|
||||
* [CLI] Implements `-f`/`--format` options for all `list` and `info` commands in a consistent way
|
||||
* [CLI] Changes the behaviour of VM modification options with "--restart" to provide a "--no-restart"; defaults to a prompt if neither is specified and ignores the "--unsafe" global entirely
|
||||
* [API] Fixes several bugs in the 3-debootstrap.py provisioner example script
|
||||
* [Node] Fixes some bugs around VM shutdown on node flush
|
||||
* [Documentation] Adds mentions of Ganeti and Harvester
|
||||
|
||||
###### [v0.9.63](https://github.com/parallelvirtualcluster/pvc/releases/tag/v0.9.63)
|
||||
|
||||
* Mentions Ganeti in the docs
|
||||
|
@ -441,7 +441,7 @@ class VMBuilderScript(VMBuilder):
|
||||
|
||||
# The directory we mounted things on earlier during prepare(); this could very well
|
||||
# be exposed as a module-level variable if you so choose
|
||||
temporary_directory = "/tmp/target"
|
||||
temp_dir = "/tmp/target"
|
||||
|
||||
# Use these convenient aliases for later (avoiding lots of "self.vm_data" everywhere)
|
||||
vm_name = self.vm_name
|
||||
@ -469,6 +469,8 @@ class VMBuilderScript(VMBuilder):
|
||||
"grub-pc",
|
||||
"cloud-init",
|
||||
"python3-cffi-backend",
|
||||
"acpid",
|
||||
"acpi-support-base",
|
||||
"wget",
|
||||
]
|
||||
|
||||
@ -482,17 +484,17 @@ class VMBuilderScript(VMBuilder):
|
||||
|
||||
# Perform a debootstrap installation
|
||||
print(
|
||||
f"Installing system with debootstrap: debootstrap --include={','.join(deb_packages)} {deb_release} {temporary_directory} {deb_mirror}"
|
||||
f"Installing system with debootstrap: debootstrap --include={','.join(deb_packages)} {deb_release} {temp_dir} {deb_mirror}"
|
||||
)
|
||||
os.system(
|
||||
f"debootstrap --include={','.join(deb_packages)} {deb_release} {temporary_directory} {deb_mirror}"
|
||||
f"debootstrap --include={','.join(deb_packages)} {deb_release} {temp_dir} {deb_mirror}"
|
||||
)
|
||||
|
||||
# Bind mount the devfs so we can grub-install later
|
||||
os.system("mount --bind /dev {}/dev".format(temporary_directory))
|
||||
os.system("mount --bind /dev {}/dev".format(temp_dir))
|
||||
|
||||
# Create an fstab entry for each volume
|
||||
fstab_file = "{}/etc/fstab".format(temporary_directory)
|
||||
fstab_file = "{}/etc/fstab".format(temp_dir)
|
||||
# The volume ID starts at zero and increments by one for each volume in the fixed-order
|
||||
# volume list. This lets us work around the insanity of Libvirt IDs not matching guest IDs,
|
||||
# while still letting us have some semblance of control here without enforcing things
|
||||
@ -537,13 +539,13 @@ class VMBuilderScript(VMBuilder):
|
||||
volume_id += 1
|
||||
|
||||
# Write the hostname; you could also take an FQDN argument for this as an example
|
||||
hostname_file = "{}/etc/hostname".format(temporary_directory)
|
||||
hostname_file = "{}/etc/hostname".format(temp_dir)
|
||||
with open(hostname_file, "w") as fh:
|
||||
fh.write("{}".format(vm_name))
|
||||
|
||||
# Fix the cloud-init.target since it's broken by default in Debian 11
|
||||
cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
|
||||
temporary_directory
|
||||
temp_dir
|
||||
)
|
||||
with open(cloudinit_target_file, "w") as fh:
|
||||
# We lose our indent on these raw blocks to preserve the apperance of the files
|
||||
@ -557,7 +559,7 @@ After=multi-user.target
|
||||
fh.write(data)
|
||||
|
||||
# Write the cloud-init configuration
|
||||
ci_cfg_file = "{}/etc/cloud/cloud.cfg".format(temporary_directory)
|
||||
ci_cfg_file = "{}/etc/cloud/cloud.cfg".format(temp_dir)
|
||||
with open(ci_cfg_file, "w") as fh:
|
||||
fh.write(
|
||||
"""
|
||||
@ -618,15 +620,15 @@ After=multi-user.target
|
||||
- arches: [default]
|
||||
failsafe:
|
||||
primary: {deb_mirror}
|
||||
"""
|
||||
).format(deb_mirror=deb_mirror)
|
||||
""".format(
|
||||
deb_mirror=deb_mirror
|
||||
)
|
||||
)
|
||||
|
||||
# Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
|
||||
# will always be on PCI bus ID 2, hence the name "ens2".
|
||||
# Write a DHCP stanza for ens2
|
||||
ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(
|
||||
temporary_directory
|
||||
)
|
||||
ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(temp_dir)
|
||||
with open(ens2_network_file, "w") as fh:
|
||||
data = """auto ens2
|
||||
iface ens2 inet dhcp
|
||||
@ -634,7 +636,7 @@ iface ens2 inet dhcp
|
||||
fh.write(data)
|
||||
|
||||
# Write the DHCP config for ens2
|
||||
dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
|
||||
dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temp_dir)
|
||||
with open(dhclient_file, "w") as fh:
|
||||
# We can use fstrings too, since PVC will always have Python 3.6+, though
|
||||
# using format() might be preferable for clarity in some situations
|
||||
@ -654,7 +656,7 @@ interface "ens2" {{
|
||||
fh.write(data)
|
||||
|
||||
# Write the GRUB configuration
|
||||
grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
|
||||
grubcfg_file = "{}/etc/default/grub".format(temp_dir)
|
||||
with open(grubcfg_file, "w") as fh:
|
||||
data = """# Written by the PVC provisioner
|
||||
GRUB_DEFAULT=0
|
||||
@ -671,7 +673,7 @@ GRUB_DISABLE_LINUX_UUID=false
|
||||
fh.write(data)
|
||||
|
||||
# Do some tasks inside the chroot using the provided context manager
|
||||
with chroot(temporary_directory):
|
||||
with chroot(temp_dir):
|
||||
# Install and update GRUB
|
||||
os.system(
|
||||
"grub-install --force /dev/rbd/{}/{}_{}".format(
|
||||
@ -704,16 +706,17 @@ GRUB_DISABLE_LINUX_UUID=false
|
||||
"""
|
||||
|
||||
# Run any imports first
|
||||
import os
|
||||
from pvcapid.vmbuilder import open_zk
|
||||
from pvcapid.Daemon import config
|
||||
import daemon_lib.common as pvc_common
|
||||
import daemon_lib.ceph as pvc_ceph
|
||||
|
||||
# Set the tempdir we used in the prepare() and install() steps
|
||||
# Set the temp_dir we used in the prepare() and install() steps
|
||||
temp_dir = "/tmp/target"
|
||||
|
||||
# Unmount the bound devfs
|
||||
os.system("umount {}/dev".format(temporary_directory))
|
||||
os.system("umount {}/dev".format(temp_dir))
|
||||
|
||||
# Use this construct for reversing the list, as the normal reverse() messes with the list
|
||||
for volume in list(reversed(self.vm_data["volumes"])):
|
||||
|
@ -11,5 +11,16 @@ if [[ ! -f ${PVC_CONFIG_FILE} ]]; then
|
||||
fi
|
||||
|
||||
pushd /usr/share/pvc
|
||||
./pvcapid-manage.py db upgrade
|
||||
|
||||
case "$( cat /etc/debian_version )" in
|
||||
10.*|11.*)
|
||||
# Debian 10 & 11
|
||||
./pvcapid-manage_legacy.py db upgrade
|
||||
;;
|
||||
*)
|
||||
# Debian 12+
|
||||
flask --app ./pvcapid-manage_flask.py db upgrade
|
||||
;;
|
||||
esac
|
||||
|
||||
popd
|
||||
|
29
api-daemon/pvcapid-manage_flask.py
Executable file
29
api-daemon/pvcapid-manage_flask.py
Executable file
@ -0,0 +1,29 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# pvcapid-manage_flask.py - PVC Database management tasks (via Flask CLI)
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
from pvcapid.flaskapi import app, db
|
||||
from pvcapid.models import * # noqa F401,F403
|
||||
|
||||
from flask_migrate import Migrate
|
||||
|
||||
migrate = Migrate(app, db)
|
||||
|
||||
# Call flask --app /usr/share/pvc/pvcapid-manage_flask.py db upgrade
|
@ -1,6 +1,6 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# manage.py - PVC Database management tasks
|
||||
# pvcapid-manage_legacy.py - PVC Database management tasks (Legacy)
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
@ -19,8 +19,7 @@
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
from flask_migrate import Migrate, MigrateCommand
|
||||
from flask_script import Manager
|
||||
from flask_migrate import Migrate, MigrateCommand, Manager
|
||||
|
||||
from pvcapid.flaskapi import app, db
|
||||
from pvcapid.models import * # noqa F401,F403
|
@ -27,12 +27,8 @@ case "$( cat /etc/debian_version )" in
|
||||
10.*)
|
||||
CELERY_ARGS="worker --app pvcapid.flaskapi.celery --concurrency 1 --loglevel INFO"
|
||||
;;
|
||||
11.*)
|
||||
CELERY_ARGS="--app pvcapid.flaskapi.celery worker --concurrency 1 --loglevel INFO"
|
||||
;;
|
||||
*)
|
||||
echo "Invalid Debian version found!"
|
||||
exit 1
|
||||
CELERY_ARGS="--app pvcapid.flaskapi.celery worker --concurrency 1 --loglevel INFO"
|
||||
;;
|
||||
esac
|
||||
|
||||
|
@ -27,7 +27,7 @@ from ssl import SSLContext, TLSVersion
|
||||
from distutils.util import strtobool as dustrtobool
|
||||
|
||||
# Daemon version
|
||||
version = "0.9.63"
|
||||
version = "0.9.72"
|
||||
|
||||
# API version
|
||||
API_VERSION = 1.0
|
||||
|
@ -1,116 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# cluster.py - PVC CLI client function library, cluster management
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
import json
|
||||
|
||||
from pvc.lib.common import call_api
|
||||
|
||||
|
||||
def initialize(config, overwrite=False):
|
||||
"""
|
||||
Initialize the PVC cluster
|
||||
|
||||
API endpoint: GET /api/v1/initialize
|
||||
API arguments: overwrite, yes-i-really-mean-it
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
params = {"yes-i-really-mean-it": "yes", "overwrite": overwrite}
|
||||
response = call_api(config, "post", "/initialize", params=params)
|
||||
|
||||
if response.status_code == 200:
|
||||
retstatus = True
|
||||
else:
|
||||
retstatus = False
|
||||
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def backup(config):
|
||||
"""
|
||||
Get a JSON backup of the cluster
|
||||
|
||||
API endpoint: GET /api/v1/backup
|
||||
API arguments:
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
response = call_api(config, "get", "/backup")
|
||||
|
||||
if response.status_code == 200:
|
||||
return True, response.json()
|
||||
else:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
|
||||
def restore(config, cluster_data):
|
||||
"""
|
||||
Restore a JSON backup to the cluster
|
||||
|
||||
API endpoint: POST /api/v1/restore
|
||||
API arguments: yes-i-really-mean-it
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
cluster_data_json = json.dumps(cluster_data)
|
||||
|
||||
params = {"yes-i-really-mean-it": "yes"}
|
||||
data = {"cluster_data": cluster_data_json}
|
||||
response = call_api(config, "post", "/restore", params=params, data=data)
|
||||
|
||||
if response.status_code == 200:
|
||||
retstatus = True
|
||||
else:
|
||||
retstatus = False
|
||||
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def maintenance_mode(config, state):
|
||||
"""
|
||||
Enable or disable PVC cluster maintenance mode
|
||||
|
||||
API endpoint: POST /api/v1/status
|
||||
API arguments: {state}={state}
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
params = {"state": state}
|
||||
response = call_api(config, "post", "/status", params=params)
|
||||
|
||||
if response.status_code == 200:
|
||||
retstatus = True
|
||||
else:
|
||||
retstatus = False
|
||||
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def get_info(config):
|
||||
"""
|
||||
Get status of the PVC cluster
|
||||
|
||||
API endpoint: GET /api/v1/status
|
||||
API arguments:
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
response = call_api(config, "get", "/status")
|
||||
|
||||
if response.status_code == 200:
|
||||
return True, response.json()
|
||||
else:
|
||||
return False, response.json().get("message", "")
|
33
client-cli-old/pvc.py
Executable file
33
client-cli-old/pvc.py
Executable file
@ -0,0 +1,33 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# pvc.py - PVC client command-line interface (stub testing interface)
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
import pvc.pvc
|
||||
|
||||
|
||||
#
|
||||
# Main entry point
|
||||
#
|
||||
def main():
|
||||
return pvc.pvc.cli(obj={})
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
313
client-cli-old/pvc/lib/cluster.py
Normal file
313
client-cli-old/pvc/lib/cluster.py
Normal file
@ -0,0 +1,313 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# cluster.py - PVC CLI client function library, cluster management
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
import json
|
||||
|
||||
import pvc.lib.ansiprint as ansiprint
|
||||
from pvc.lib.common import call_api
|
||||
|
||||
|
||||
def initialize(config, overwrite=False):
|
||||
"""
|
||||
Initialize the PVC cluster
|
||||
|
||||
API endpoint: GET /api/v1/initialize
|
||||
API arguments: overwrite, yes-i-really-mean-it
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
params = {"yes-i-really-mean-it": "yes", "overwrite": overwrite}
|
||||
response = call_api(config, "post", "/initialize", params=params)
|
||||
|
||||
if response.status_code == 200:
|
||||
retstatus = True
|
||||
else:
|
||||
retstatus = False
|
||||
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def backup(config):
|
||||
"""
|
||||
Get a JSON backup of the cluster
|
||||
|
||||
API endpoint: GET /api/v1/backup
|
||||
API arguments:
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
response = call_api(config, "get", "/backup")
|
||||
|
||||
if response.status_code == 200:
|
||||
return True, response.json()
|
||||
else:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
|
||||
def restore(config, cluster_data):
|
||||
"""
|
||||
Restore a JSON backup to the cluster
|
||||
|
||||
API endpoint: POST /api/v1/restore
|
||||
API arguments: yes-i-really-mean-it
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
cluster_data_json = json.dumps(cluster_data)
|
||||
|
||||
params = {"yes-i-really-mean-it": "yes"}
|
||||
data = {"cluster_data": cluster_data_json}
|
||||
response = call_api(config, "post", "/restore", params=params, data=data)
|
||||
|
||||
if response.status_code == 200:
|
||||
retstatus = True
|
||||
else:
|
||||
retstatus = False
|
||||
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def maintenance_mode(config, state):
|
||||
"""
|
||||
Enable or disable PVC cluster maintenance mode
|
||||
|
||||
API endpoint: POST /api/v1/status
|
||||
API arguments: {state}={state}
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
params = {"state": state}
|
||||
response = call_api(config, "post", "/status", params=params)
|
||||
|
||||
if response.status_code == 200:
|
||||
retstatus = True
|
||||
else:
|
||||
retstatus = False
|
||||
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def get_info(config):
|
||||
"""
|
||||
Get status of the PVC cluster
|
||||
|
||||
API endpoint: GET /api/v1/status
|
||||
API arguments:
|
||||
API schema: {json_data_object}
|
||||
"""
|
||||
response = call_api(config, "get", "/status")
|
||||
|
||||
if response.status_code == 200:
|
||||
return True, response.json()
|
||||
else:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
|
||||
def format_info(cluster_information, oformat):
    """
    Format the cluster status dict (as returned by get_info) for output.

    oformat selects the output style:
      - "json":        compact JSON dump of the raw data
      - "json-pretty": indented JSON dump
      - "short":       human-readable header and health section only
      - anything else: full human-readable report

    Returns a single string (ANSI-coloured for the human-readable forms).
    """
    # Machine-readable formats return immediately, undecorated
    if oformat == "json":
        return json.dumps(cluster_information)

    if oformat == "json-pretty":
        return json.dumps(cluster_information, indent=4)

    # Plain formatting, i.e. human-readable
    # Pick the health colour: blue for maintenance or unknown health,
    # then green/yellow/red by decreasing health percentage.
    # NOTE(review): "health" appears to be a numeric percentage when
    # present and the string "N/A" otherwise — confirm against the API.
    if (
        cluster_information.get("maintenance") == "true"
        or cluster_information.get("cluster_health", {}).get("health", "N/A") == "N/A"
    ):
        health_colour = ansiprint.blue()
    elif cluster_information.get("cluster_health", {}).get("health", 100) > 90:
        health_colour = ansiprint.green()
    elif cluster_information.get("cluster_health", {}).get("health", 100) > 50:
        health_colour = ansiprint.yellow()
    else:
        health_colour = ansiprint.red()

    # Accumulate output lines; joined with newlines at the end
    ainformation = []

    ainformation.append(
        "{}PVC cluster status:{}".format(ansiprint.bold(), ansiprint.end())
    )
    ainformation.append("")

    # Health line: numeric health gets a "%" suffix; maintenance is noted
    health_text = (
        f"{cluster_information.get('cluster_health', {}).get('health', 'N/A')}"
    )
    if health_text != "N/A":
        health_text += "%"
    if cluster_information.get("maintenance") == "true":
        health_text += " (maintenance on)"

    ainformation.append(
        "{}Cluster health:{} {}{}{}".format(
            ansiprint.purple(),
            ansiprint.end(),
            health_colour,
            health_text,
            ansiprint.end(),
        )
    )
    # Health messages are sorted and joined one-per-line with a "> " marker
    if cluster_information.get("cluster_health", {}).get("messages"):
        health_messages = "\n > ".join(
            sorted(cluster_information["cluster_health"]["messages"])
        )
        ainformation.append(
            "{}Health messages:{} > {}".format(
                ansiprint.purple(),
                ansiprint.end(),
                health_messages,
            )
        )
    else:
        ainformation.append(
            "{}Health messages:{} N/A".format(
                ansiprint.purple(),
                ansiprint.end(),
            )
        )

    # The "short" format stops after the health section
    if oformat == "short":
        return "\n".join(ainformation)

    # Cluster identity section
    ainformation.append("")
    ainformation.append(
        "{}Primary node:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["primary_node"]
        )
    )
    ainformation.append(
        "{}PVC version:{} {}".format(
            ansiprint.purple(),
            ansiprint.end(),
            cluster_information.get("pvc_version", "N/A"),
        )
    )
    ainformation.append(
        "{}Cluster upstream IP:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["upstream_ip"]
        )
    )
    # Resource totals section
    ainformation.append("")
    ainformation.append(
        "{}Total nodes:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["nodes"]["total"]
        )
    )
    ainformation.append(
        "{}Total VMs:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["vms"]["total"]
        )
    )
    ainformation.append(
        "{}Total networks:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["networks"]
        )
    )
    ainformation.append(
        "{}Total OSDs:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["osds"]["total"]
        )
    )
    ainformation.append(
        "{}Total pools:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["pools"]
        )
    )
    ainformation.append(
        "{}Total volumes:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["volumes"]
        )
    )
    ainformation.append(
        "{}Total snapshots:{} {}".format(
            ansiprint.purple(), ansiprint.end(), cluster_information["snapshots"]
        )
    )

    # Node state breakdown: the healthy "run,ready" count is shown first in
    # green, then every other state key (except "total") in yellow.
    # NOTE(review): the "nodes" dict appears to map state names -> counts
    # plus a "total" key — confirm against the API schema.
    nodes_string = "{}Nodes:{} {}/{} {}ready,run{}".format(
        ansiprint.purple(),
        ansiprint.end(),
        cluster_information["nodes"].get("run,ready", 0),
        cluster_information["nodes"].get("total", 0),
        ansiprint.green(),
        ansiprint.end(),
    )
    for state, count in cluster_information["nodes"].items():
        if state == "total" or state == "run,ready":
            continue

        nodes_string += " {}/{} {}{}{}".format(
            count,
            cluster_information["nodes"]["total"],
            ansiprint.yellow(),
            state,
            ansiprint.end(),
        )

    ainformation.append("")
    ainformation.append(nodes_string)

    # VM state breakdown: "start" is the healthy state (green); transient or
    # deliberate states are blue, everything else yellow.
    vms_string = "{}VMs:{} {}/{} {}start{}".format(
        ansiprint.purple(),
        ansiprint.end(),
        cluster_information["vms"].get("start", 0),
        cluster_information["vms"].get("total", 0),
        ansiprint.green(),
        ansiprint.end(),
    )
    for state, count in cluster_information["vms"].items():
        if state == "total" or state == "start":
            continue

        if state in ["disable", "migrate", "unmigrate", "provision"]:
            colour = ansiprint.blue()
        else:
            colour = ansiprint.yellow()

        vms_string += " {}/{} {}{}{}".format(
            count, cluster_information["vms"]["total"], colour, state, ansiprint.end()
        )

    ainformation.append("")
    ainformation.append(vms_string)

    # OSD state breakdown, only shown when the cluster actually has OSDs;
    # "up,in" is the healthy state (green), all others yellow.
    if cluster_information["osds"]["total"] > 0:
        osds_string = "{}Ceph OSDs:{} {}/{} {}up,in{}".format(
            ansiprint.purple(),
            ansiprint.end(),
            cluster_information["osds"].get("up,in", 0),
            cluster_information["osds"].get("total", 0),
            ansiprint.green(),
            ansiprint.end(),
        )
        for state, count in cluster_information["osds"].items():
            if state == "total" or state == "up,in":
                continue

            osds_string += " {}/{} {}{}{}".format(
                count,
                cluster_information["osds"]["total"],
                ansiprint.yellow(),
                state,
                ansiprint.end(),
            )

        ainformation.append("")
        ainformation.append(osds_string)

    ainformation.append("")
    return "\n".join(ainformation)
|
@ -52,7 +52,7 @@ def node_coordinator_state(config, node, action):
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def node_domain_state(config, node, action):
|
||||
def node_domain_state(config, node, action, wait):
|
||||
"""
|
||||
Set node domain state state (flush/ready)
|
||||
|
||||
@ -60,7 +60,7 @@ def node_domain_state(config, node, action):
|
||||
API arguments: action={action}, wait={wait}
|
||||
API schema: {"message": "{data}"}
|
||||
"""
|
||||
params = {"state": action}
|
||||
params = {"state": action, "wait": str(wait).lower()}
|
||||
response = call_api(
|
||||
config, "post", "/node/{node}/domain-state".format(node=node), params=params
|
||||
)
|
||||
@ -442,9 +442,12 @@ def format_info(node_information, long_output):
|
||||
return "\n".join(ainformation)
|
||||
|
||||
|
||||
def format_list(node_list):
|
||||
if node_list == "Node not found.":
|
||||
return node_list
|
||||
def format_list(node_list, raw):
|
||||
if raw:
|
||||
ainformation = list()
|
||||
for node in sorted(item["name"] for item in node_list):
|
||||
ainformation.append(node)
|
||||
return "\n".join(ainformation)
|
||||
|
||||
node_list_output = []
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -21,6 +21,33 @@
|
||||
|
||||
from pvc.lib.node import format_info as node_format_info
|
||||
from pvc.lib.node import format_list as node_format_list
|
||||
from pvc.lib.vm import format_vm_tags as vm_format_tags
|
||||
from pvc.lib.vm import format_vm_vcpus as vm_format_vcpus
|
||||
from pvc.lib.vm import format_vm_memory as vm_format_memory
|
||||
from pvc.lib.vm import format_vm_networks as vm_format_networks
|
||||
from pvc.lib.vm import format_vm_volumes as vm_format_volumes
|
||||
from pvc.lib.vm import format_info as vm_format_info
|
||||
from pvc.lib.vm import format_list as vm_format_list
|
||||
from pvc.lib.network import format_info as network_format_info
|
||||
from pvc.lib.network import format_list as network_format_list
|
||||
from pvc.lib.network import format_list_dhcp as network_format_dhcp_list
|
||||
from pvc.lib.network import format_list_acl as network_format_acl_list
|
||||
from pvc.lib.network import format_list_sriov_pf as network_format_sriov_pf_list
|
||||
from pvc.lib.network import format_info_sriov_vf as network_format_sriov_vf_info
|
||||
from pvc.lib.network import format_list_sriov_vf as network_format_sriov_vf_list
|
||||
from pvc.lib.storage import format_raw_output as storage_format_raw
|
||||
from pvc.lib.storage import format_info_benchmark as storage_format_benchmark_info
|
||||
from pvc.lib.storage import format_list_benchmark as storage_format_benchmark_list
|
||||
from pvc.lib.storage import format_list_osd as storage_format_osd_list
|
||||
from pvc.lib.storage import format_list_pool as storage_format_pool_list
|
||||
from pvc.lib.storage import format_list_volume as storage_format_volume_list
|
||||
from pvc.lib.storage import format_list_snapshot as storage_format_snapshot_list
|
||||
from pvc.lib.provisioner import format_list_template as provisioner_format_template_list
|
||||
from pvc.lib.provisioner import format_list_userdata as provisioner_format_userdata_list
|
||||
from pvc.lib.provisioner import format_list_script as provisioner_format_script_list
|
||||
from pvc.lib.provisioner import format_list_ova as provisioner_format_ova_list
|
||||
from pvc.lib.provisioner import format_list_profile as provisioner_format_profile_list
|
||||
from pvc.lib.provisioner import format_list_task as provisioner_format_task_status
|
||||
|
||||
|
||||
# Define colour values for use in formatters
|
||||
@ -36,7 +63,7 @@ ansii = {
|
||||
}
|
||||
|
||||
|
||||
def cli_cluster_status_format_pretty(data):
|
||||
def cli_cluster_status_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the full output of cli_cluster_status
|
||||
"""
|
||||
@ -132,6 +159,8 @@ def cli_cluster_status_format_pretty(data):
|
||||
|
||||
vms_strings = list()
|
||||
for state in vm_states:
|
||||
if data.get("vms", {}).get(state) is None:
|
||||
continue
|
||||
if state in ["start"]:
|
||||
state_colour = ansii["green"]
|
||||
elif state in ["migrate", "disable"]:
|
||||
@ -188,7 +217,7 @@ def cli_cluster_status_format_pretty(data):
|
||||
return "\n".join(output)
|
||||
|
||||
|
||||
def cli_cluster_status_format_short(data):
|
||||
def cli_cluster_status_format_short(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the health-only output of cli_cluster_status
|
||||
"""
|
||||
@ -233,7 +262,7 @@ def cli_cluster_status_format_short(data):
|
||||
return "\n".join(output)
|
||||
|
||||
|
||||
def cli_connection_list_format_pretty(data):
|
||||
def cli_connection_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_connection_list
|
||||
"""
|
||||
@ -305,7 +334,7 @@ def cli_connection_list_format_pretty(data):
|
||||
return "\n".join(output)
|
||||
|
||||
|
||||
def cli_connection_detail_format_pretty(data):
|
||||
def cli_connection_detail_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_connection_detail
|
||||
"""
|
||||
@ -425,25 +454,281 @@ def cli_connection_detail_format_pretty(data):
|
||||
return "\n".join(output)
|
||||
|
||||
|
||||
def cli_node_info_format_pretty(data):
|
||||
def cli_node_info_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the basic output of cli_node_info
|
||||
"""
|
||||
|
||||
return node_format_info(data, long_output=False)
|
||||
return node_format_info(CLI_CONFIG, data, long_output=False)
|
||||
|
||||
|
||||
def cli_node_info_format_long(data):
|
||||
def cli_node_info_format_long(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the full output of cli_node_info
|
||||
"""
|
||||
|
||||
return node_format_info(data, long_output=True)
|
||||
return node_format_info(CLI_CONFIG, data, long_output=True)
|
||||
|
||||
|
||||
def cli_node_list_format_pretty(data):
|
||||
def cli_node_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_node_list
|
||||
"""
|
||||
|
||||
return node_format_list(data)
|
||||
return node_format_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_vm_tag_get_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_vm_tag_get
|
||||
"""
|
||||
|
||||
return vm_format_tags(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_vm_vcpu_get_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_vm_vcpu_get
|
||||
"""
|
||||
|
||||
return vm_format_vcpus(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_vm_memory_get_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_vm_memory_get
|
||||
"""
|
||||
|
||||
return vm_format_memory(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_vm_network_get_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_vm_network_get
|
||||
"""
|
||||
|
||||
return vm_format_networks(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_vm_volume_get_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_vm_volume_get
|
||||
"""
|
||||
|
||||
return vm_format_volumes(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_vm_info_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the basic output of cli_vm_info
|
||||
"""
|
||||
|
||||
return vm_format_info(CLI_CONFIG, data, long_output=False)
|
||||
|
||||
|
||||
def cli_vm_info_format_long(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the full output of cli_vm_info
|
||||
"""
|
||||
|
||||
return vm_format_info(CLI_CONFIG, data, long_output=True)
|
||||
|
||||
|
||||
def cli_vm_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_vm_list
|
||||
"""
|
||||
|
||||
return vm_format_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_network_info_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the full output of cli_network_info
|
||||
"""
|
||||
|
||||
return network_format_info(CLI_CONFIG, data, long_output=True)
|
||||
|
||||
|
||||
def cli_network_info_format_long(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the full output of cli_network_info
|
||||
"""
|
||||
|
||||
return network_format_info(CLI_CONFIG, data, long_output=True)
|
||||
|
||||
|
||||
def cli_network_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_network_list
|
||||
"""
|
||||
|
||||
return network_format_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_network_dhcp_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_network_dhcp_list
|
||||
"""
|
||||
|
||||
return network_format_dhcp_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_network_acl_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_network_acl_list
|
||||
"""
|
||||
|
||||
return network_format_acl_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_network_sriov_pf_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_network_sriov_pf_list
|
||||
"""
|
||||
|
||||
return network_format_sriov_pf_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_network_sriov_vf_info_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_network_sriov_vf_info
|
||||
"""
|
||||
|
||||
return network_format_sriov_vf_info(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_network_sriov_vf_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_network_sriov_vf_list
|
||||
"""
|
||||
|
||||
return network_format_sriov_vf_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_storage_status_format_raw(CLI_CONFIG, data):
|
||||
"""
|
||||
Direct format the output of cli_storage_status
|
||||
"""
|
||||
|
||||
return storage_format_raw(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_storage_util_format_raw(CLI_CONFIG, data):
|
||||
"""
|
||||
Direct format the output of cli_storage_util
|
||||
"""
|
||||
|
||||
return storage_format_raw(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_storage_benchmark_info_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_storage_benchmark_info
|
||||
"""
|
||||
|
||||
return storage_format_benchmark_info(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_storage_benchmark_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_storage_benchmark_list
|
||||
"""
|
||||
|
||||
return storage_format_benchmark_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_storage_osd_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_storage_osd_list
|
||||
"""
|
||||
|
||||
return storage_format_osd_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_storage_pool_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_storage_pool_list
|
||||
"""
|
||||
|
||||
return storage_format_pool_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_storage_volume_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_storage_volume_list
|
||||
"""
|
||||
|
||||
return storage_format_volume_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_storage_snapshot_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_storage_snapshot_list
|
||||
"""
|
||||
|
||||
return storage_format_snapshot_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_provisioner_template_system_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_provisioner_template_system_list
|
||||
"""
|
||||
|
||||
return provisioner_format_template_list(CLI_CONFIG, data, template_type="system")
|
||||
|
||||
|
||||
def cli_provisioner_template_network_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_provisioner_template_network_list
|
||||
"""
|
||||
|
||||
return provisioner_format_template_list(CLI_CONFIG, data, template_type="network")
|
||||
|
||||
|
||||
def cli_provisioner_template_storage_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_provisioner_template_storage_list
|
||||
"""
|
||||
|
||||
return provisioner_format_template_list(CLI_CONFIG, data, template_type="storage")
|
||||
|
||||
|
||||
def cli_provisioner_userdata_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_provisioner_userdata_list
|
||||
"""
|
||||
|
||||
return provisioner_format_userdata_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_provisioner_script_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_provisioner_script_list
|
||||
"""
|
||||
|
||||
return provisioner_format_script_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_provisioner_ova_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_provisioner_ova_list
|
||||
"""
|
||||
|
||||
return provisioner_format_ova_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_provisioner_profile_list_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_provisioner_profile_list
|
||||
"""
|
||||
|
||||
return provisioner_format_profile_list(CLI_CONFIG, data)
|
||||
|
||||
|
||||
def cli_provisioner_status_format_pretty(CLI_CONFIG, data):
|
||||
"""
|
||||
Pretty format the output of cli_provisioner_status
|
||||
"""
|
||||
|
||||
return provisioner_format_task_status(CLI_CONFIG, data)
|
@ -20,6 +20,7 @@
|
||||
###############################################################################
|
||||
|
||||
from click import echo as click_echo
|
||||
from click import progressbar
|
||||
from distutils.util import strtobool
|
||||
from json import load as jload
|
||||
from json import dump as jdump
|
||||
@ -27,9 +28,12 @@ from os import chmod, environ, getpid, path
|
||||
from socket import gethostname
|
||||
from sys import argv
|
||||
from syslog import syslog, openlog, closelog, LOG_AUTH
|
||||
from time import sleep
|
||||
from yaml import load as yload
|
||||
from yaml import BaseLoader
|
||||
|
||||
import pvc.lib.provisioner
|
||||
|
||||
|
||||
DEFAULT_STORE_DATA = {"cfgfile": "/etc/pvc/pvcapid.yaml"}
|
||||
DEFAULT_STORE_FILENAME = "pvc.json"
|
||||
@ -178,3 +182,60 @@ def update_store(store_path, store_data):
|
||||
|
||||
with open(store_file, "w") as fh:
|
||||
jdump(store_data, fh, sort_keys=True, indent=4)
|
||||
|
||||
|
||||
def wait_for_provisioner(CLI_CONFIG, task_id):
    """
    Wait for a provisioner task to complete, reporting progress on the
    terminal as it runs.

    Polls the provisioner task status once per second: first until the task
    leaves the PENDING state, then (with a progress bar) until it leaves the
    RUNNING state.

    Arguments:
        CLI_CONFIG: the CLI configuration dict passed to echo() and the
                    provisioner library calls
        task_id:    the provisioner task ID to watch

    Returns:
        A "<state>: <status>" summary string for the finished task.
    """

    echo(CLI_CONFIG, f"Task ID: {task_id}")
    echo(CLI_CONFIG, "")

    # Wait for the task to start
    echo(CLI_CONFIG, "Waiting for task to start...", newline=False)
    while True:
        sleep(1)
        task_status = pvc.lib.provisioner.task_status(
            CLI_CONFIG, task_id, is_watching=True
        )
        if task_status.get("state") != "PENDING":
            break
        # Bug fix: the original called echo(".", newline=False), omitting the
        # CLI_CONFIG argument that every other echo() call here passes.
        echo(CLI_CONFIG, ".", newline=False)
    echo(CLI_CONFIG, " done.")
    echo(CLI_CONFIG, "")

    # Start following the task state, updating progress as we go
    total_task = task_status.get("total")
    with progressbar(length=total_task, show_eta=False) as bar:
        last_task = 0
        maxlen = 0
        while True:
            sleep(1)
            if task_status.get("state") != "RUNNING":
                break
            # Advance the bar by however many steps completed since last poll
            if task_status.get("current") > last_task:
                current_task = int(task_status.get("current"))
                bar.update(current_task - last_task)
                last_task = current_task
            # The extensive spaces at the end cause this to overwrite longer previous messages
            curlen = len(str(task_status.get("status")))
            if curlen > maxlen:
                maxlen = curlen
            lendiff = maxlen - curlen
            overwrite_whitespace = " " * lendiff
            echo(
                CLI_CONFIG,
                "  " + task_status.get("status") + overwrite_whitespace,
                newline=False,
            )
            task_status = pvc.lib.provisioner.task_status(
                CLI_CONFIG, task_id, is_watching=True
            )
        # On success, fill the bar to 100% regardless of the last polled count
        if task_status.get("state") == "SUCCESS":
            bar.update(total_task - last_task)

    echo(CLI_CONFIG, "")
    retdata = task_status.get("state") + ": " + task_status.get("status")

    return retdata
|
@ -21,8 +21,7 @@
|
||||
|
||||
import json
|
||||
|
||||
import pvc.cli_lib.ansiprint as ansiprint
|
||||
from pvc.cli_lib.common import call_api
|
||||
from pvc.lib.common import call_api
|
||||
|
||||
|
||||
def initialize(config, overwrite=False):
|
||||
@ -115,199 +114,3 @@ def get_info(config):
|
||||
return True, response.json()
|
||||
else:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
|
||||
def format_info(cluster_information, oformat):
|
||||
if oformat == "json":
|
||||
return json.dumps(cluster_information)
|
||||
|
||||
if oformat == "json-pretty":
|
||||
return json.dumps(cluster_information, indent=4)
|
||||
|
||||
# Plain formatting, i.e. human-readable
|
||||
if (
|
||||
cluster_information.get("maintenance") == "true"
|
||||
or cluster_information.get("cluster_health", {}).get("health", "N/A") == "N/A"
|
||||
):
|
||||
health_colour = ansiprint.blue()
|
||||
elif cluster_information.get("cluster_health", {}).get("health", 100) > 90:
|
||||
health_colour = ansiprint.green()
|
||||
elif cluster_information.get("cluster_health", {}).get("health", 100) > 50:
|
||||
health_colour = ansiprint.yellow()
|
||||
else:
|
||||
health_colour = ansiprint.red()
|
||||
|
||||
ainformation = []
|
||||
|
||||
ainformation.append(
|
||||
"{}PVC cluster status:{}".format(ansiprint.bold(), ansiprint.end())
|
||||
)
|
||||
ainformation.append("")
|
||||
|
||||
health_text = (
|
||||
f"{cluster_information.get('cluster_health', {}).get('health', 'N/A')}"
|
||||
)
|
||||
if health_text != "N/A":
|
||||
health_text += "%"
|
||||
if cluster_information.get("maintenance") == "true":
|
||||
health_text += " (maintenance on)"
|
||||
|
||||
ainformation.append(
|
||||
"{}Cluster health:{} {}{}{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
health_colour,
|
||||
health_text,
|
||||
ansiprint.end(),
|
||||
)
|
||||
)
|
||||
if cluster_information.get("cluster_health", {}).get("messages"):
|
||||
health_messages = "\n > ".join(
|
||||
sorted(cluster_information["cluster_health"]["messages"])
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Health messages:{} > {}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
health_messages,
|
||||
)
|
||||
)
|
||||
else:
|
||||
ainformation.append(
|
||||
"{}Health messages:{} N/A".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
)
|
||||
)
|
||||
|
||||
if oformat == "short":
|
||||
return "\n".join(ainformation)
|
||||
|
||||
ainformation.append("")
|
||||
ainformation.append(
|
||||
"{}Primary node:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["primary_node"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}PVC version:{} {}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
cluster_information.get("pvc_version", "N/A"),
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Cluster upstream IP:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["upstream_ip"]
|
||||
)
|
||||
)
|
||||
ainformation.append("")
|
||||
ainformation.append(
|
||||
"{}Total nodes:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["nodes"]["total"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Total VMs:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["vms"]["total"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Total networks:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["networks"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Total OSDs:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["osds"]["total"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Total pools:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["pools"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Total volumes:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["volumes"]
|
||||
)
|
||||
)
|
||||
ainformation.append(
|
||||
"{}Total snapshots:{} {}".format(
|
||||
ansiprint.purple(), ansiprint.end(), cluster_information["snapshots"]
|
||||
)
|
||||
)
|
||||
|
||||
nodes_string = "{}Nodes:{} {}/{} {}ready,run{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
cluster_information["nodes"].get("run,ready", 0),
|
||||
cluster_information["nodes"].get("total", 0),
|
||||
ansiprint.green(),
|
||||
ansiprint.end(),
|
||||
)
|
||||
for state, count in cluster_information["nodes"].items():
|
||||
if state == "total" or state == "run,ready":
|
||||
continue
|
||||
|
||||
nodes_string += " {}/{} {}{}{}".format(
|
||||
count,
|
||||
cluster_information["nodes"]["total"],
|
||||
ansiprint.yellow(),
|
||||
state,
|
||||
ansiprint.end(),
|
||||
)
|
||||
|
||||
ainformation.append("")
|
||||
ainformation.append(nodes_string)
|
||||
|
||||
vms_string = "{}VMs:{} {}/{} {}start{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
cluster_information["vms"].get("start", 0),
|
||||
cluster_information["vms"].get("total", 0),
|
||||
ansiprint.green(),
|
||||
ansiprint.end(),
|
||||
)
|
||||
for state, count in cluster_information["vms"].items():
|
||||
if state == "total" or state == "start":
|
||||
continue
|
||||
|
||||
if state in ["disable", "migrate", "unmigrate", "provision"]:
|
||||
colour = ansiprint.blue()
|
||||
else:
|
||||
colour = ansiprint.yellow()
|
||||
|
||||
vms_string += " {}/{} {}{}{}".format(
|
||||
count, cluster_information["vms"]["total"], colour, state, ansiprint.end()
|
||||
)
|
||||
|
||||
ainformation.append("")
|
||||
ainformation.append(vms_string)
|
||||
|
||||
if cluster_information["osds"]["total"] > 0:
|
||||
osds_string = "{}Ceph OSDs:{} {}/{} {}up,in{}".format(
|
||||
ansiprint.purple(),
|
||||
ansiprint.end(),
|
||||
cluster_information["osds"].get("up,in", 0),
|
||||
cluster_information["osds"].get("total", 0),
|
||||
ansiprint.green(),
|
||||
ansiprint.end(),
|
||||
)
|
||||
for state, count in cluster_information["osds"].items():
|
||||
if state == "total" or state == "up,in":
|
||||
continue
|
||||
|
||||
osds_string += " {}/{} {}{}{}".format(
|
||||
count,
|
||||
cluster_information["osds"]["total"],
|
||||
ansiprint.yellow(),
|
||||
state,
|
||||
ansiprint.end(),
|
||||
)
|
||||
|
||||
ainformation.append("")
|
||||
ainformation.append(osds_string)
|
||||
|
||||
ainformation.append("")
|
||||
return "\n".join(ainformation)
|
||||
|
@ -20,8 +20,8 @@
|
||||
###############################################################################
|
||||
|
||||
import re
|
||||
import pvc.cli_lib.ansiprint as ansiprint
|
||||
from pvc.cli_lib.common import call_api
|
||||
import pvc.lib.ansiprint as ansiprint
|
||||
from pvc.lib.common import call_api
|
||||
|
||||
|
||||
def isValidMAC(macaddr):
|
||||
@ -542,11 +542,16 @@ def net_sriov_vf_info(config, node, vf):
|
||||
return False, "VF not found."
|
||||
else:
|
||||
# Return a single instance if the response is a list
|
||||
data = dict()
|
||||
data["node"] = node
|
||||
if isinstance(response.json(), list):
|
||||
return True, response.json()[0]
|
||||
data = dict()
|
||||
data["vf_information"] = response.json()[0]
|
||||
return True, data
|
||||
# This shouldn't happen, but is here just in case
|
||||
else:
|
||||
return True, response.json()
|
||||
data["vf_information"] = response.json()
|
||||
return True, data
|
||||
else:
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
@ -695,7 +700,7 @@ def format_info(config, network_information, long_output):
|
||||
ainformation.append("")
|
||||
if retcode:
|
||||
dhcp4_reservations_string = format_list_dhcp(
|
||||
dhcp4_reservations_list
|
||||
config, dhcp4_reservations_list
|
||||
)
|
||||
for line in dhcp4_reservations_string.split("\n"):
|
||||
ainformation.append(line)
|
||||
@ -714,7 +719,7 @@ def format_info(config, network_information, long_output):
|
||||
)
|
||||
ainformation.append("")
|
||||
if retcode:
|
||||
firewall_rules_string = format_list_acl(firewall_rules_list)
|
||||
firewall_rules_string = format_list_acl(config, firewall_rules_list)
|
||||
for line in firewall_rules_string.split("\n"):
|
||||
ainformation.append(line)
|
||||
else:
|
||||
@ -888,7 +893,7 @@ def format_list(config, network_list):
|
||||
return "\n".join(network_list_output)
|
||||
|
||||
|
||||
def format_list_dhcp(dhcp_lease_list):
|
||||
def format_list_dhcp(config, dhcp_lease_list):
|
||||
dhcp_lease_list_output = []
|
||||
|
||||
# Determine optimal column widths
|
||||
@ -987,7 +992,7 @@ def format_list_dhcp(dhcp_lease_list):
|
||||
return "\n".join(dhcp_lease_list_output)
|
||||
|
||||
|
||||
def format_list_acl(acl_list):
|
||||
def format_list_acl(config, acl_list):
|
||||
# Handle when we get an empty entry
|
||||
if not acl_list:
|
||||
acl_list = list()
|
||||
@ -1086,7 +1091,7 @@ def format_list_acl(acl_list):
|
||||
return "\n".join(acl_list_output)
|
||||
|
||||
|
||||
def format_list_sriov_pf(pf_list):
|
||||
def format_list_sriov_pf(config, pf_list):
|
||||
# The maximum column width of the VFs column
|
||||
max_vfs_length = 70
|
||||
|
||||
@ -1206,7 +1211,7 @@ def format_list_sriov_pf(pf_list):
|
||||
return "\n".join(pf_list_output)
|
||||
|
||||
|
||||
def format_list_sriov_vf(vf_list):
|
||||
def format_list_sriov_vf(config, vf_list):
|
||||
# Handle when we get an empty entry
|
||||
if not vf_list:
|
||||
vf_list = list()
|
||||
@ -1338,10 +1343,13 @@ def format_list_sriov_vf(vf_list):
|
||||
return "\n".join(vf_list_output)
|
||||
|
||||
|
||||
def format_info_sriov_vf(config, vf_information, node):
|
||||
if not vf_information:
|
||||
def format_info_sriov_vf(config, data):
|
||||
if not data or not data["vf_information"]:
|
||||
return "No VF found"
|
||||
|
||||
node = data["node"]
|
||||
vf_information = data["vf_information"]
|
||||
|
||||
# Get information on the using VM if applicable
|
||||
if vf_information["usage"]["used"] == "True" and vf_information["usage"]["domain"]:
|
||||
vm_information = call_api(
|
||||
|
@ -21,8 +21,8 @@
|
||||
|
||||
import time
|
||||
|
||||
import pvc.cli_lib.ansiprint as ansiprint
|
||||
from pvc.cli_lib.common import call_api
|
||||
import pvc.lib.ansiprint as ansiprint
|
||||
from pvc.lib.common import call_api
|
||||
|
||||
|
||||
#
|
||||
@ -52,7 +52,7 @@ def node_coordinator_state(config, node, action):
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def node_domain_state(config, node, action, wait):
|
||||
def node_domain_state(config, node, action):
|
||||
"""
|
||||
Set node domain state state (flush/ready)
|
||||
|
||||
@ -60,7 +60,7 @@ def node_domain_state(config, node, action, wait):
|
||||
API arguments: action={action}, wait={wait}
|
||||
API schema: {"message": "{data}"}
|
||||
"""
|
||||
params = {"state": action, "wait": str(wait).lower()}
|
||||
params = {"state": action}
|
||||
response = call_api(
|
||||
config, "post", "/node/{node}/domain-state".format(node=node), params=params
|
||||
)
|
||||
@ -273,7 +273,7 @@ def getOutputColours(node_information):
|
||||
)
|
||||
|
||||
|
||||
def format_info(node_information, long_output):
|
||||
def format_info(config, node_information, long_output):
|
||||
(
|
||||
health_colour,
|
||||
daemon_state_colour,
|
||||
@ -442,12 +442,9 @@ def format_info(node_information, long_output):
|
||||
return "\n".join(ainformation)
|
||||
|
||||
|
||||
def format_list(node_list, raw):
|
||||
if raw:
|
||||
ainformation = list()
|
||||
for node in sorted(item["name"] for item in node_list):
|
||||
ainformation.append(node)
|
||||
return "\n".join(ainformation)
|
||||
def format_list(config, node_list):
|
||||
if node_list == "Node not found.":
|
||||
return node_list
|
||||
|
||||
node_list_output = []
|
||||
|
||||
|
@ -24,8 +24,8 @@ from requests_toolbelt.multipart.encoder import (
|
||||
MultipartEncoderMonitor,
|
||||
)
|
||||
|
||||
import pvc.cli_lib.ansiprint as ansiprint
|
||||
from pvc.cli_lib.common import UploadProgressBar, call_api
|
||||
import pvc.lib.ansiprint as ansiprint
|
||||
from pvc.lib.common import UploadProgressBar, call_api
|
||||
from ast import literal_eval
|
||||
|
||||
|
||||
@ -750,24 +750,11 @@ def task_status(config, task_id=None, is_watching=False):
|
||||
if response.status_code == 200:
|
||||
retvalue = True
|
||||
respjson = response.json()
|
||||
|
||||
if is_watching:
|
||||
# Just return the raw JSON to the watching process instead of formatting it
|
||||
# Just return the raw JSON to the watching process instead of including value
|
||||
return respjson
|
||||
|
||||
job_state = respjson["state"]
|
||||
if job_state == "RUNNING":
|
||||
retdata = "Job state: RUNNING\nStage: {}/{}\nStatus: {}".format(
|
||||
respjson["current"], respjson["total"], respjson["status"]
|
||||
)
|
||||
elif job_state == "FAILED":
|
||||
retdata = "Job state: FAILED\nStatus: {}".format(respjson["status"])
|
||||
elif job_state == "COMPLETED":
|
||||
retdata = "Job state: COMPLETED\nStatus: {}".format(respjson["status"])
|
||||
else:
|
||||
retdata = "Job state: {}\nStatus: {}".format(
|
||||
respjson["state"], respjson["status"]
|
||||
)
|
||||
return retvalue, respjson
|
||||
else:
|
||||
retvalue = False
|
||||
retdata = response.json().get("message", "")
|
||||
@ -814,7 +801,7 @@ def task_status(config, task_id=None, is_watching=False):
|
||||
#
|
||||
# Format functions
|
||||
#
|
||||
def format_list_template(template_data, template_type=None):
|
||||
def format_list_template(config, template_data, template_type=None):
|
||||
"""
|
||||
Format the returned template template
|
||||
|
||||
@ -1330,7 +1317,12 @@ def format_list_template_storage(template_template):
|
||||
return "\n".join(template_list_output)
|
||||
|
||||
|
||||
def format_list_userdata(userdata_data, lines=None):
|
||||
def format_list_userdata(config, userdata_data):
|
||||
if not config.get("long_output"):
|
||||
lines = 4
|
||||
else:
|
||||
lines = None
|
||||
|
||||
if isinstance(userdata_data, dict):
|
||||
userdata_data = [userdata_data]
|
||||
|
||||
@ -1432,7 +1424,12 @@ def format_list_userdata(userdata_data, lines=None):
|
||||
return "\n".join(userdata_list_output)
|
||||
|
||||
|
||||
def format_list_script(script_data, lines=None):
|
||||
def format_list_script(config, script_data):
|
||||
if not config.get("long_output"):
|
||||
lines = 4
|
||||
else:
|
||||
lines = None
|
||||
|
||||
if isinstance(script_data, dict):
|
||||
script_data = [script_data]
|
||||
|
||||
@ -1531,7 +1528,7 @@ def format_list_script(script_data, lines=None):
|
||||
return "\n".join(script_list_output)
|
||||
|
||||
|
||||
def format_list_ova(ova_data):
|
||||
def format_list_ova(config, ova_data):
|
||||
if isinstance(ova_data, dict):
|
||||
ova_data = [ova_data]
|
||||
|
||||
@ -1678,7 +1675,7 @@ def format_list_ova(ova_data):
|
||||
return "\n".join(ova_list_output)
|
||||
|
||||
|
||||
def format_list_profile(profile_data):
|
||||
def format_list_profile(config, profile_data):
|
||||
if isinstance(profile_data, dict):
|
||||
profile_data = [profile_data]
|
||||
|
||||
@ -1867,7 +1864,23 @@ def format_list_profile(profile_data):
|
||||
return "\n".join(profile_list_output)
|
||||
|
||||
|
||||
def format_list_task(task_data):
|
||||
def format_list_task(config, task_data):
|
||||
if not isinstance(task_data, list):
|
||||
job_state = task_data["state"]
|
||||
if job_state == "RUNNING":
|
||||
retdata = "Job state: RUNNING\nStage: {}/{}\nStatus: {}".format(
|
||||
task_data["current"], task_data["total"], task_data["status"]
|
||||
)
|
||||
elif job_state == "FAILED":
|
||||
retdata = "Job state: FAILED\nStatus: {}".format(task_data["status"])
|
||||
elif job_state == "COMPLETED":
|
||||
retdata = "Job state: COMPLETED\nStatus: {}".format(task_data["status"])
|
||||
else:
|
||||
retdata = "Job state: {}\nStatus: {}".format(
|
||||
task_data["state"], task_data["status"]
|
||||
)
|
||||
return retdata
|
||||
|
||||
task_list_output = []
|
||||
|
||||
# Determine optimal column widths
|
||||
|
@ -21,14 +21,14 @@
|
||||
|
||||
import math
|
||||
|
||||
from json import dumps, loads
|
||||
from json import loads
|
||||
from requests_toolbelt.multipart.encoder import (
|
||||
MultipartEncoder,
|
||||
MultipartEncoderMonitor,
|
||||
)
|
||||
|
||||
import pvc.cli_lib.ansiprint as ansiprint
|
||||
from pvc.cli_lib.common import UploadProgressBar, call_api
|
||||
import pvc.lib.ansiprint as ansiprint
|
||||
from pvc.lib.common import UploadProgressBar, call_api
|
||||
|
||||
#
|
||||
# Supplemental functions
|
||||
@ -143,7 +143,7 @@ def ceph_util(config):
|
||||
return False, response.json().get("message", "")
|
||||
|
||||
|
||||
def format_raw_output(status_data):
|
||||
def format_raw_output(config, status_data):
|
||||
ainformation = list()
|
||||
ainformation.append(
|
||||
"{bold}Ceph cluster {stype} (primary node {end}{blue}{primary}{end}{bold}){end}\n".format(
|
||||
@ -379,7 +379,7 @@ def getOutputColoursOSD(osd_information):
|
||||
return osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour
|
||||
|
||||
|
||||
def format_list_osd(osd_list):
|
||||
def format_list_osd(config, osd_list):
|
||||
# Handle empty list
|
||||
if not osd_list:
|
||||
osd_list = list()
|
||||
@ -835,7 +835,7 @@ def ceph_pool_set_pgs(config, pool, pgs):
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def format_list_pool(pool_list):
|
||||
def format_list_pool(config, pool_list):
|
||||
# Handle empty list
|
||||
if not pool_list:
|
||||
pool_list = list()
|
||||
@ -1318,7 +1318,7 @@ def ceph_volume_clone(config, pool, volume, new_volume):
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def format_list_volume(volume_list):
|
||||
def format_list_volume(config, volume_list):
|
||||
# Handle empty list
|
||||
if not volume_list:
|
||||
volume_list = list()
|
||||
@ -1596,7 +1596,7 @@ def ceph_snapshot_modify(config, pool, volume, snapshot, new_name=None):
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def format_list_snapshot(snapshot_list):
|
||||
def format_list_snapshot(config, snapshot_list):
|
||||
# Handle empty list
|
||||
if not snapshot_list:
|
||||
snapshot_list = list()
|
||||
@ -1981,7 +1981,7 @@ def format_list_benchmark(config, benchmark_information):
|
||||
return "\n".join(benchmark_list_output)
|
||||
|
||||
|
||||
def format_info_benchmark(config, oformat, benchmark_information):
|
||||
def format_info_benchmark(config, benchmark_information):
|
||||
# This matrix is a list of the possible format functions for a benchmark result
|
||||
# It is extensable in the future should newer formats be required.
|
||||
benchmark_matrix = {
|
||||
@ -1991,12 +1991,7 @@ def format_info_benchmark(config, oformat, benchmark_information):
|
||||
|
||||
benchmark_version = benchmark_information[0]["test_format"]
|
||||
|
||||
if oformat == "json-pretty":
|
||||
return dumps(benchmark_information, indent=4)
|
||||
elif oformat == "json":
|
||||
return dumps(benchmark_information)
|
||||
else:
|
||||
return benchmark_matrix[benchmark_version](config, benchmark_information[0])
|
||||
return benchmark_matrix[benchmark_version](config, benchmark_information[0])
|
||||
|
||||
|
||||
def format_info_benchmark_legacy(config, benchmark_information):
|
@ -22,8 +22,8 @@
|
||||
import time
|
||||
import re
|
||||
|
||||
import pvc.cli_lib.ansiprint as ansiprint
|
||||
from pvc.cli_lib.common import call_api, format_bytes, format_metric
|
||||
import pvc.lib.ansiprint as ansiprint
|
||||
from pvc.lib.common import call_api, format_bytes, format_metric
|
||||
|
||||
|
||||
#
|
||||
@ -286,20 +286,18 @@ def vm_tag_set(config, vm, action, tag, protected=False):
|
||||
return retstatus, response.json().get("message", "")
|
||||
|
||||
|
||||
def format_vm_tags(config, name, tags):
|
||||
def format_vm_tags(config, data):
|
||||
"""
|
||||
Format the output of a tags dictionary in a nice table
|
||||
"""
|
||||
|
||||
tags = data.get("tags", [])
|
||||
|
||||
if len(tags) < 1:
|
||||
return "No tags found."
|
||||
|
||||
output_list = []
|
||||
|
||||
name_length = 5
|
||||
_name_length = len(name) + 1
|
||||
if _name_length > name_length:
|
||||
name_length = _name_length
|
||||
|
||||
tags_name_length = 4
|
||||
tags_type_length = 5
|
||||
tags_protected_length = 10
|
||||
@ -495,44 +493,38 @@ def vm_vcpus_get(config, vm):
|
||||
except Exception:
|
||||
return False, "ERROR: Failed to parse XML data."
|
||||
|
||||
vm_vcpus = int(parsed_xml.vcpu.text)
|
||||
vm_sockets = parsed_xml.cpu.topology.attrib.get("sockets")
|
||||
vm_cores = parsed_xml.cpu.topology.attrib.get("cores")
|
||||
vm_threads = parsed_xml.cpu.topology.attrib.get("threads")
|
||||
data = dict()
|
||||
data["name"] = vm
|
||||
data["vcpus"] = int(parsed_xml.vcpu.text)
|
||||
data["sockets"] = parsed_xml.cpu.topology.attrib.get("sockets")
|
||||
data["cores"] = parsed_xml.cpu.topology.attrib.get("cores")
|
||||
data["threads"] = parsed_xml.cpu.topology.attrib.get("threads")
|
||||
|
||||
return True, (vm_vcpus, (vm_sockets, vm_cores, vm_threads))
|
||||
return True, data
|
||||
|
||||
|
||||
def format_vm_vcpus(config, name, vcpus):
|
||||
def format_vm_vcpus(config, data):
|
||||
"""
|
||||
Format the output of a vCPU value in a nice table
|
||||
"""
|
||||
output_list = []
|
||||
|
||||
name_length = 5
|
||||
_name_length = len(name) + 1
|
||||
if _name_length > name_length:
|
||||
name_length = _name_length
|
||||
|
||||
vcpus_length = 6
|
||||
sockets_length = 8
|
||||
cores_length = 6
|
||||
threads_length = 8
|
||||
|
||||
output_list.append(
|
||||
"{bold}{name: <{name_length}} \
|
||||
{vcpus: <{vcpus_length}} \
|
||||
"{bold}{vcpus: <{vcpus_length}} \
|
||||
{sockets: <{sockets_length}} \
|
||||
{cores: <{cores_length}} \
|
||||
{threads: <{threads_length}}{end_bold}".format(
|
||||
name_length=name_length,
|
||||
vcpus_length=vcpus_length,
|
||||
sockets_length=sockets_length,
|
||||
cores_length=cores_length,
|
||||
threads_length=threads_length,
|
||||
bold=ansiprint.bold(),
|
||||
end_bold=ansiprint.end(),
|
||||
name="Name",
|
||||
vcpus="vCPUs",
|
||||
sockets="Sockets",
|
||||
cores="Cores",
|
||||
@ -540,23 +532,20 @@ def format_vm_vcpus(config, name, vcpus):
|
||||
)
|
||||
)
|
||||
output_list.append(
|
||||
"{bold}{name: <{name_length}} \
|
||||
{vcpus: <{vcpus_length}} \
|
||||
"{bold}{vcpus: <{vcpus_length}} \
|
||||
{sockets: <{sockets_length}} \
|
||||
{cores: <{cores_length}} \
|
||||
{threads: <{threads_length}}{end_bold}".format(
|
||||
name_length=name_length,
|
||||
vcpus_length=vcpus_length,
|
||||
sockets_length=sockets_length,
|
||||
cores_length=cores_length,
|
||||
threads_length=threads_length,
|
||||
bold="",
|
||||
end_bold="",
|
||||
name=name,
|
||||
vcpus=vcpus[0],
|
||||
sockets=vcpus[1][0],
|
||||
cores=vcpus[1][1],
|
||||
threads=vcpus[1][2],
|
||||
vcpus=data["vcpus"],
|
||||
sockets=data["sockets"],
|
||||
cores=data["cores"],
|
||||
threads=data["threads"],
|
||||
)
|
||||
)
|
||||
return "\n".join(output_list)
|
||||
@ -619,44 +608,35 @@ def vm_memory_get(config, vm):
|
||||
except Exception:
|
||||
return False, "ERROR: Failed to parse XML data."
|
||||
|
||||
vm_memory = int(parsed_xml.memory.text)
|
||||
data = dict()
|
||||
data["name"] = vm
|
||||
data["memory"] = int(parsed_xml.memory.text)
|
||||
|
||||
return True, vm_memory
|
||||
return True, data
|
||||
|
||||
|
||||
def format_vm_memory(config, name, memory):
|
||||
def format_vm_memory(config, data):
|
||||
"""
|
||||
Format the output of a memory value in a nice table
|
||||
"""
|
||||
output_list = []
|
||||
|
||||
name_length = 5
|
||||
_name_length = len(name) + 1
|
||||
if _name_length > name_length:
|
||||
name_length = _name_length
|
||||
|
||||
memory_length = 6
|
||||
|
||||
output_list.append(
|
||||
"{bold}{name: <{name_length}} \
|
||||
{memory: <{memory_length}}{end_bold}".format(
|
||||
name_length=name_length,
|
||||
"{bold}{memory: <{memory_length}}{end_bold}".format(
|
||||
memory_length=memory_length,
|
||||
bold=ansiprint.bold(),
|
||||
end_bold=ansiprint.end(),
|
||||
name="Name",
|
||||
memory="RAM (M)",
|
||||
)
|
||||
)
|
||||
output_list.append(
|
||||
"{bold}{name: <{name_length}} \
|
||||
{memory: <{memory_length}}{end_bold}".format(
|
||||
name_length=name_length,
|
||||
"{bold}{memory: <{memory_length}}{end_bold}".format(
|
||||
memory_length=memory_length,
|
||||
bold="",
|
||||
end_bold="",
|
||||
name=name,
|
||||
memory=memory,
|
||||
memory=data["memory"],
|
||||
)
|
||||
)
|
||||
return "\n".join(output_list)
|
||||
@ -677,7 +657,7 @@ def vm_networks_add(
|
||||
from lxml.objectify import fromstring
|
||||
from lxml.etree import tostring
|
||||
from random import randint
|
||||
import pvc.cli_lib.network as pvc_network
|
||||
import pvc.lib.network as pvc_network
|
||||
|
||||
network_exists, _ = pvc_network.net_info(config, network)
|
||||
if not network_exists:
|
||||
@ -946,7 +926,9 @@ def vm_networks_get(config, vm):
|
||||
except Exception:
|
||||
return False, "ERROR: Failed to parse XML data."
|
||||
|
||||
network_data = list()
|
||||
data = dict()
|
||||
data["name"] = vm
|
||||
data["networks"] = list()
|
||||
for interface in parsed_xml.devices.find("interface"):
|
||||
mac_address = interface.mac.attrib.get("address")
|
||||
model = interface.model.attrib.get("type")
|
||||
@ -960,76 +942,65 @@ def vm_networks_get(config, vm):
|
||||
elif interface_type == "hostdev":
|
||||
network = "hostdev:{}".format(interface.source.attrib.get("dev"))
|
||||
|
||||
network_data.append((network, mac_address, model))
|
||||
data["networks"].append(
|
||||
{"network": network, "mac_address": mac_address, "model": model}
|
||||
)
|
||||
|
||||
return True, network_data
|
||||
return True, data
|
||||
|
||||
|
||||
def format_vm_networks(config, name, networks):
|
||||
def format_vm_networks(config, data):
|
||||
"""
|
||||
Format the output of a network list in a nice table
|
||||
"""
|
||||
output_list = []
|
||||
|
||||
name_length = 5
|
||||
vni_length = 8
|
||||
network_length = 8
|
||||
macaddr_length = 12
|
||||
model_length = 6
|
||||
|
||||
_name_length = len(name) + 1
|
||||
if _name_length > name_length:
|
||||
name_length = _name_length
|
||||
for network in data["networks"]:
|
||||
_network_length = len(network["network"]) + 1
|
||||
if _network_length > network_length:
|
||||
network_length = _network_length
|
||||
|
||||
for network in networks:
|
||||
_vni_length = len(network[0]) + 1
|
||||
if _vni_length > vni_length:
|
||||
vni_length = _vni_length
|
||||
|
||||
_macaddr_length = len(network[1]) + 1
|
||||
_macaddr_length = len(network["mac_address"]) + 1
|
||||
if _macaddr_length > macaddr_length:
|
||||
macaddr_length = _macaddr_length
|
||||
|
||||
_model_length = len(network[2]) + 1
|
||||
_model_length = len(network["model"]) + 1
|
||||
if _model_length > model_length:
|
||||
model_length = _model_length
|
||||
|
||||
output_list.append(
|
||||
"{bold}{name: <{name_length}} \
|
||||
{vni: <{vni_length}} \
|
||||
"{bold}{network: <{network_length}} \
|
||||
{macaddr: <{macaddr_length}} \
|
||||
{model: <{model_length}}{end_bold}".format(
|
||||
name_length=name_length,
|
||||
vni_length=vni_length,
|
||||
network_length=network_length,
|
||||
macaddr_length=macaddr_length,
|
||||
model_length=model_length,
|
||||
bold=ansiprint.bold(),
|
||||
end_bold=ansiprint.end(),
|
||||
name="Name",
|
||||
vni="Network",
|
||||
network="Network",
|
||||
macaddr="MAC Address",
|
||||
model="Model",
|
||||
)
|
||||
)
|
||||
count = 0
|
||||
for network in networks:
|
||||
if count > 0:
|
||||
name = ""
|
||||
for network in data["networks"]:
|
||||
count += 1
|
||||
output_list.append(
|
||||
"{bold}{name: <{name_length}} \
|
||||
{vni: <{vni_length}} \
|
||||
"{bold}{network: <{network_length}} \
|
||||
{macaddr: <{macaddr_length}} \
|
||||
{model: <{model_length}}{end_bold}".format(
|
||||
name_length=name_length,
|
||||
vni_length=vni_length,
|
||||
network_length=network_length,
|
||||
macaddr_length=macaddr_length,
|
||||
model_length=model_length,
|
||||
bold="",
|
||||
end_bold="",
|
||||
name=name,
|
||||
vni=network[0],
|
||||
macaddr=network[1],
|
||||
model=network[2],
|
||||
network=network["network"],
|
||||
macaddr=network["mac_address"],
|
||||
model=network["model"],
|
||||
)
|
||||
)
|
||||
return "\n".join(output_list)
|
||||
@ -1046,13 +1017,13 @@ def vm_volumes_add(config, vm, volume, disk_id, bus, disk_type, live, restart):
|
||||
from lxml.objectify import fromstring
|
||||
from lxml.etree import tostring
|
||||
from copy import deepcopy
|
||||
import pvc.cli_lib.ceph as pvc_ceph
|
||||
import pvc.lib.storage as pvc_storage
|
||||
|
||||
if disk_type == "rbd":
|
||||
# Verify that the provided volume is valid
|
||||
vpool = volume.split("/")[0]
|
||||
vname = volume.split("/")[1]
|
||||
retcode, retdata = pvc_ceph.ceph_volume_info(config, vpool, vname)
|
||||
retcode, retdata = pvc_storage.ceph_volume_info(config, vpool, vname)
|
||||
if not retcode:
|
||||
return False, "Volume {} is not present in the cluster.".format(volume)
|
||||
|
||||
@ -1270,7 +1241,9 @@ def vm_volumes_get(config, vm):
|
||||
except Exception:
|
||||
return False, "ERROR: Failed to parse XML data."
|
||||
|
||||
volume_data = list()
|
||||
data = dict()
|
||||
data["name"] = vm
|
||||
data["volumes"] = list()
|
||||
for disk in parsed_xml.devices.find("disk"):
|
||||
protocol = disk.attrib.get("type")
|
||||
disk_id = disk.target.attrib.get("dev")
|
||||
@ -1285,58 +1258,52 @@ def vm_volumes_get(config, vm):
|
||||
protocol = "unknown"
|
||||
source = "unknown"
|
||||
|
||||
volume_data.append((source, disk_id, protocol, bus))
|
||||
data["volumes"].append(
|
||||
{"volume": source, "disk_id": disk_id, "protocol": protocol, "bus": bus}
|
||||
)
|
||||
|
||||
return True, volume_data
|
||||
return True, data
|
||||
|
||||
|
||||
def format_vm_volumes(config, name, volumes):
|
||||
def format_vm_volumes(config, data):
|
||||
"""
|
||||
Format the output of a volume value in a nice table
|
||||
"""
|
||||
output_list = []
|
||||
|
||||
name_length = 5
|
||||
volume_length = 7
|
||||
disk_id_length = 4
|
||||
protocol_length = 5
|
||||
bus_length = 4
|
||||
|
||||
_name_length = len(name) + 1
|
||||
if _name_length > name_length:
|
||||
name_length = _name_length
|
||||
|
||||
for volume in volumes:
|
||||
_volume_length = len(volume[0]) + 1
|
||||
for volume in data["volumes"]:
|
||||
_volume_length = len(volume["volume"]) + 1
|
||||
if _volume_length > volume_length:
|
||||
volume_length = _volume_length
|
||||
|
||||
_disk_id_length = len(volume[1]) + 1
|
||||
_disk_id_length = len(volume["disk_id"]) + 1
|
||||
if _disk_id_length > disk_id_length:
|
||||
disk_id_length = _disk_id_length
|
||||
|
||||
_protocol_length = len(volume[2]) + 1
|
||||
_protocol_length = len(volume["protocol"]) + 1
|
||||
if _protocol_length > protocol_length:
|
||||
protocol_length = _protocol_length
|
||||
|
||||
_bus_length = len(volume[3]) + 1
|
||||
_bus_length = len(volume["bus"]) + 1
|
||||
if _bus_length > bus_length:
|
||||
bus_length = _bus_length
|
||||
|
||||
output_list.append(
|
||||
"{bold}{name: <{name_length}} \
|
||||
{volume: <{volume_length}} \
|
||||
"{bold}{volume: <{volume_length}} \
|
||||
{disk_id: <{disk_id_length}} \
|
||||
{protocol: <{protocol_length}} \
|
||||
{bus: <{bus_length}}{end_bold}".format(
|
||||
name_length=name_length,
|
||||
volume_length=volume_length,
|
||||
disk_id_length=disk_id_length,
|
||||
protocol_length=protocol_length,
|
||||
bus_length=bus_length,
|
||||
bold=ansiprint.bold(),
|
||||
end_bold=ansiprint.end(),
|
||||
name="Name",
|
||||
volume="Volume",
|
||||
disk_id="Dev",
|
||||
protocol="Type",
|
||||
@ -1344,28 +1311,23 @@ def format_vm_volumes(config, name, volumes):
|
||||
)
|
||||
)
|
||||
count = 0
|
||||
for volume in volumes:
|
||||
if count > 0:
|
||||
name = ""
|
||||
for volume in data["volumes"]:
|
||||
count += 1
|
||||
output_list.append(
|
||||
"{bold}{name: <{name_length}} \
|
||||
{volume: <{volume_length}} \
|
||||
"{bold}{volume: <{volume_length}} \
|
||||
{disk_id: <{disk_id_length}} \
|
||||
{protocol: <{protocol_length}} \
|
||||
{bus: <{bus_length}}{end_bold}".format(
|
||||
name_length=name_length,
|
||||
volume_length=volume_length,
|
||||
disk_id_length=disk_id_length,
|
||||
protocol_length=protocol_length,
|
||||
bus_length=bus_length,
|
||||
bold="",
|
||||
end_bold="",
|
||||
name=name,
|
||||
volume=volume[0],
|
||||
disk_id=volume[1],
|
||||
protocol=volume[2],
|
||||
bus=volume[3],
|
||||
volume=volume["volume"],
|
||||
disk_id=volume["disk_id"],
|
||||
protocol=volume["protocol"],
|
||||
bus=volume["bus"],
|
||||
)
|
||||
)
|
||||
return "\n".join(output_list)
|
||||
@ -1869,7 +1831,7 @@ def format_info(config, domain_information, long_output):
|
||||
return "\n".join(ainformation)
|
||||
|
||||
|
||||
def format_list(config, vm_list, raw):
|
||||
def format_list(config, vm_list):
|
||||
# Function to strip the "br" off of nets and return a nicer list
|
||||
def getNiceNetID(domain_information):
|
||||
# Network list
|
||||
@ -1888,13 +1850,6 @@ def format_list(config, vm_list, raw):
|
||||
tag_list.append(tag["name"])
|
||||
return tag_list
|
||||
|
||||
# Handle raw mode since it just lists the names
|
||||
if raw:
|
||||
ainformation = list()
|
||||
for vm in sorted(item["name"] for item in vm_list):
|
||||
ainformation.append(vm)
|
||||
return "\n".join(ainformation)
|
||||
|
||||
vm_list_output = []
|
||||
|
||||
# Determine optimal column widths
|
||||
|
@ -2,8 +2,8 @@ from setuptools import setup
|
||||
|
||||
setup(
|
||||
name="pvc",
|
||||
version="0.9.63",
|
||||
packages=["pvc", "pvc.lib"],
|
||||
version="0.9.72",
|
||||
packages=["pvc.cli", "pvc.lib"],
|
||||
install_requires=[
|
||||
"Click",
|
||||
"PyYAML",
|
||||
@ -14,7 +14,7 @@ setup(
|
||||
],
|
||||
entry_points={
|
||||
"console_scripts": [
|
||||
"pvc = pvc.pvc:cli",
|
||||
"pvc = pvc.cli.cli:cli",
|
||||
],
|
||||
},
|
||||
)
|
||||
|
74
debian/changelog
vendored
74
debian/changelog
vendored
@ -1,3 +1,77 @@
|
||||
pvc (0.9.72-0) unstable; urgency=high
|
||||
|
||||
* [CLI] Restores old functionality for default node value
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Fri, 01 Sep 2023 16:34:45 -0400
|
||||
|
||||
pvc (0.9.71-0) unstable; urgency=high
|
||||
|
||||
* [API] Adds API support for Debian Bookworm
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Fri, 01 Sep 2023 00:30:42 -0400
|
||||
|
||||
pvc (0.9.70-0) unstable; urgency=high
|
||||
|
||||
* [Node Daemon] Fixes several compatibility issues for Debian 12 "Bookworm"
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Thu, 31 Aug 2023 14:15:54 -0400
|
||||
|
||||
pvc (0.9.69-0) unstable; urgency=high
|
||||
|
||||
* [Node Daemon] Ensures that system load is always 2 decimal places on Bookworm
|
||||
* [Node Daemon] Fixes bug blocking primary takeover at DNS Aggregator start if Patroni is down
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Tue, 29 Aug 2023 22:01:22 -0400
|
||||
|
||||
pvc (0.9.68-0) unstable; urgency=high
|
||||
|
||||
* [CLI] Fixes another bug with network info view
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Sun, 27 Aug 2023 20:59:23 -0400
|
||||
|
||||
pvc (0.9.67-0) unstable; urgency=high
|
||||
|
||||
* [CLI] Fixes several more bugs in the refactored CLI
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Sun, 27 Aug 2023 14:47:20 -0400
|
||||
|
||||
pvc (0.9.66-0) unstable; urgency=high
|
||||
|
||||
* [CLI] Fixes a missing YAML import in CLI
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Sun, 27 Aug 2023 11:36:05 -0400
|
||||
|
||||
pvc (0.9.65-0) unstable; urgency=high
|
||||
|
||||
* [CLI] Fixes a bug in the node list filtering command
|
||||
* [CLI] Fixes a bug/default when no connection is specified
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Wed, 23 Aug 2023 01:56:57 -0400
|
||||
|
||||
pvc (0.9.64-0) unstable; urgency=high
|
||||
|
||||
**Breaking Change [CLI]**: The CLI client root commands have been reorganized. The following commands have changed:
|
||||
|
||||
* `pvc cluster` -> `pvc connection` (all subcommands)
|
||||
* `pvc task` -> `pvc cluster` (all subcommands)
|
||||
* `pvc maintenance` -> `pvc cluster maintenance`
|
||||
* `pvc status` -> `pvc cluster status`
|
||||
|
||||
Ensure you have updated to the latest version of the PVC Ansible repository before deploying this version or using PVC Ansible oneshot playbooks for management.
|
||||
|
||||
**Breaking Change [CLI]**: The `--restart` option for VM configuration changes now has an explicit `--no-restart` to disable restarting, or a prompt if neither is specified; `--unsafe` no longer bypasses this prompt which was a bug. Applies to most `vm <cmd> set` commands like `vm vcpu set`, `vm memory set`, etc. All instances also feature restart confirmation afterwards, which, if `--restart` is provided, will prompt for confirmation unless `--yes` or `--unsafe` is specified.
|
||||
|
||||
**Breaking Change [CLI]**: The `--long` option previously on some `info` commands no longer exists; use `-f long`/`--format long` instead.
|
||||
|
||||
* [CLI] Significantly refactors the CLI client code for consistency and cleanliness
|
||||
* [CLI] Implements `-f`/`--format` options for all `list` and `info` commands in a consistent way
|
||||
* [CLI] Changes the behaviour of VM modification options with "--restart" to provide a "--no-restart"; defaults to a prompt if neither is specified and ignores the "--unsafe" global entirely
|
||||
* [API] Fixes several bugs in the 3-debootstrap.py provisioner example script
|
||||
* [Node] Fixes some bugs around VM shutdown on node flush
|
||||
* [Documentation] Adds mentions of Ganeti and Harvester
|
||||
|
||||
-- Joshua M. Boniface <joshua@boniface.me> Fri, 18 Aug 2023 12:20:43 -0400
|
||||
|
||||
pvc (0.9.63-0) unstable; urgency=high
|
||||
|
||||
* Mentions Ganeti in the docs
|
||||
|
2
debian/control
vendored
2
debian/control
vendored
@ -16,7 +16,7 @@ Description: Parallel Virtual Cluster node daemon (Python 3)
|
||||
|
||||
Package: pvc-daemon-api
|
||||
Architecture: all
|
||||
Depends: systemd, pvc-daemon-common, python3-yaml, python3-flask, python3-flask-restful, python3-celery, python-celery-common, python3-distutils, redis, python3-redis, python3-lxml, python3-flask-migrate, python3-flask-script, fio
|
||||
Depends: systemd, pvc-daemon-common, python3-yaml, python3-flask, python3-flask-restful, python3-celery, python-celery-common, python3-distutils, redis, python3-redis, python3-lxml, python3-flask-migrate, fio
|
||||
Description: Parallel Virtual Cluster API daemon (Python 3)
|
||||
A KVM/Zookeeper/Ceph-based VM and private cloud manager
|
||||
.
|
||||
|
2
debian/pvc-daemon-api.install
vendored
2
debian/pvc-daemon-api.install
vendored
@ -1,5 +1,5 @@
|
||||
api-daemon/pvcapid.py usr/share/pvc
|
||||
api-daemon/pvcapid-manage.py usr/share/pvc
|
||||
api-daemon/pvcapid-manage*.py usr/share/pvc
|
||||
api-daemon/pvc-api-db-upgrade usr/share/pvc
|
||||
api-daemon/pvcapid.sample.yaml etc/pvc
|
||||
api-daemon/pvcapid usr/share/pvc
|
||||
|
@ -49,7 +49,7 @@ import re
|
||||
import json
|
||||
|
||||
# Daemon version
|
||||
version = "0.9.63"
|
||||
version = "0.9.72"
|
||||
|
||||
|
||||
##########################################################
|
||||
|
@ -77,7 +77,7 @@ def connect_zookeeper():
|
||||
|
||||
with open(pvcnoded_config_file, "r") as cfgfile:
|
||||
try:
|
||||
o_config = yaml.load(cfgfile)
|
||||
o_config = yaml.load(cfgfile, yaml.SafeLoader)
|
||||
except Exception as e:
|
||||
print(
|
||||
"ERROR: Failed to parse configuration file: {}".format(e),
|
||||
|
@ -620,9 +620,12 @@ class NodeInstance(object):
|
||||
for network in self.d_network:
|
||||
self.d_network[network].startDHCPServer()
|
||||
# 9. Start DNS aggregator; just continue if we fail
|
||||
if not patroni_failed:
|
||||
self.dns_aggregator.start_aggregator()
|
||||
else:
|
||||
try:
|
||||
if not patroni_failed:
|
||||
self.dns_aggregator.start_aggregator()
|
||||
else:
|
||||
raise
|
||||
except Exception:
|
||||
self.logger.out(
|
||||
"Not starting DNS aggregator due to Patroni failures", state="e"
|
||||
)
|
||||
@ -790,6 +793,19 @@ class NodeInstance(object):
|
||||
self.flush_stopper = False
|
||||
return
|
||||
|
||||
# Wait for a VM in "restart" or "shutdown" state to complete transition
|
||||
while self.zkhandler.read(("domain.state", dom_uuid)) in [
|
||||
"restart",
|
||||
"shutdown",
|
||||
]:
|
||||
self.logger.out(
|
||||
'Waiting 2s for VM state change completion for VM "{}"'.format(
|
||||
dom_uuid
|
||||
),
|
||||
state="i",
|
||||
)
|
||||
time.sleep(2)
|
||||
|
||||
self.logger.out(
|
||||
'Selecting target to migrate VM "{}"'.format(dom_uuid), state="i"
|
||||
)
|
||||
@ -806,17 +822,19 @@ class NodeInstance(object):
|
||||
|
||||
if target_node is None:
|
||||
self.logger.out(
|
||||
'Failed to find migration target for VM "{}"; shutting down and setting autostart flag'.format(
|
||||
'Failed to find migration target for running VM "{}"; shutting down and setting autostart flag'.format(
|
||||
dom_uuid
|
||||
),
|
||||
state="e",
|
||||
)
|
||||
self.zkhandler.write(
|
||||
[
|
||||
(("domain.state", dom_uuid), "shutdown"),
|
||||
(("domain.meta.autostart", dom_uuid), "True"),
|
||||
]
|
||||
)
|
||||
|
||||
if self.zkhandler.read(("domain.state", dom_uuid)) in ["start"]:
|
||||
self.zkhandler.write(
|
||||
[
|
||||
(("domain.state", dom_uuid), "shutdown"),
|
||||
(("domain.meta.autostart", dom_uuid), "True"),
|
||||
]
|
||||
)
|
||||
else:
|
||||
self.logger.out(
|
||||
'Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node),
|
||||
|
@ -338,8 +338,21 @@ def collect_ceph_stats(logger, config, zkhandler, this_node, queue):
|
||||
line = re.sub(r"\x1b(\[.*?[@-~]|\].*?(\x07|\x1b\\))", "", line)
|
||||
# Split it for parsing
|
||||
line = line.split()
|
||||
if len(line) > 1 and line[1].isdigit():
|
||||
# This is an OSD line so parse it
|
||||
|
||||
# Ceph 14 format:
|
||||
# ['|', '0', '|', 'hv1.p.u.bonilan.net', '|', '318G', '|', '463G', '|', '213', '|', '1430k', '|', '22', '|', '124k', '|', 'exists,up', '|']
|
||||
# Ceph 16 format:
|
||||
# ['0', 'hv1.t.u.bonilan.net', '2489M', '236G', '0', '0', '0', '0', 'exists,up']
|
||||
|
||||
# Bypass obviously invalid lines
|
||||
if len(line) < 1:
|
||||
continue
|
||||
elif line[0] == "+":
|
||||
continue
|
||||
|
||||
# If line begins with | and second entry is a digit (i.e. OSD ID)
|
||||
if line[0] == "|" and line[1].isdigit():
|
||||
# Parse the line in Ceph 14 format
|
||||
osd_id = line[1]
|
||||
node = line[3].split(".")[0]
|
||||
used = line[5]
|
||||
@ -349,20 +362,39 @@ def collect_ceph_stats(logger, config, zkhandler, this_node, queue):
|
||||
rd_ops = line[13]
|
||||
rd_data = line[15]
|
||||
state = line[17]
|
||||
osd_status.update(
|
||||
{
|
||||
str(osd_id): {
|
||||
"node": node,
|
||||
"used": used,
|
||||
"avail": avail,
|
||||
"wr_ops": wr_ops,
|
||||
"wr_data": wr_data,
|
||||
"rd_ops": rd_ops,
|
||||
"rd_data": rd_data,
|
||||
"state": state,
|
||||
}
|
||||
# If first entry is a digit (i.e. OSD ID)
|
||||
elif line[0].isdigit():
|
||||
# Parse the line in Ceph 16 format
|
||||
osd_id = line[0]
|
||||
node = line[1].split(".")[0]
|
||||
used = line[2]
|
||||
avail = line[3]
|
||||
wr_ops = line[4]
|
||||
wr_data = line[5]
|
||||
rd_ops = line[6]
|
||||
rd_data = line[7]
|
||||
state = line[8]
|
||||
# Otherwise, it's the header line and is ignored
|
||||
else:
|
||||
continue
|
||||
|
||||
# I don't know why 2018 me used this construct instead of a normal
|
||||
# dictionary update, but it works so not changing it.
|
||||
# ref: bfbe9188ce830381f3f2fa1da11f1973f08eca8c
|
||||
osd_status.update(
|
||||
{
|
||||
str(osd_id): {
|
||||
"node": node,
|
||||
"used": used,
|
||||
"avail": avail,
|
||||
"wr_ops": wr_ops,
|
||||
"wr_data": wr_data,
|
||||
"rd_ops": rd_ops,
|
||||
"rd_data": rd_data,
|
||||
"state": state,
|
||||
}
|
||||
)
|
||||
}
|
||||
)
|
||||
|
||||
# Merge them together into a single meaningful dict
|
||||
if debug:
|
||||
@ -753,7 +785,7 @@ def node_keepalive(logger, config, zkhandler, this_node, monitoring_instance):
|
||||
this_node.memtotal = int(psutil.virtual_memory().total / 1024 / 1024)
|
||||
this_node.memused = int(psutil.virtual_memory().used / 1024 / 1024)
|
||||
this_node.memfree = int(psutil.virtual_memory().free / 1024 / 1024)
|
||||
this_node.cpuload = os.getloadavg()[0]
|
||||
this_node.cpuload = round(os.getloadavg()[0], 2)
|
||||
|
||||
# Join against running threads
|
||||
if config["enable_hypervisor"]:
|
||||
|
@ -1,31 +1,54 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -o errexit
|
||||
|
||||
if [[ -z ${1} ]]; then
|
||||
echo "Please specify a cluster to run tests against."
|
||||
exit 1
|
||||
fi
|
||||
test_cluster="${1}"
|
||||
shift
|
||||
|
||||
if [[ ${1} == "--test-dangerously" ]]; then
|
||||
test_dangerously="y"
|
||||
else
|
||||
test_dangerously=""
|
||||
fi
|
||||
|
||||
_pvc() {
|
||||
echo "> pvc --cluster ${test_cluster} $@"
|
||||
pvc --quiet --cluster ${test_cluster} "$@"
|
||||
echo "> pvc --connection ${test_cluster} $@"
|
||||
pvc --quiet --connection ${test_cluster} "$@"
|
||||
sleep 1
|
||||
}
|
||||
|
||||
time_start=$(date +%s)
|
||||
|
||||
set -o errexit
|
||||
|
||||
pushd $( git rev-parse --show-toplevel ) &>/dev/null
|
||||
|
||||
# Cluster tests
|
||||
_pvc maintenance on
|
||||
_pvc maintenance off
|
||||
_pvc connection list
|
||||
_pvc connection detail
|
||||
|
||||
_pvc cluster maintenance on
|
||||
_pvc cluster maintenance off
|
||||
_pvc cluster status
|
||||
backup_tmp=$(mktemp)
|
||||
_pvc task backup --file ${backup_tmp}
|
||||
_pvc task restore --yes --file ${backup_tmp}
|
||||
_pvc cluster backup --file ${backup_tmp}
|
||||
if [[ -n ${test_dangerously} ]]; then
|
||||
# This is dangerous, so don't test it unless option given
|
||||
_pvc cluster restore --yes --file ${backup_tmp}
|
||||
fi
|
||||
rm ${backup_tmp} || true
|
||||
|
||||
# Provisioner tests
|
||||
_pvc provisioner profile list test
|
||||
_pvc provisioner profile list test || true
|
||||
_pvc provisioner template system add --vcpus 1 --vram 1024 --serial --vnc --vnc-bind 0.0.0.0 --node-limit hv1 --node-selector mem --node-autostart --migration-method live system-test || true
|
||||
_pvc provisioner template network add network-test || true
|
||||
_pvc provisioner template network vni add network-test 10000 || true
|
||||
_pvc provisioner template storage add storage-test || true
|
||||
_pvc provisioner template storage disk add --pool vms --size 8 --filesystem ext4 --mountpoint / storage-test sda || true
|
||||
_pvc provisioner script add script-test $( find . -name "3-debootstrap.py" ) || true
|
||||
_pvc provisioner profile add --profile-type provisioner --system-template system-test --network-template network-test --storage-template storage-test --userdata empty --script script-test --script-arg deb_release=bullseye test || true
|
||||
_pvc provisioner create --wait testx test
|
||||
sleep 30
|
||||
|
||||
@ -36,7 +59,7 @@ _pvc vm shutdown --yes --wait testx
|
||||
_pvc vm start testx
|
||||
sleep 30
|
||||
_pvc vm stop --yes testx
|
||||
_pvc vm disable testx
|
||||
_pvc vm disable --yes testx
|
||||
_pvc vm undefine --yes testx
|
||||
_pvc vm define --target hv3 --tag pvc-test ${vm_tmp}
|
||||
_pvc vm start testx
|
||||
@ -49,21 +72,21 @@ _pvc vm unmigrate --wait testx
|
||||
sleep 5
|
||||
_pvc vm move --wait --target hv1 testx
|
||||
sleep 5
|
||||
_pvc vm meta testx --limit hv1 --selector vms --method live --profile test --no-autostart
|
||||
_pvc vm meta testx --limit hv1 --node-selector vms --method live --profile test --no-autostart
|
||||
_pvc vm tag add testx mytag
|
||||
_pvc vm tag get testx
|
||||
_pvc vm list --tag mytag
|
||||
_pvc vm tag remove testx mytag
|
||||
_pvc vm network get testx
|
||||
_pvc vm vcpu set testx 4
|
||||
_pvc vm vcpu set --no-restart testx 4
|
||||
_pvc vm vcpu get testx
|
||||
_pvc vm memory set testx 4096
|
||||
_pvc vm memory set --no-restart testx 4096
|
||||
_pvc vm memory get testx
|
||||
_pvc vm vcpu set testx 2
|
||||
_pvc vm vcpu set --no-restart testx 2
|
||||
_pvc vm memory set testx 2048 --restart --yes
|
||||
sleep 5
|
||||
sleep 15
|
||||
_pvc vm list testx
|
||||
_pvc vm info --long testx
|
||||
_pvc vm info --format long testx
|
||||
rm ${vm_tmp} || true
|
||||
|
||||
# Node tests
|
||||
@ -77,6 +100,7 @@ _pvc node flush --wait hv1
|
||||
_pvc node ready --wait hv1
|
||||
_pvc node list hv1
|
||||
_pvc node info hv1
|
||||
sleep 15
|
||||
|
||||
# Network tests
|
||||
_pvc network add 10001 --description testing --type managed --domain testing.local --ipnet 10.100.100.0/24 --gateway 10.100.100.1 --dhcp --dhcp-start 10.100.100.100 --dhcp-end 10.100.100.199
|
||||
@ -84,7 +108,7 @@ sleep 5
|
||||
_pvc vm network add --restart --yes testx 10001
|
||||
sleep 30
|
||||
_pvc vm network remove --restart --yes testx 10001
|
||||
sleep 5
|
||||
sleep 15
|
||||
|
||||
_pvc network acl add 10001 --in --description test-acl --order 0 --rule "'ip daddr 10.0.0.0/8 counter'"
|
||||
_pvc network acl list 10001
|
||||
@ -95,31 +119,34 @@ _pvc network dhcp remove --yes 10001 12:34:56:78:90:ab
|
||||
|
||||
_pvc network modify --domain test10001.local 10001
|
||||
_pvc network list
|
||||
_pvc network info --long 10001
|
||||
_pvc network info --format long 10001
|
||||
|
||||
# Network-VM interaction tests
|
||||
_pvc vm network add testx 10001 --model virtio --restart --yes
|
||||
sleep 30
|
||||
_pvc vm network get testx
|
||||
_pvc vm network remove testx 10001 --restart --yes
|
||||
sleep 5
|
||||
sleep 15
|
||||
|
||||
_pvc network remove --yes 10001
|
||||
|
||||
# Storage tests
|
||||
_pvc storage status
|
||||
_pvc storage util
|
||||
_pvc storage osd set noout
|
||||
_pvc storage osd out 0
|
||||
_pvc storage osd in 0
|
||||
_pvc storage osd unset noout
|
||||
if [[ -n ${test_dangerously} ]]; then
|
||||
# This is dangerous, so don't test it unless option given
|
||||
_pvc storage osd set noout
|
||||
_pvc storage osd out 0
|
||||
_pvc storage osd in 0
|
||||
_pvc storage osd unset noout
|
||||
fi
|
||||
_pvc storage osd list
|
||||
_pvc storage pool add testing 64 --replcfg "copies=3,mincopies=2"
|
||||
sleep 5
|
||||
_pvc storage pool list
|
||||
_pvc storage volume add testing testx 1G
|
||||
_pvc storage volume resize testing testx 2G
|
||||
_pvc storage volume rename testing testx testerX
|
||||
_pvc storage volume resize --yes testing testx 2G
|
||||
_pvc storage volume rename --yes testing testx testerX
|
||||
_pvc storage volume clone testing testerX testerY
|
||||
_pvc storage volume list --pool testing
|
||||
_pvc storage volume snapshot add testing testerX asnapshotX
|
||||
@ -132,7 +159,7 @@ _pvc vm volume add testx --type rbd --disk-id sdh --bus scsi testing/testerY --r
|
||||
sleep 30
|
||||
_pvc vm volume get testx
|
||||
_pvc vm volume remove testx testing/testerY --restart --yes
|
||||
sleep 5
|
||||
sleep 15
|
||||
|
||||
_pvc storage volume remove --yes testing testerY
|
||||
_pvc storage volume remove --yes testing testerX
|
||||
@ -142,6 +169,14 @@ _pvc storage pool remove --yes testing
|
||||
_pvc vm stop --yes testx
|
||||
_pvc vm remove --yes testx
|
||||
|
||||
_pvc provisioner profile remove --yes test
|
||||
_pvc provisioner script remove --yes script-test
|
||||
_pvc provisioner template system remove --yes system-test
|
||||
_pvc provisioner template network remove --yes network-test
|
||||
_pvc provisioner template storage remove --yes storage-test
|
||||
|
||||
popd
|
||||
|
||||
time_end=$(date +%s)
|
||||
|
||||
echo
|
||||
|
Reference in New Issue
Block a user