Compare commits

..

4 Commits

Author SHA1 Message Date
4727f6e848 Improve timing in test script 2023-08-18 11:58:13 -04:00
3b36ecf199 Improve testing with more tests 2023-08-18 11:44:39 -04:00
36558c73b8 Fix bugs for node flush for stop/shutdown/restart
Previously VMs in stop/shutdown/restart states wouldn't be properly
handled during a node flush. This fixes the bugs and ensures that the
transient VM states (shutdown/restart) are completed before proceeding,
and then avoids setting a stopped/shutdown VM to shutdown/autostart.
2023-08-18 11:25:59 -04:00
ea3f68442c Readd errexit to test script 2023-08-18 10:33:59 -04:00
2 changed files with 33 additions and 14 deletions

View File

@ -790,6 +790,19 @@ class NodeInstance(object):
self.flush_stopper = False
return
# Wait for a VM in "restart" or "shutdown" state to complete transition
while self.zkhandler.read(("domain.state", dom_uuid)) in [
"restart",
"shutdown",
]:
self.logger.out(
'Waiting 2s for VM state change completion for VM "{}"'.format(
dom_uuid
),
state="i",
)
time.sleep(2)
self.logger.out(
'Selecting target to migrate VM "{}"'.format(dom_uuid), state="i"
)
@ -806,17 +819,19 @@ class NodeInstance(object):
if target_node is None:
self.logger.out(
'Failed to find migration target for VM "{}"; shutting down and setting autostart flag'.format(
'Failed to find migration target for running VM "{}"; shutting down and setting autostart flag'.format(
dom_uuid
),
state="e",
)
self.zkhandler.write(
[
(("domain.state", dom_uuid), "shutdown"),
(("domain.meta.autostart", dom_uuid), "True"),
]
)
if self.zkhandler.read(("domain.state", dom_uuid)) in ["start"]:
self.zkhandler.write(
[
(("domain.state", dom_uuid), "shutdown"),
(("domain.meta.autostart", dom_uuid), "True"),
]
)
else:
self.logger.out(
'Migrating VM "{}" to node "{}"'.format(dom_uuid, target_node),

View File

@ -21,11 +21,17 @@ _pvc() {
time_start=$(date +%s)
set -o errexit
pushd $( git rev-parse --show-toplevel ) &>/dev/null
# Cluster tests
_pvc connection list
_pvc connection detail
_pvc cluster maintenance on
_pvc cluster maintenance off
_pvc cluster status
backup_tmp=$(mktemp)
_pvc cluster backup --file ${backup_tmp}
if [[ -n ${test_dangerously} ]]; then
@ -78,7 +84,7 @@ _pvc vm memory set --no-restart testx 4096
_pvc vm memory get testx
_pvc vm vcpu set --no-restart testx 2
_pvc vm memory set testx 2048 --restart --yes
sleep 5
sleep 15
_pvc vm list testx
_pvc vm info --format long testx
rm ${vm_tmp} || true
@ -94,9 +100,7 @@ _pvc node flush --wait hv1
_pvc node ready --wait hv1
_pvc node list hv1
_pvc node info hv1
_pvc vm start testx
sleep 30
sleep 15
# Network tests
_pvc network add 10001 --description testing --type managed --domain testing.local --ipnet 10.100.100.0/24 --gateway 10.100.100.1 --dhcp --dhcp-start 10.100.100.100 --dhcp-end 10.100.100.199
@ -104,7 +108,7 @@ sleep 5
_pvc vm network add --restart --yes testx 10001
sleep 30
_pvc vm network remove --restart --yes testx 10001
sleep 5
sleep 15
_pvc network acl add 10001 --in --description test-acl --order 0 --rule "'ip daddr 10.0.0.0/8 counter'"
_pvc network acl list 10001
@ -122,7 +126,7 @@ _pvc vm network add testx 10001 --model virtio --restart --yes
sleep 30
_pvc vm network get testx
_pvc vm network remove testx 10001 --restart --yes
sleep 5
sleep 15
_pvc network remove --yes 10001
@ -155,7 +159,7 @@ _pvc vm volume add testx --type rbd --disk-id sdh --bus scsi testing/testerY --r
sleep 30
_pvc vm volume get testx
_pvc vm volume remove testx testing/testerY --restart --yes
sleep 5
sleep 15
_pvc storage volume remove --yes testing testerY
_pvc storage volume remove --yes testing testerX