Fix bugs in lock freeing function
1. The destination state on an error was invalid; it should be "stop". 2. If a lock was listed but removing it failed (because it was already cleared somehow), this would error. In turn, this would cause the VM to not migrate and be left in an undefined state. Fix that when unlocking is forced.
This commit is contained in:
parent
ae2cf8a070
commit
1293e8ae7e
|
@ -1997,6 +1997,9 @@ def vm_worker_flush_locks(zkhandler, celery, domain, force_unlock=False):
|
|||
)
|
||||
|
||||
if lock_remove_retcode != 0:
|
||||
if force_unlock and "No such file or directory" in lock_remove_stderr:
|
||||
continue
|
||||
else:
|
||||
fail(
|
||||
celery,
|
||||
f"Failed to free RBD lock {lock['id']} on volume {rbd}: {lock_remove_stderr}",
|
||||
|
|
|
@ -247,7 +247,7 @@ def migrateFromFencedNode(zkhandler, node_name, config, logger):
|
|||
)
|
||||
zkhandler.write(
|
||||
{
|
||||
(("domain.state", dom_uuid), "stopped"),
|
||||
(("domain.state", dom_uuid), "stop"),
|
||||
(("domain.meta.autostart", dom_uuid), "True"),
|
||||
}
|
||||
)
|
||||
|
|
Loading…
Reference in New Issue