Fix bugs in lock freeing function

1. The destination state on an error was invalid; should be "stop".

2. If a lock was listed but removing it failed (because it was already
cleared somehow), this would raise an error. In turn this would cause the
VM to not migrate and be left in an undefined state. Fix this, when
unlocking is forced, by skipping locks that no longer exist.
This commit is contained in:
Joshua Boniface 2024-10-15 10:36:50 -04:00
parent ae2cf8a070
commit 1293e8ae7e
2 changed files with 9 additions and 6 deletions

View File

@ -1997,11 +1997,14 @@ def vm_worker_flush_locks(zkhandler, celery, domain, force_unlock=False):
) )
if lock_remove_retcode != 0: if lock_remove_retcode != 0:
fail( if force_unlock and "No such file or directory" in lock_remove_stderr:
celery, continue
f"Failed to free RBD lock {lock['id']} on volume {rbd}: {lock_remove_stderr}", else:
) fail(
return False celery,
f"Failed to free RBD lock {lock['id']} on volume {rbd}: {lock_remove_stderr}",
)
return False
current_stage += 1 current_stage += 1
return finish( return finish(

View File

@ -247,7 +247,7 @@ def migrateFromFencedNode(zkhandler, node_name, config, logger):
) )
zkhandler.write( zkhandler.write(
{ {
(("domain.state", dom_uuid), "stopped"), (("domain.state", dom_uuid), "stop"),
(("domain.meta.autostart", dom_uuid), "True"), (("domain.meta.autostart", dom_uuid), "True"),
} }
) )