Port VM on-node tasks to Celery worker system

Adds Celery versions of the flush_locks, device_attach, and
device_detach functions.
commit 89681d54b9 (parent f0c2e9d295)
Date: 2023-11-05 22:32:41 -05:00

9 changed files with 577 additions and 174 deletions
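For context on the new task flow: the CLI code below polls a task's "state", "current", "total", and "status" fields. A minimal sketch of the worker-side Celery pattern that would produce such fields is shown here; the task name, helper logic, and broker URL are illustrative assumptions, not the actual PVC worker implementation.

# Illustrative sketch only -- not the PVC worker code. Shows the Celery custom-state
# contract (state/current/total/status) that the CLI poller below expects.
from celery import Celery

# Broker/backend URLs are placeholders for this sketch
celery = Celery("pvcworkerd", broker="redis://localhost:6379/0", backend="redis://localhost:6379/0")

@celery.task(name="vm.flush_locks", bind=True)
def vm_flush_locks(self, domain=None):
    # Report progress via a custom RUNNING state; the meta dict is what
    # task_status() surfaces to the CLI as current/total/status
    self.update_state(
        state="RUNNING",
        meta={"current": 1, "total": 2, "status": f"Flushing RBD locks for VM {domain}"},
    )
    # ... the actual lock-flushing work would happen here ...
    self.update_state(
        state="RUNNING",
        meta={"current": 2, "total": 2, "status": "Verifying lock state"},
    )
    # Returning marks the task SUCCESS; the return value becomes the final result
    return {"current": 2, "total": 2, "status": f"Flushed locks for VM {domain}"}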


@@ -1567,12 +1567,25 @@ def cli_vm_unmigrate(domain, wait, force_live):
 )
 @connection_req
 @click.argument("domain")
-def cli_vm_flush_locks(domain):
+@click.option(
+    "--wait/--no-wait",
+    "wait_flag",
+    is_flag=True,
+    default=True,
+    show_default=True,
+    help="Wait or don't wait for task to complete, showing progress",
+)
+def cli_vm_flush_locks(domain, wait_flag):
     """
-    Flush stale RBD locks for virtual machine DOMAIN. DOMAIN may be a UUID or name. DOMAIN must be in a stopped state before flushing locks.
+    Flush stale RBD locks for virtual machine DOMAIN. DOMAIN may be a UUID or name. DOMAIN must be in the stop, disable, or fail state before flushing locks.
+
+    NOTE: This is a task-based command. The "--wait" flag (default) will block and show progress. Specifying the "--no-wait" flag will return immediately with a job ID instead, which can be queried externally later.
     """
-    retcode, retmsg = pvc.lib.vm.vm_locks(CLI_CONFIG, domain)
+    retcode, retmsg = pvc.lib.vm.vm_locks(CLI_CONFIG, domain, wait_flag=wait_flag)
+
+    if retcode and wait_flag:
+        retmsg = wait_for_flush_locks(CLI_CONFIG, retmsg)
     finish(retcode, retmsg)


@@ -191,6 +191,66 @@ def update_store(store_path, store_data):
         jdump(store_data, fh, sort_keys=True, indent=4)
+
+
+def wait_for_flush_locks(CLI_CONFIG, task_detail):
+    """
+    Wait for a flush_locks task to complete
+    """
+
+    task_id = task_detail["task_id"]
+    run_on = task_detail["run_on"]
+
+    echo(CLI_CONFIG, f"Task ID: {task_id} assigned to node {run_on}")
+    echo(CLI_CONFIG, "")
+
+    # Wait for the task to start
+    echo(CLI_CONFIG, "Waiting for task to start...", newline=False)
+    while True:
+        sleep(0.25)
+        task_status = pvc.lib.common.task_status(
+            CLI_CONFIG, task_id=task_id, is_watching=True
+        )
+        if task_status.get("state") != "PENDING":
+            break
+        echo(CLI_CONFIG, ".", newline=False)
+    echo(CLI_CONFIG, " done.")
+    echo(CLI_CONFIG, "")
+
+    # Start following the task state, updating progress as we go
+    total_task = task_status.get("total")
+    with progressbar(length=total_task, show_eta=False) as bar:
+        last_task = 0
+        maxlen = 0
+        while True:
+            sleep(0.25)
+            if task_status.get("state") != "RUNNING":
+                break
+            if task_status.get("current") > last_task:
+                current_task = int(task_status.get("current"))
+                bar.update(current_task - last_task)
+                last_task = current_task
+                # The extensive spaces at the end cause this to overwrite longer previous messages
+                curlen = len(str(task_status.get("status")))
+                if curlen > maxlen:
+                    maxlen = curlen
+                lendiff = maxlen - curlen
+                overwrite_whitespace = " " * lendiff
+                echo(
+                    CLI_CONFIG,
+                    " " + task_status.get("status") + overwrite_whitespace,
+                    newline=False,
+                )
+            task_status = pvc.lib.common.task_status(
+                CLI_CONFIG, task_id=task_id, is_watching=True
+            )
+        if task_status.get("state") == "SUCCESS":
+            bar.update(total_task - last_task)
+
+    echo(CLI_CONFIG, "")
+    retdata = task_status.get("state") + ": " + task_status.get("status")
+
+    return retdata
+
+
 def wait_for_provisioner(CLI_CONFIG, task_id):
     """
     Wait for a provisioner task to complete

@@ -24,6 +24,7 @@ import math
 import time
 import requests
 import click
+from ast import literal_eval
 from urllib3 import disable_warnings
@@ -199,3 +200,64 @@ def call_api(
     # Return the response object
     return response
+
+
+def task_status(config, task_id=None, is_watching=False):
+    """
+    Get information about Celery job {task_id}, or all tasks if None
+
+    API endpoint: GET /api/v1/tasks/{task_id}
+    API arguments:
+    API schema: {json_data_object}
+    """
+    if task_id is not None:
+        response = call_api(config, "get", f"/tasks/{task_id}")
+    else:
+        response = call_api(config, "get", "/tasks")
+
+    if task_id is not None:
+        if response.status_code == 200:
+            retvalue = True
+            respjson = response.json()
+
+            if is_watching:
+                # Just return the raw JSON to the watching process instead of including value
+                return respjson
+            else:
+                return retvalue, respjson
+        else:
+            retvalue = False
+            retdata = response.json().get("message", "")
+    else:
+        retvalue = True
+        task_data_raw = response.json()
+        # Format the Celery data into a more useful data structure
+        task_data = list()
+        for task_type in ["active", "reserved", "scheduled"]:
+            try:
+                type_data = task_data_raw[task_type]
+            except Exception:
+                type_data = None
+
+            if not type_data:
+                type_data = dict()
+
+            for task_host in type_data:
+                for task_job in task_data_raw[task_type][task_host]:
+                    task = dict()
+                    if task_type == "reserved":
+                        task["type"] = "pending"
+                    else:
+                        task["type"] = task_type
+                    task["worker"] = task_host
+                    task["id"] = task_job.get("id")
+                    try:
+                        task["args"] = literal_eval(task_job.get("args"))
+                    except Exception:
+                        task["args"] = task_job.get("args")
+                    try:
+                        task["kwargs"] = literal_eval(task_job.get("kwargs"))
+                    except Exception:
+                        task["kwargs"] = task_job.get("kwargs")
+                    task_data.append(task)
+        retdata = task_data
+
+    return retvalue, retdata
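For reference, a hypothetical shape of the GET /tasks payload handled by the task_id=None branch above: Celery inspect-style categories keyed by worker, with args/kwargs serialized as strings, which is why the parser falls back through ast.literal_eval. All values are invented for illustration.

task_data_raw = {
    "active": {
        "celery@hv1": [
            {
                "id": "f2b1c9e0-6b7a-4c1d-9a0e-123456789abc",
                "args": "['testvm1']",  # string repr; literal_eval() turns it into ['testvm1']
                "kwargs": "{}",
            },
        ],
    },
    "reserved": {"celery@hv1": []},  # surfaced as type "pending" after formatting
    "scheduled": {},
}
# Each job is flattened into a dict like:
# {"type": "active", "worker": "celery@hv1", "id": "...", "args": ["testvm1"], "kwargs": {}}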


@@ -152,7 +152,7 @@ def vm_device_attach(config, vm, xml):
     data = {"xml": xml}
     response = call_api(config, "post", "/vm/{vm}/device".format(vm=vm), data=data)
 
-    if response.status_code == 200:
+    if response.status_code in [200, 202]:
         retstatus = True
     else:
         retstatus = False
@@ -171,7 +171,7 @@ def vm_device_detach(config, vm, xml):
     data = {"xml": xml}
     response = call_api(config, "delete", "/vm/{vm}/device".format(vm=vm), data=data)
 
-    if response.status_code == 200:
+    if response.status_code in [200, 202]:
         retstatus = True
     else:
         retstatus = False
@@ -415,7 +415,7 @@ def vm_node(config, vm, target_node, action, force=False, wait=False, force_live
     return retstatus, response.json().get("message", "")
 
 
-def vm_locks(config, vm):
+def vm_locks(config, vm, wait_flag=False):
     """
     Flush RBD locks of (stopped) VM
 
@@ -423,14 +423,23 @@ def vm_locks(config, vm):
     API arguments:
     API schema: {"message":"{data}"}
     """
-    response = call_api(config, "post", "/vm/{vm}/locks".format(vm=vm))
+    response = call_api(config, "post", f"/vm/{vm}/locks")
 
-    if response.status_code == 200:
-        retstatus = True
+    if response.status_code == 202:
+        retvalue = True
+        retjson = response.json()
+        if not wait_flag:
+            retdata = (
+                f"Task ID: {retjson['task_id']} assigned to node {retjson['run_on']}"
+            )
+        else:
+            # Just return the task JSON without formatting
+            retdata = response.json()
     else:
-        retstatus = False
+        retvalue = False
+        retdata = response.json().get("message", "")
 
-    return retstatus, response.json().get("message", "")
+    return retvalue, retdata
 
 
 def vm_backup(config, vm, backup_path, incremental_parent=None, retain_snapshot=False):