2019-07-05 14:11:01 -04:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
|
2020-02-08 19:16:19 -05:00
|
|
|
# helper.py - PVC HTTP API helper functions
|
2019-07-05 14:11:01 -04:00
|
|
|
# Part of the Parallel Virtual Cluster (PVC) system
|
|
|
|
#
|
2020-01-08 19:38:02 -05:00
|
|
|
# Copyright (C) 2018-2020 Joshua M. Boniface <joshua@boniface.me>
|
2019-07-05 14:11:01 -04:00
|
|
|
#
|
|
|
|
# This program is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
|
|
|
#
|
|
|
|
###############################################################################
|
|
|
|
|
|
|
|
import flask
|
2020-11-24 02:39:06 -05:00
|
|
|
import json
|
2020-01-24 13:17:48 -05:00
|
|
|
import lxml.etree as etree
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-03-06 09:40:13 -05:00
|
|
|
from distutils.util import strtobool as dustrtobool
|
2019-12-23 20:43:20 -05:00
|
|
|
|
Improve handling of large file uploads
By default, Werkzeug would require the entire file (be it an OVA or
image file) to be uploaded and saved to a temporary, fake file under
`/tmp`, before any further processing could occur. This blocked most of
the execution of these functions until the upload was completed.
This entirely defeated the purpose of what I was trying to do, which was
to save the uploads directly to the temporary blockdev in each case,
thus avoiding any sort of memory or (host) disk usage.
The solution is two-fold:
1. First, ensure that the `location='args'` value is set in
RequestParser; without this, the `files` portion would be parsed
during the argument parsing, which was the original source of this
blocking behaviour.
2. Instead of the convoluted request handling that was being done
originally here, instead entirely defer the parsing of the `files`
arguments until the point in the code where they are ready to be
saved. Then, using an override stream_factory that simply opens the
temporary blockdev, the upload can commence while being written
directly out to it, rather than using `/tmp` space.
This does alter the error handling slightly; it is impossible to check
if the argument was passed until this point in the code, so it may take
longer to fail if the API consumer does not specify a file as they
should. This is a minor trade-off and I would expect my API consumers to
be sane here.
2020-10-19 00:47:56 -04:00
|
|
|
from werkzeug.formparser import parse_form_data
|
|
|
|
|
2020-02-08 18:48:59 -05:00
|
|
|
import daemon_lib.common as pvc_common
|
|
|
|
import daemon_lib.cluster as pvc_cluster
|
|
|
|
import daemon_lib.node as pvc_node
|
|
|
|
import daemon_lib.vm as pvc_vm
|
|
|
|
import daemon_lib.network as pvc_network
|
|
|
|
import daemon_lib.ceph as pvc_ceph
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 13:23:34 -05:00
|
|
|
config = None # Set in this namespace by flaskapi
|
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-03-06 09:40:13 -05:00
|
|
|
def strtobool(stringv):
    """
    Permissively convert `stringv` to a boolean.

    Returns False for None, passes booleans through unchanged, and maps the
    usual truthy strings ('y', 'yes', 't', 'true', 'on', '1', any case) to
    True. Anything else — including unparseable values and non-strings —
    returns False rather than raising.

    Note: previously delegated to distutils.util.strtobool, which is
    deprecated since Python 3.10 and removed in 3.12; the equivalent logic
    is now inlined with identical semantics.
    """
    if stringv is None:
        return False
    if isinstance(stringv, bool):
        return bool(stringv)
    try:
        # Same truthy set distutils.util.strtobool accepted; unknown
        # strings fall through to False, non-strings raise and are caught
        return stringv.lower() in ('y', 'yes', 't', 'true', 'on', '1')
    except Exception:
        return False
|
2020-03-06 09:40:13 -05:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-23 20:43:20 -05:00
|
|
|
#
|
2020-11-24 02:39:06 -05:00
|
|
|
# Cluster base functions
|
2019-12-23 20:43:20 -05:00
|
|
|
#
|
|
|
|
def initialize_cluster():
    """
    Create the initial root key hierarchy in Zookeeper for a new PVC cluster.

    Returns False (without touching anything) if the cluster has already
    been initialized, True once the root keys are committed.
    """
    # Open a Zookeeper connection
    zk_conn = pvc_common.startZKConnection(config['coordinators'])

    # Abort if we've initialized the cluster before
    if zk_conn.exists('/primary_node'):
        # Bug fix: close the connection before bailing (was leaked here)
        pvc_common.stopZKConnection(zk_conn)
        return False

    # The root keys and their initial values, in creation order
    root_keys = (
        ('/primary_node', 'none'),
        ('/upstream_ip', 'none'),
        ('/maintenance', 'False'),
        ('/nodes', ''),
        ('/domains', ''),
        ('/networks', ''),
        ('/ceph', ''),
        ('/ceph/osds', ''),
        ('/ceph/pools', ''),
        ('/ceph/volumes', ''),
        ('/ceph/snapshots', ''),
        ('/cmd', ''),
        ('/cmd/domains', ''),
        ('/cmd/ceph', ''),
        ('/locks', ''),
        ('/locks/flush_lock', ''),
        ('/locks/primary_node', ''),
    )

    # Create the root keys atomically in a single transaction
    transaction = zk_conn.transaction()
    for path, value in root_keys:
        transaction.create(path, value.encode('ascii'))
    transaction.commit()

    # Close the Zookeeper connection
    pvc_common.stopZKConnection(zk_conn)

    return True
|
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-11-24 02:39:06 -05:00
|
|
|
def backup_cluster():
    """
    Dump the entire cluster Zookeeper tree into a flat dict of path -> data.

    Walks the tree depth-first from '/', skipping the built-in /zookeeper
    subtree. Returns the data dictionary and an HTTP 200 code; the output is
    suitable for feeding back into restore_cluster().
    """
    # Open a zookeeper connection
    zk_conn = pvc_common.startZKConnection(config['coordinators'])

    # Dictionary of values to come
    cluster_data = dict()

    def get_data(path):
        # Record this node's data, then recurse into its children
        data_raw = zk_conn.get(path)
        if data_raw:
            data = data_raw[0].decode('utf8')
            children = zk_conn.get_children(path)

            cluster_data[path] = data

            if children:
                if path == '/':
                    child_prefix = '/'
                else:
                    child_prefix = path + '/'

                for child in children:
                    if child_prefix + child == '/zookeeper':
                        # We must skip the built-in /zookeeper tree
                        continue
                    get_data(child_prefix + child)

    get_data('/')

    # Bug fix: close the Zookeeper connection (it was previously leaked)
    pvc_common.stopZKConnection(zk_conn)

    return cluster_data, 200
|
|
|
|
|
|
|
|
|
|
|
|
def restore_cluster(cluster_data_raw):
    """
    Restore a JSON cluster backup (as produced by backup_cluster) into
    Zookeeper atomically.

    Returns a message dict plus 200 on success, 400 on invalid JSON, or
    500 if the Zookeeper transaction fails.
    """
    # Parse the backup before touching Zookeeper; a parse failure
    # previously leaked an open connection
    try:
        cluster_data = json.loads(cluster_data_raw)
    except Exception as e:
        return {"message": "Failed to parse JSON data: {}.".format(e)}, 400

    # Open a zookeeper connection
    zk_conn = pvc_common.startZKConnection(config['coordinators'])

    # Open a single transaction (restore is atomic)
    zk_transaction = zk_conn.transaction()

    for key, data in cluster_data.items():
        # Update existing keys in place; create the rest
        if zk_conn.exists(key):
            zk_transaction.set_data(key, str(data).encode('utf8'))
        else:
            zk_transaction.create(key, str(data).encode('utf8'))

    # Bug fix: the original re-raised here, making the 500 response
    # unreachable; report the failure to the API consumer instead
    try:
        zk_transaction.commit()
        retdata = {'message': 'Restore completed successfully.'}
        retcode = 200
    except Exception as e:
        retdata = {'message': 'Restore failed: {}.'.format(e)}
        retcode = 500

    # Bug fix: close the Zookeeper connection (it was previously leaked)
    pvc_common.stopZKConnection(zk_conn)

    return retdata, retcode
|
|
|
|
|
|
|
|
|
2019-12-29 20:42:02 -05:00
|
|
|
#
|
2020-01-09 10:53:27 -05:00
|
|
|
# Cluster functions
|
2019-12-29 20:42:02 -05:00
|
|
|
#
|
|
|
|
def cluster_status():
    """
    Get the overall status of the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    _, status_data = pvc_cluster.get_info(zk_conn)
    pvc_common.stopZKConnection(zk_conn)

    # The status query itself is always reported as a 200
    return status_data, 200
|
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-01-09 10:53:27 -05:00
|
|
|
def cluster_maintenance(maint_state='false'):
    """
    Set the cluster in or out of maintenance state.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_cluster.set_maintenance(zk_conn, maint_state)
    pvc_common.stopZKConnection(zk_conn)

    # 200 on success, 400 if the state change was rejected
    retcode = 200 if retflag else 400
    return {'message': retmsg}, retcode
|
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 14:11:01 -04:00
|
|
|
#
|
|
|
|
# Node functions
|
|
|
|
#
|
2020-06-25 11:38:30 -04:00
|
|
|
def node_list(limit=None, daemon_state=None, coordinator_state=None, domain_state=None, is_fuzzy=True):
    """
    Return a list of nodes with limit LIMIT, optionally filtered by
    daemon, coordinator, or domain state.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_node.get_list(zk_conn, limit, daemon_state=daemon_state, coordinator_state=coordinator_state, domain_state=domain_state, is_fuzzy=is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

    # Lookup failure: pass the error message through as a 400
    if not retflag:
        return {'message': retdata}, 400
    # Success with no matches: 404
    if not retdata:
        return {'message': 'Node not found.'}, 404
    return retdata, 200
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-28 23:31:59 -04:00
|
|
|
def node_daemon_state(node):
    """
    Return the daemon state of node NODE.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, nodes = pvc_node.get_list(zk_conn, node, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

    if not retflag:
        # Lookup itself failed; pass the error message through
        return {'message': nodes}, 400
    if not nodes:
        return {'message': 'Node not found.'}, 404
    return {'name': node, 'daemon_state': nodes[0]['daemon_state']}, 200
|
2019-07-28 23:31:59 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-28 23:31:59 -04:00
|
|
|
def node_coordinator_state(node):
    """
    Return the coordinator state of node NODE.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, nodes = pvc_node.get_list(zk_conn, node, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

    if not retflag:
        # Lookup itself failed; pass the error message through
        return {'message': nodes}, 400
    if not nodes:
        return {'message': 'Node not found.'}, 404
    return {'name': node, 'coordinator_state': nodes[0]['coordinator_state']}, 200
|
2019-07-28 23:31:59 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-28 23:31:59 -04:00
|
|
|
def node_domain_state(node):
    """
    Return the domain state of node NODE.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_node.get_list(zk_conn, node, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
        if retdata:
            retcode = 200
            retdata = {
                'name': node,
                'domain_state': retdata[0]['domain_state']
            }
        else:
            retcode = 404
            retdata = {
                'message': 'Node not found.'
            }
    else:
        retcode = 400
        # Bug fix: the raw error string was previously returned unwrapped;
        # wrap it in a message dict, consistent with node_daemon_state and
        # node_coordinator_state
        retdata = {
            'message': retdata
        }

    return retdata, retcode
|
2019-07-28 23:31:59 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-02-19 10:50:21 -05:00
|
|
|
def node_secondary(node):
    """
    Take NODE out of primary router mode.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_node.secondary_node(zk_conn, node)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-02-19 10:50:21 -05:00
|
|
|
def node_primary(node):
    """
    Set NODE to primary router mode.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_node.primary_node(zk_conn, node)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-25 20:22:00 -05:00
|
|
|
def node_flush(node, wait):
    """
    Flush NODE of running VMs.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_node.flush_node(zk_conn, node, wait)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-25 20:22:00 -05:00
|
|
|
def node_ready(node, wait):
    """
    Restore NODE to active service.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_node.ready_node(zk_conn, node, wait)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 14:11:01 -04:00
|
|
|
#
|
|
|
|
# VM functions
|
|
|
|
#
|
2019-07-25 15:42:17 -04:00
|
|
|
def vm_is_migrated(vm):
    """
    Determine if a VM is migrated or not.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    migrated = pvc_vm.is_migrated(zk_conn, vm)
    pvc_common.stopZKConnection(zk_conn)
    return migrated
|
2019-07-25 15:42:17 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-08-07 14:24:16 -04:00
|
|
|
def vm_state(vm):
    """
    Return the state of virtual machine VM.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, vm_data = pvc_vm.get_list(zk_conn, None, None, vm, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

    if not retflag:
        # Lookup itself failed; pass the error message through
        return {'message': vm_data}, 400
    if not vm_data:
        return {'message': 'VM not found.'}, 404
    return {'name': vm, 'state': vm_data['state']}, 200
|
2019-08-07 14:24:16 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-08-07 14:24:16 -04:00
|
|
|
def vm_node(vm):
    """
    Return the current node of virtual machine VM.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, vm_data = pvc_vm.get_list(zk_conn, None, None, vm, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

    if not retflag:
        # Lookup itself failed; pass the error message through
        return {'message': vm_data}, 400
    if not vm_data:
        return {'message': 'VM not found.'}, 404
    return {
        'name': vm,
        'node': vm_data['node'],
        'last_node': vm_data['last_node']
    }, 200
|
2019-08-07 14:24:16 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-25 19:10:12 -05:00
|
|
|
def vm_console(vm, lines=None):
    """
    Return the current console log for VM.

    `lines` is the number of log lines to fetch; if unset or unparseable
    it defaults to 10.
    """
    # Default to 10 lines of log if not set
    try:
        lines = int(lines)
    except (TypeError, ValueError):
        # Bug fix: a non-numeric string previously raised an uncaught
        # ValueError (only TypeError was handled); treat it as "not set"
        lines = 10

    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_vm.get_console_log(zk_conn, vm, lines)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
        retcode = 200
        retdata = {
            'name': vm,
            'data': retdata
        }
    else:
        retcode = 400
        retdata = {
            'message': retdata
        }

    return retdata, retcode
|
2019-12-25 19:10:12 -05:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 18:24:14 -04:00
|
|
|
def vm_list(node=None, state=None, limit=None, is_fuzzy=True):
    """
    Return a list of VMs with limit LIMIT, optionally filtered by node
    and state.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_vm.get_list(zk_conn, node, state, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

    # Lookup failure: pass the error message through as a 400
    if not retflag:
        return {'message': retdata}, 400
    # Success with no matches: 404
    if not retdata:
        return {'message': 'VM not found.'}, 404
    return retdata, 200
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-10-29 11:31:32 -04:00
|
|
|
def vm_define(xml, node, limit, selector, autostart, migration_method):
    """
    Define a VM from Libvirt XML in the PVC cluster.
    """
    # Verify our XML is sensible before touching the cluster
    try:
        parsed_xml = etree.fromstring(xml)
        new_cfg = etree.tostring(parsed_xml, pretty_print=True).decode('utf8')
    except Exception as e:
        return {'message': 'XML is malformed or incorrect: {}'.format(e)}, 400

    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.define_vm(zk_conn, new_cfg, node, limit, selector, autostart, migration_method, profile=None)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-23 20:43:20 -05:00
|
|
|
def get_vm_meta(vm):
    """
    Get metadata of a VM.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, vm_data = pvc_vm.get_list(zk_conn, None, None, vm, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

    if not retflag:
        # Lookup itself failed; pass the error message through
        return {'message': vm_data}, 400
    if not vm_data:
        return {'message': 'VM not found.'}, 404
    return {
        'name': vm,
        'node_limit': vm_data['node_limit'],
        'node_selector': vm_data['node_selector'],
        'node_autostart': vm_data['node_autostart'],
        'migration_method': vm_data['migration_method']
    }, 200
|
2019-07-05 18:24:14 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-10-29 11:31:32 -04:00
|
|
|
def update_vm_meta(vm, limit, selector, autostart, provisioner_profile, migration_method):
    """
    Update metadata of a VM.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])

    # Coerce autostart to a real boolean; anything unparseable means False
    if autostart is not None:
        try:
            autostart = bool(strtobool(autostart))
        except Exception:
            autostart = False

    retflag, retmsg = pvc_vm.modify_vm_metadata(zk_conn, vm, limit, selector, autostart, provisioner_profile, migration_method)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-10-12 01:17:39 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 18:24:14 -04:00
|
|
|
def vm_modify(name, restart, xml):
    """
    Modify a VM Libvirt XML in the PVC cluster.
    """
    # Verify our XML is sensible before touching the cluster
    try:
        parsed_xml = etree.fromstring(xml)
        new_cfg = etree.tostring(parsed_xml, pretty_print=True).decode('utf8')
    except Exception as e:
        return {'message': 'XML is malformed or incorrect: {}'.format(e)}, 400

    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.modify_vm(zk_conn, name, restart, new_cfg)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 18:24:14 -04:00
|
|
|
def vm_undefine(name):
    """
    Undefine a VM from the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.undefine_vm(zk_conn, name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 18:24:14 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 18:24:14 -04:00
|
|
|
def vm_remove(name):
    """
    Remove a VM from the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.remove_vm(zk_conn, name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 18:24:14 -04:00
|
|
|
def vm_start(name):
    """
    Start a VM in the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.start_vm(zk_conn, name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 18:24:14 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-02-19 09:51:19 -05:00
|
|
|
def vm_restart(name, wait):
    """
    Restart a VM in the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.restart_vm(zk_conn, name, wait)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-02-19 09:51:19 -05:00
|
|
|
def vm_shutdown(name, wait):
    """
    Shutdown a VM in the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.shutdown_vm(zk_conn, name, wait)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 18:24:14 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 18:24:14 -04:00
|
|
|
def vm_stop(name):
    """
    Forcibly stop a VM in the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.stop_vm(zk_conn, name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-01-05 14:38:14 -05:00
|
|
|
def vm_disable(name):
    """
    Disable a (stopped) VM in the PVC cluster.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.disable_vm(zk_conn, name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-06-06 11:49:21 -04:00
|
|
|
def vm_move(name, node, wait, force_live):
    """
    Move a VM to another node.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retmsg = pvc_vm.move_vm(zk_conn, name, node, wait, force_live)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes in the message so JSON output stays clean
    retcode = 200 if retflag else 400
    return {'message': retmsg.replace('"', "'")}, retcode
|
2019-07-05 18:24:14 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-06-06 11:49:21 -04:00
|
|
|
def vm_migrate(name, node, flag_force, wait, force_live):
    """
    Temporarily migrate a VM to another node.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_vm.migrate_vm(zk_conn, name, node, flag_force, wait, force_live)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 18:24:14 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-06-06 11:49:21 -04:00
|
|
|
def vm_unmigrate(name, wait, force_live):
    """
    Unmigrate a migrated VM (return it to its home node).

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_vm.unmigrate_vm(zk_conn, name, wait, force_live)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-23 20:43:20 -05:00
|
|
|
def vm_flush_locks(vm):
    """
    Flush RBD locks of a (stopped) VM.

    The VM must be in the 'stop' or 'disable' state; flushing locks on a
    running VM would invalidate its active RBD sessions.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_vm.get_list(zk_conn, None, None, vm, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)

    # Bugfix: bail out early if the VM lookup itself failed. Previously the
    # code indexed retdata['state'] unconditionally, which raises a TypeError
    # when retdata is an error string from a failed lookup.
    if not retflag:
        return {'message': retdata}, 400

    if retdata['state'] not in ['stop', 'disable']:
        return {"message": "VM must be stopped to flush locks"}, 400

    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_vm.flush_locks(zk_conn, vm)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
        retcode = 200
    else:
        retcode = 400

    # Normalize double quotes so the message embeds cleanly in JSON output
    output = {
        'message': retdata.replace('"', "'")
    }
    return output, retcode
|
2019-08-07 14:24:16 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 14:11:01 -04:00
|
|
|
#
|
|
|
|
# Network functions
|
|
|
|
#
|
2019-12-23 20:43:20 -05:00
|
|
|
def net_list(limit=None, is_fuzzy=True):
    """
    Return a list of client networks with limit LIMIT.

    Returns (data, 200) on success, a message dict with 404 when nothing
    matches, or a message dict with 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.get_list(zk_conn, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

    # Guard clauses: failure first, then empty result, then success
    if not retflag:
        return {'message': retdata}, 400
    if not retdata:
        return {'message': 'Network not found.'}, 404
    return retdata, 200
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-08 23:32:03 -05:00
|
|
|
def net_add(vni, description, nettype, domain, name_servers,
            ip4_network, ip4_gateway, ip6_network, ip6_gateway,
            dhcp4_flag, dhcp4_start, dhcp4_end):
    """
    Add a virtual client network to the PVC cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    # Coerce the string flag from the request into a real boolean
    if dhcp4_flag:
        dhcp4_flag = bool(strtobool(dhcp4_flag))

    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.add_network(zk_conn, vni, description, nettype, domain, name_servers,
                                              ip4_network, ip4_gateway, ip6_network, ip6_gateway,
                                              dhcp4_flag, dhcp4_start, dhcp4_end)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-08 23:32:03 -05:00
|
|
|
def net_modify(vni, description, domain, name_servers,
               ip4_network, ip4_gateway,
               ip6_network, ip6_gateway,
               dhcp4_flag, dhcp4_start, dhcp4_end):
    """
    Modify a virtual client network in the PVC cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    # Coerce the string flag into a boolean; None means "leave unchanged"
    if dhcp4_flag is not None:
        dhcp4_flag = bool(strtobool(dhcp4_flag))

    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.modify_network(zk_conn, vni, description, domain, name_servers,
                                                 ip4_network, ip4_gateway, ip6_network, ip6_gateway,
                                                 dhcp4_flag, dhcp4_start, dhcp4_end)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 21:39:04 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-26 09:41:17 -04:00
|
|
|
def net_remove(network):
    """
    Remove a virtual client network from the PVC cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.remove_network(zk_conn, network)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 14:11:01 -04:00
|
|
|
def net_dhcp_list(network, limit=None, static=False):
    """
    Return a list of DHCP leases in network NETWORK with limit LIMIT.

    Returns (data, 200) on success, a message dict with 404 when nothing
    matches, or a message dict with 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.get_list_dhcp(zk_conn, network, limit, static)
    pvc_common.stopZKConnection(zk_conn)

    # Guard clauses: failure first, then empty result, then success
    if not retflag:
        return {'message': retdata}, 400
    if not retdata:
        return {'message': 'Lease not found.'}, 404
    return retdata, 200
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 21:39:04 -04:00
|
|
|
def net_dhcp_add(network, ipaddress, macaddress, hostname):
    """
    Add a static DHCP lease to a virtual client network.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.add_dhcp_reservation(zk_conn, network, ipaddress, macaddress, hostname)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 21:39:04 -04:00
|
|
|
def net_dhcp_remove(network, macaddress):
    """
    Remove a static DHCP lease from a virtual client network.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.remove_dhcp_reservation(zk_conn, network, macaddress)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-23 20:43:20 -05:00
|
|
|
def net_acl_list(network, limit=None, direction=None, is_fuzzy=True):
    """
    Return a list of network ACLs in network NETWORK with limit LIMIT.

    Returns (data, 200) on success, a message dict with 404 when nothing
    matches, or a message dict with 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    # Bugfix: pass the caller's is_fuzzy argument through; previously this
    # was hardcoded to is_fuzzy=True, making exact-match lookups impossible.
    retflag, retdata = pvc_network.get_list_acl(zk_conn, network, limit, direction, is_fuzzy=is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
        if retdata:
            retcode = 200
        else:
            retcode = 404
            retdata = {
                'message': 'ACL not found.'
            }
    else:
        retcode = 400
        retdata = {
            'message': retdata
        }

    return retdata, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 21:39:04 -04:00
|
|
|
def net_acl_add(network, direction, description, rule, order):
    """
    Add an ACL to a virtual client network.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.add_acl(zk_conn, network, direction, description, rule, order)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-23 20:43:20 -05:00
|
|
|
def net_acl_remove(network, description):
    """
    Remove an ACL from a virtual client network.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_network.remove_acl(zk_conn, network, description)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 14:11:01 -04:00
|
|
|
#
|
|
|
|
# Ceph functions
|
|
|
|
#
|
|
|
|
def ceph_status():
    """
    Get the current Ceph cluster status.

    Returns a (data, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.get_status(zk_conn)
    pvc_common.stopZKConnection(zk_conn)

    return retdata, 200 if retflag else 400
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2020-06-06 22:49:47 -04:00
|
|
|
def ceph_util():
    """
    Get the current Ceph cluster utilization.

    Returns a (data, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.get_util(zk_conn)
    pvc_common.stopZKConnection(zk_conn)

    return retdata, 200 if retflag else 400
|
2019-07-08 10:56:33 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 14:11:01 -04:00
|
|
|
def ceph_osd_list(limit=None):
    """
    Get the list of OSDs in the Ceph storage cluster.

    Returns (data, 200) on success, a message dict with 404 when nothing
    matches, or a message dict with 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.get_list_osd(zk_conn, limit)
    pvc_common.stopZKConnection(zk_conn)

    # Guard clauses: failure first, then empty result, then success
    if not retflag:
        return {'message': retdata}, 400
    if not retdata:
        return {'message': 'OSD not found.'}, 404
    return retdata, 200
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-26 11:23:19 -04:00
|
|
|
def ceph_osd_state(osd):
    """
    Get the in/up state of a single Ceph OSD.

    Returns ({"id", "in", "up"}, 200) on success, or a message dict with
    404/400 when the OSD is missing or the lookup fails.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.get_list_osd(zk_conn, osd)
    pvc_common.stopZKConnection(zk_conn)

    if retflag:
        if retdata:
            retcode = 200
        else:
            retcode = 404
            retdata = {
                'message': 'OSD not found.'
            }
    else:
        retcode = 400
        retdata = {
            'message': retdata
        }

    # Bugfix: return the error payload directly on failure. Previously the
    # code fell through and indexed retdata[0]['stats'] even when retdata
    # had been replaced by an error dict, raising an exception instead of
    # returning the intended 404/400 response.
    if retcode != 200:
        return retdata, retcode

    in_state = retdata[0]['stats']['in']
    up_state = retdata[0]['stats']['up']

    return {"id": osd, "in": in_state, "up": up_state}, retcode
|
2019-07-26 11:23:19 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_osd_add(node, device, weight):
    """
    Add a Ceph OSD to the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.add_osd(zk_conn, node, device, weight)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_osd_remove(osd_id):
    """
    Remove a Ceph OSD from the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.remove_osd(zk_conn, osd_id)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 22:14:45 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_osd_in(osd_id):
    """
    Set in a Ceph OSD in the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.in_osd(zk_conn, osd_id)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_osd_out(osd_id):
    """
    Set out a Ceph OSD in the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.out_osd(zk_conn, osd_id)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_osd_set(option):
    """
    Set options on a Ceph OSD in the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.set_osd(zk_conn, option)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 22:14:45 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_osd_unset(option):
    """
    Unset options on a Ceph OSD in the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.unset_osd(zk_conn, option)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-23 20:43:20 -05:00
|
|
|
def ceph_pool_list(limit=None, is_fuzzy=True):
    """
    Get the list of RBD pools in the Ceph storage cluster.

    Returns (data, 200) on success, a message dict with 404 when nothing
    matches, or a message dict with 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.get_list_pool(zk_conn, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

    # Guard clauses: failure first, then empty result, then success
    if not retflag:
        return {'message': retdata}, 400
    if not retdata:
        return {'message': 'Pool not found.'}, 404
    return retdata, 200
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-08-23 14:12:15 -04:00
|
|
|
def ceph_pool_add(name, pgs, replcfg):
    """
    Add a Ceph RBD pool to the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.add_pool(zk_conn, name, pgs, replcfg)
    pvc_common.stopZKConnection(zk_conn)

    # NOTE: this message is passed through verbatim (no quote normalization),
    # matching the original behaviour of this endpoint.
    retcode = 200 if retflag else 400
    return {'message': retdata}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_pool_remove(name):
    """
    Remove a Ceph RBD pool from the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.remove_pool(zk_conn, name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-23 20:43:20 -05:00
|
|
|
def ceph_volume_list(pool=None, limit=None, is_fuzzy=True):
    """
    Get the list of RBD volumes in the Ceph storage cluster.

    Returns (data, 200) on success, a message dict with 404 when nothing
    matches, or a message dict with 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.get_list_volume(zk_conn, pool, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

    # Guard clauses: failure first, then empty result, then success
    if not retflag:
        return {'message': retdata}, 400
    if not retdata:
        return {'message': 'Volume not found.'}, 404
    return retdata, 200
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_volume_add(pool, name, size):
    """
    Add a Ceph RBD volume to the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, name, size)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-10-10 14:37:35 -04:00
|
|
|
def ceph_volume_clone(pool, name, source_volume):
    """
    Clone a Ceph RBD volume to a new volume on the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    # Note the backend argument order: source volume first, new name second
    retflag, retdata = pvc_ceph.clone_volume(zk_conn, pool, source_volume, name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-10-10 14:37:35 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-26 14:24:22 -04:00
|
|
|
def ceph_volume_resize(pool, name, size):
    """
    Resize an existing Ceph RBD volume in the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.resize_volume(zk_conn, pool, name, size)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-26 14:24:22 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-26 14:24:22 -04:00
|
|
|
def ceph_volume_rename(pool, name, new_name):
    """
    Rename a Ceph RBD volume in the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.rename_volume(zk_conn, pool, name, new_name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-26 14:24:22 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_volume_remove(pool, name):
    """
    Remove a Ceph RBD volume from the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on failure.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.remove_volume(zk_conn, pool, name)
    pvc_common.stopZKConnection(zk_conn)

    # Normalize double quotes so the message embeds cleanly in JSON output
    retcode = 200 if retflag else 400
    return {'message': retdata.replace('"', "'")}, retcode
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
Improve handling of large file uploads
By default, Werkzeug would require the entire file (be it an OVA or
image file) to be uploaded and saved to a temporary, fake file under
`/tmp`, before any further processing could occur. This blocked most of
the execution of these functions until the upload was completed.
This entirely defeated the purpose of what I was trying to do, which was
to save the uploads directly to the temporary blockdev in each case,
thus avoiding any sort of memory or (host) disk usage.
The solution is two-fold:
1. First, ensure that the `location='args'` value is set in
RequestParser; without this, the `files` portion would be parsed
during the argument parsing, which was the original source of this
blocking behaviour.
2. Instead of the convoluted request handling that was being done
originally here, instead entirely defer the parsing of the `files`
arguments until the point in the code where they are ready to be
saved. Then, using an override stream_factory that simply opens the
temporary blockdev, the upload can commence while being written
directly out to it, rather than using `/tmp` space.
This does alter the error handling slightly; it is impossible to check
if the argument was passed until this point in the code, so it may take
longer to fail if the API consumer does not specify a file as they
should. This is a minor trade-off and I would expect my API consumers to
be sane here.
2020-10-19 00:47:56 -04:00
|
|
|
def ceph_volume_upload(pool, volume, img_type):
    """
    Upload a raw file via HTTP post to a PVC Ceph volume

    The uploaded data is streamed directly onto a mapped RBD blockdev via a
    custom Werkzeug stream_factory, avoiding any intermediate /tmp storage.
    Non-raw images are first written to a temporary "<volume>_tmp" RBD volume
    and then converted onto the target with `qemu-img convert`.

    Returns a (dict, int) tuple of a JSON-serializable message body and an
    HTTP status code (200 on success, 400 on any failure).
    """
    # Determine the image conversion options
    # Only formats qemu-img can convert from are accepted here.
    if img_type not in ['raw', 'vmdk', 'qcow2', 'qed', 'vdi', 'vpc']:
        output = {
            "message": "Image type '{}' is not valid.".format(img_type)
        }
        retcode = 400
        return output, retcode

    # Get the size of the target block device
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retcode, retdata = pvc_ceph.get_list_volume(zk_conn, pool, volume, is_fuzzy=False)
    pvc_common.stopZKConnection(zk_conn)
    # If there's no target, return failure
    if not retcode or len(retdata) < 1:
        output = {
            "message": "Target volume '{}' does not exist in pool '{}'.".format(volume, pool)
        }
        retcode = 400
        return output, retcode
    # Size of the existing target volume; the temporary volume is created
    # with this same size so the conversion cannot overflow it.
    dev_size = retdata[0]['stats']['size']

    def cleanup_maps_and_volumes():
        # Best-effort teardown used on both success and failure paths:
        # unmap both blockdevs and delete the temporary volume. Return
        # values are intentionally ignored (some steps may not apply,
        # e.g. no _tmp volume exists on the 'raw' path).
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        # Unmap the target blockdev
        retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, volume)
        # Unmap the temporary blockdev
        retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, "{}_tmp".format(volume))
        # Remove the temporary blockdev
        retflag, retdata = pvc_ceph.remove_volume(zk_conn, pool, "{}_tmp".format(volume))
        pvc_common.stopZKConnection(zk_conn)

    # Create a temporary block device to store non-raw images
    if img_type == 'raw':
        # Raw images need no conversion: stream straight onto the target.
        # Map the target blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, volume)
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode
        # On success, retdata is the local blockdev path (e.g. /dev/rbd/...)
        dest_blockdev = retdata

        # Save the data to the blockdev directly
        try:
            # This sets up a custom stream_factory that writes directly into the ova_blockdev,
            # rather than the standard stream_factory which writes to a temporary file waiting
            # on a save() call. This will break if the API ever uploaded multiple files, but
            # this is an acceptable workaround.
            def image_stream_factory(total_content_length, filename, content_type, content_length=None):
                return open(dest_blockdev, 'wb')
            parse_form_data(flask.request.environ, stream_factory=image_stream_factory)
        except Exception:
            output = {
                'message': "Failed to upload or write image file to temporary volume."
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode

        output = {
            'message': "Wrote uploaded file to volume '{}' in pool '{}'.".format(volume, pool)
        }
        retcode = 200
        cleanup_maps_and_volumes()
        return output, retcode

    # Write the image directly to the blockdev
    else:
        # Non-raw path: upload into a temporary volume, then convert.
        # Create a temporary blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, "{}_tmp".format(volume), dev_size)
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode

        # Map the temporary target blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, "{}_tmp".format(volume))
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode
        # Local path of the temporary (upload-destination) blockdev
        temp_blockdev = retdata

        # Map the target blockdev
        zk_conn = pvc_common.startZKConnection(config['coordinators'])
        retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, volume)
        pvc_common.stopZKConnection(zk_conn)
        if not retflag:
            output = {
                'message': retdata.replace('\"', '\'')
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode
        # Local path of the final (conversion-destination) blockdev
        dest_blockdev = retdata

        # Save the data to the temporary blockdev directly
        try:
            # This sets up a custom stream_factory that writes directly into the ova_blockdev,
            # rather than the standard stream_factory which writes to a temporary file waiting
            # on a save() call. This will break if the API ever uploaded multiple files, but
            # this is an acceptable workaround.
            def image_stream_factory(total_content_length, filename, content_type, content_length=None):
                return open(temp_blockdev, 'wb')
            parse_form_data(flask.request.environ, stream_factory=image_stream_factory)
        except Exception:
            output = {
                'message': "Failed to upload or write image file to temporary volume."
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode

        # Convert from the temporary to destination format on the blockdevs
        # NOTE(review): retcode is reused here as the qemu-img exit status
        # (nonzero means failure) before being reassigned to an HTTP code.
        retcode, stdout, stderr = pvc_common.run_os_command(
            'qemu-img convert -C -f {} -O raw {} {}'.format(img_type, temp_blockdev, dest_blockdev)
        )
        if retcode:
            output = {
                'message': "Failed to convert image format from '{}' to 'raw': {}".format(img_type, stderr)
            }
            retcode = 400
            cleanup_maps_and_volumes()
            return output, retcode

        output = {
            'message': "Converted and wrote uploaded file to volume '{}' in pool '{}'.".format(volume, pool)
        }
        retcode = 200
        cleanup_maps_and_volumes()
        return output, retcode
|
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-12-23 20:43:20 -05:00
|
|
|
def ceph_volume_snapshot_list(pool=None, volume=None, limit=None, is_fuzzy=True):
    """
    Get the list of RBD volume snapshots in the Ceph storage cluster.

    Returns a (data, HTTP status) tuple: the snapshot list with 200 on
    success, a message dict with 404 when no snapshots match, or a message
    dict with 400 when the lookup itself fails.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.get_list_snapshot(zk_conn, pool, volume, limit, is_fuzzy)
    pvc_common.stopZKConnection(zk_conn)

    if not retflag:
        # The query failed outright; retdata holds the error text
        return {'message': retdata}, 400

    if not retdata:
        # The query succeeded but matched nothing
        return {'message': 'Volume snapshot not found.'}, 404

    return retdata, 200
|
2019-07-05 14:11:01 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_volume_snapshot_add(pool, volume, name):
    """
    Add a Ceph RBD volume snapshot to the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on
    failure, with the backend's message in both cases.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.add_snapshot(zk_conn, pool, volume, name)
    pvc_common.stopZKConnection(zk_conn)

    retcode = 200 if retflag else 400

    # Swap double quotes for single quotes so the message embeds cleanly
    output = {
        'message': retdata.replace('\"', '\'')
    }
    return output, retcode
|
2019-07-05 22:14:45 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-28 23:00:35 -04:00
|
|
|
def ceph_volume_snapshot_rename(pool, volume, name, new_name):
    """
    Rename a Ceph RBD volume snapshot in the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on
    failure, with the backend's message in both cases.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.rename_snapshot(zk_conn, pool, volume, name, new_name)
    pvc_common.stopZKConnection(zk_conn)

    retcode = 200 if retflag else 400

    # Swap double quotes for single quotes so the message embeds cleanly
    output = {
        'message': retdata.replace('\"', '\'')
    }
    return output, retcode
|
2019-07-28 23:00:35 -04:00
|
|
|
|
2020-11-07 14:45:24 -05:00
|
|
|
|
2019-07-05 22:14:45 -04:00
|
|
|
def ceph_volume_snapshot_remove(pool, volume, name):
    """
    Remove a Ceph RBD volume snapshot from the PVC Ceph storage cluster.

    Returns a (message dict, HTTP status) tuple: 200 on success, 400 on
    failure, with the backend's message in both cases.
    """
    zk_conn = pvc_common.startZKConnection(config['coordinators'])
    retflag, retdata = pvc_ceph.remove_snapshot(zk_conn, pool, volume, name)
    pvc_common.stopZKConnection(zk_conn)

    retcode = 200 if retflag else 400

    # Swap double quotes for single quotes so the message embeds cleanly
    output = {
        'message': retdata.replace('\"', '\'')
    }
    return output, retcode
|