#!/usr/bin/env python3

# ova.py - PVC OVA parser library
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2021 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import flask
import psycopg2
import psycopg2.extras
import re
import math
import tarfile

import lxml.etree

from werkzeug.formparser import parse_form_data

from pvcapid.Daemon import config

from daemon_lib.zkhandler import ZKConnection

import daemon_lib.common as pvc_common
import daemon_lib.ceph as pvc_ceph

import pvcapid.provisioner as provisioner


#
# Common functions
#

# Database connections
def open_database(config):
    conn = psycopg2.connect(
        host=config["database_host"],
        port=config["database_port"],
        dbname=config["database_name"],
        user=config["database_user"],
        password=config["database_password"],
    )
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    return conn, cur


def close_database(conn, cur, failed=False):
    if not failed:
        conn.commit()
    cur.close()
    conn.close()
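

# A minimal usage sketch for the database helpers above (illustrative only;
# assumes a valid pvcapid config dict). close_database() commits the open
# transaction unless failed=True is passed, in which case the transaction is
# discarded when the connection closes:
#
#   conn, cur = open_database(config)
#   try:
#       cur.execute("SELECT id, name FROM ova;")
#       rows = cur.fetchall()
#       close_database(conn, cur)
#   except Exception:
#       close_database(conn, cur, failed=True)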


#
# OVA functions
#
def list_ova(limit, is_fuzzy=True):
    if limit:
        if is_fuzzy:
            # Handle fuzzy vs. non-fuzzy limits
            if not re.match(r"\^.*", limit):
                limit = "%" + limit
            else:
                limit = limit[1:]
            if not re.match(r".*\$", limit):
                limit = limit + "%"
            else:
                limit = limit[:-1]

        query = "SELECT id, name FROM {} WHERE name LIKE %s;".format("ova")
        args = (limit,)
    else:
        query = "SELECT id, name FROM {};".format("ova")
        args = ()

    conn, cur = open_database(config)
    cur.execute(query, args)
    data = cur.fetchall()
    close_database(conn, cur)

    ova_data = list()

    for ova in data:
        ova_id = ova.get("id")
        ova_name = ova.get("name")

        query = "SELECT pool, volume_name, volume_format, disk_id, disk_size_gb FROM {} WHERE ova = %s;".format(
            "ova_volume"
        )
        args = (ova_id,)
        conn, cur = open_database(config)
        cur.execute(query, args)
        volumes = cur.fetchall()
        close_database(conn, cur)

        ova_data.append({"id": ova_id, "name": ova_name, "volumes": volumes})

    if ova_data:
        return ova_data, 200
    else:
        return {"message": "No OVAs found."}, 404
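

# Illustrative list_ova() behaviour: a fuzzy limit of "test" becomes the SQL
# LIKE pattern "%test%" (substring match), while "^test$" has its anchors
# stripped and matches only the exact name "test". On success the return is
# a (data, code) tuple such as:
#   ([{"id": 1, "name": "myova", "volumes": [...]}], 200)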


@ZKConnection(config)
def delete_ova(zkhandler, name):
    ova_data, retcode = list_ova(name, is_fuzzy=False)
    if retcode != 200:
        retmsg = {"message": 'The OVA "{}" does not exist.'.format(name)}
        retcode = 400
        return retmsg, retcode

    conn, cur = open_database(config)
    ova_id = ova_data[0].get("id")
    try:
        # Get the list of volumes for this OVA
        query = "SELECT pool, volume_name FROM ova_volume WHERE ova = %s;"
        args = (ova_id,)
        cur.execute(query, args)
        volumes = cur.fetchall()

        # Remove each volume for this OVA
        for volume in volumes:
            pvc_ceph.remove_volume(
                zkhandler, volume.get("pool"), volume.get("volume_name")
            )

        # Delete the volume entries from the database
        query = "DELETE FROM ova_volume WHERE ova = %s;"
        args = (ova_id,)
        cur.execute(query, args)

        # Delete the profile entries from the database
        query = "DELETE FROM profile WHERE ova = %s;"
        args = (ova_id,)
        cur.execute(query, args)

        # Delete the system_template entries from the database
        query = "DELETE FROM system_template WHERE ova = %s;"
        args = (ova_id,)
        cur.execute(query, args)

        # Delete the OVA entry from the database
        query = "DELETE FROM ova WHERE id = %s;"
        args = (ova_id,)
        cur.execute(query, args)

        retmsg = {"message": 'Removed OVA image "{}".'.format(name)}
        retcode = 200
    except Exception as e:
        retmsg = {"message": 'Failed to remove OVA "{}": {}'.format(name, e)}
        retcode = 400
    close_database(conn, cur)
    return retmsg, retcode


@ZKConnection(config)
def upload_ova(zkhandler, pool, name, ova_size):
    ova_archive = None

    # Cleanup function
    def cleanup_ova_maps_and_volumes():
        # Close the OVA archive
        if ova_archive:
            ova_archive.close()
        # Unmap the OVA temporary blockdev
        retflag, retdata = pvc_ceph.unmap_volume(zkhandler, pool, "ova_{}".format(name))
        # Remove the OVA temporary blockdev
        retflag, retdata = pvc_ceph.remove_volume(
            zkhandler, pool, "ova_{}".format(name)
        )
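        # Note that the unmap/remove return values are not checked; cleanup
        # is best-effort so a failed step here does not prevent the original
        # error from being returned to the caller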

    # Normalize the OVA size to bytes
    ova_size_bytes = pvc_ceph.format_bytes_fromhuman(ova_size)
    ova_size = "{}B".format(ova_size_bytes)

    # Verify that the cluster has enough space to store the OVA volumes (2x OVA size, temporarily, 1x permanently)
    pool_information = pvc_ceph.getPoolInformation(zkhandler, pool)
    pool_free_space_bytes = int(pool_information["stats"]["free_bytes"])
    if ova_size_bytes * 2 >= pool_free_space_bytes:
        output = {
            "message": "The cluster does not have enough free space ({}) to store the OVA volume ({}).".format(
                pvc_ceph.format_bytes_tohuman(pool_free_space_bytes),
                pvc_ceph.format_bytes_tohuman(ova_size_bytes),
            )
        }
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode

    # Create a temporary OVA blockdev
    retflag, retdata = pvc_ceph.add_volume(
        zkhandler, pool, "ova_{}".format(name), ova_size
    )
    if not retflag:
        output = {"message": retdata.replace('"', "'")}
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode

    # Map the temporary OVA blockdev
    retflag, retdata = pvc_ceph.map_volume(zkhandler, pool, "ova_{}".format(name))
    if not retflag:
        output = {"message": retdata.replace('"', "'")}
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode
    ova_blockdev = retdata

    # Save the OVA data to the temporary blockdev directly
    try:
        # This sets up a custom stream_factory that writes directly into the ova_blockdev,
        # rather than the standard stream_factory which writes to a temporary file waiting
        # on a save() call. This will break if the API ever uploaded multiple files, but
        # this is an acceptable workaround.
        def ova_stream_factory(
            total_content_length, filename, content_type, content_length=None
        ):
            return open(ova_blockdev, "wb")

        parse_form_data(flask.request.environ, stream_factory=ova_stream_factory)
    except Exception:
        output = {"message": "Failed to upload or write OVA file to temporary volume."}
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode
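
    # A hypothetical client-side upload against the API route wrapping this
    # function might look like the following (illustrative only; the exact
    # endpoint path, port, and query parameter names are assumptions based on
    # this function's signature, not definitions from this file):
    #
    #   curl -X POST -F "file=@image.ova" \
    #       "http://pvcapi:7370/api/v1/storage/ova?pool=vms&name=myova&ova_size=2GB"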

    try:
        # Set up the TAR reader for the OVA temporary blockdev
        ova_archive = tarfile.open(name=ova_blockdev)
        # Determine the files in the OVA
        members = ova_archive.getmembers()
    except tarfile.TarError:
        output = {"message": "The uploaded OVA file is not readable."}
        retcode = 400
        cleanup_ova_maps_and_volumes()
        return output, retcode

    # Parse through the members list and extract the OVF file
    for element in set(x for x in members if re.match(r".*\.ovf$", x.name)):
        ovf_file = ova_archive.extractfile(element)
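
    # Note that if the archive contains more than one .ovf member, only the
    # last one iterated above is used; if it contains none, ovf_file is never
    # assigned and the OVFParser() call below raises a NameError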

    # Parse the OVF file to get our VM details
    ovf_parser = OVFParser(ovf_file)
    ovf_xml_raw = ovf_parser.getXML()
    virtual_system = ovf_parser.getVirtualSystems()[0]
    virtual_hardware = ovf_parser.getVirtualHardware(virtual_system)
    disk_map = ovf_parser.getDiskMap(virtual_system)

    # Close the OVF file
    ovf_file.close()

    # Create and upload each disk volume
    for idx, disk in enumerate(disk_map):
        disk_identifier = "sd{}".format(chr(ord("a") + idx))
        volume = "ova_{}_{}".format(name, disk_identifier)
        dev_src = disk.get("src")
        dev_size_raw = ova_archive.getmember(dev_src).size
        vm_volume_size = disk.get("capacity")

        # Normalize the dev size to bytes
        dev_size = "{}B".format(pvc_ceph.format_bytes_fromhuman(dev_size_raw))

        def cleanup_img_maps():
            # Unmap the temporary blockdev
            retflag, retdata = pvc_ceph.unmap_volume(zkhandler, pool, volume)

        # Create the blockdev
        retflag, retdata = pvc_ceph.add_volume(zkhandler, pool, volume, dev_size)
        if not retflag:
            output = {"message": retdata.replace('"', "'")}
            retcode = 400
            cleanup_img_maps()
            cleanup_ova_maps_and_volumes()
            return output, retcode

        # Map the blockdev
        retflag, retdata = pvc_ceph.map_volume(zkhandler, pool, volume)
        if not retflag:
            output = {"message": retdata.replace('"', "'")}
            retcode = 400
            cleanup_img_maps()
            cleanup_ova_maps_and_volumes()
            return output, retcode
        temp_blockdev = retdata

        try:
            # Open (extract) the TAR archive file and seek to byte 0
            vmdk_file = ova_archive.extractfile(disk.get("src"))
            vmdk_file.seek(0)
            # Open the temporary blockdev and seek to byte 0
            blk_file = open(temp_blockdev, "wb")
            blk_file.seek(0)
            # Write the contents of vmdk_file into blk_file
            blk_file.write(vmdk_file.read())
            # Close blk_file (and flush the buffers)
            blk_file.close()
            # Close vmdk_file
            vmdk_file.close()
            # Perform an OS-level sync
            pvc_common.run_os_command("sync")
        except Exception:
            output = {
                "message": "Failed to write image file '{}' to temporary volume.".format(
                    disk.get("src")
                )
            }
            retcode = 400
            cleanup_img_maps()
            cleanup_ova_maps_and_volumes()
            return output, retcode

        cleanup_img_maps()

    cleanup_ova_maps_and_volumes()

    # Prepare the database entries
    query = "INSERT INTO ova (name, ovf) VALUES (%s, %s);"
    args = (name, ovf_xml_raw)
    conn, cur = open_database(config)
    try:
        cur.execute(query, args)
        close_database(conn, cur)
    except Exception as e:
        output = {"message": 'Failed to create OVA entry "{}": {}'.format(name, e)}
        retcode = 400
        close_database(conn, cur)
        return output, retcode

    # Get the OVA database id
    query = "SELECT id FROM ova WHERE name = %s;"
    args = (name,)
    conn, cur = open_database(config)
    cur.execute(query, args)
    ova_id = cur.fetchone()["id"]
    close_database(conn, cur)

    # Prepare disk entries in ova_volume
    for idx, disk in enumerate(disk_map):
        disk_identifier = "sd{}".format(chr(ord("a") + idx))
        volume_type = disk.get("src").split(".")[-1]
        volume = "ova_{}_{}".format(name, disk_identifier)
        vm_volume_size = disk.get("capacity")

        # Normalize the volume size to an integer byte count, then round up
        # to whole GB for the database entry
        vm_volume_size_bytes = pvc_ceph.format_bytes_fromhuman(vm_volume_size)
        vm_volume_size_gb = math.ceil(vm_volume_size_bytes / 1024 / 1024 / 1024)
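        # For example (illustrative), an OVF capacity of 21474836480 bytes
        # becomes a 20 GB database entry here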

        query = "INSERT INTO ova_volume (ova, pool, volume_name, volume_format, disk_id, disk_size_gb) VALUES (%s, %s, %s, %s, %s, %s);"
        args = (ova_id, pool, volume, volume_type, disk_identifier, vm_volume_size_gb)

        conn, cur = open_database(config)
        try:
            cur.execute(query, args)
            close_database(conn, cur)
        except Exception as e:
            output = {
                "message": 'Failed to create OVA volume entry "{}": {}'.format(
                    volume, e
                )
            }
            retcode = 400
            close_database(conn, cur)
            return output, retcode

    # Prepare a system_template for the OVA
    vcpu_count = virtual_hardware.get("vcpus")
    vram_mb = virtual_hardware.get("vram")
    if virtual_hardware.get("graphics-controller") == 1:
        vnc = True
        serial = False
    else:
        vnc = False
        serial = True
    retdata, retcode = provisioner.create_template_system(
        name, vcpu_count, vram_mb, serial, vnc, vnc_bind=None, ova=ova_id
    )
    if retcode != 200:
        return retdata, retcode
    system_template, retcode = provisioner.list_template_system(name, is_fuzzy=False)
    if retcode != 200:
        return retdata, retcode
    system_template_name = system_template[0].get("name")

    # Prepare a barebones profile for the OVA
    retdata, retcode = provisioner.create_profile(
        name,
        "ova",
        system_template_name,
        None,
        None,
        userdata=None,
        script=None,
        ova=name,
        arguments=None,
    )
    if retcode != 200:
        return retdata, retcode

    output = {"message": "Imported OVA image '{}'.".format(name)}
    retcode = 200
    return output, retcode


#
# OVF parser
#
class OVFParser(object):
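    """A simple OVF (v1/v2) descriptor parser used by the OVA importer.

    Wraps an lxml tree of the OVF XML and exposes the virtual systems,
    their virtual hardware items, and a normalized disk map.
    """

    # RASD_TYPE maps DMTF CIM ResourceAllocationSettingData ResourceType
    # codes to friendly names; only the types relevant here are listed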
    RASD_TYPE = {
        "1": "vmci",
        "3": "vcpus",
        "4": "vram",
        "5": "ide-controller",
        "6": "scsi-controller",
        "10": "ethernet-adapter",
        "14": "floppy",
        "15": "cdrom",
        "17": "disk",
        "20": "other-storage-device",
        "23": "usb-controller",
        "24": "graphics-controller",
        "35": "sound-controller",
    }

    def _getFilelist(self):
        path = "{{{schema}}}References/{{{schema}}}File".format(schema=self.OVF_SCHEMA)
        id_attr = "{{{schema}}}id".format(schema=self.OVF_SCHEMA)
        href_attr = "{{{schema}}}href".format(schema=self.OVF_SCHEMA)
        current_list = self.xml.findall(path)
        results = [(x.get(id_attr), x.get(href_attr)) for x in current_list]
        return results

    def _getDisklist(self):
        path = "{{{schema}}}DiskSection/{{{schema}}}Disk".format(schema=self.OVF_SCHEMA)
        id_attr = "{{{schema}}}diskId".format(schema=self.OVF_SCHEMA)
        ref_attr = "{{{schema}}}fileRef".format(schema=self.OVF_SCHEMA)
        cap_attr = "{{{schema}}}capacity".format(schema=self.OVF_SCHEMA)
        cap_units = "{{{schema}}}capacityAllocationUnits".format(schema=self.OVF_SCHEMA)
        current_list = self.xml.findall(path)
        results = [
            (x.get(id_attr), x.get(ref_attr), x.get(cap_attr), x.get(cap_units))
            for x in current_list
        ]
        return results

    def _getAttributes(self, virtual_system, path, attribute):
        current_list = virtual_system.findall(path)
        results = [x.get(attribute) for x in current_list]
        return results

    def __init__(self, ovf_file):
        self.xml = lxml.etree.parse(ovf_file)

        # Define our schemas
        envelope_tag = self.xml.find(".")
        self.XML_SCHEMA = envelope_tag.nsmap.get("xsi")
        self.OVF_SCHEMA = envelope_tag.nsmap.get("ovf")
        self.RASD_SCHEMA = envelope_tag.nsmap.get("rasd")
        self.SASD_SCHEMA = envelope_tag.nsmap.get("sasd")
        self.VSSD_SCHEMA = envelope_tag.nsmap.get("vssd")

        self.ovf_version = int(self.OVF_SCHEMA.split("/")[-1])

        # Get the file and disk lists
        self.filelist = self._getFilelist()
        self.disklist = self._getDisklist()

    def getVirtualSystems(self):
        return self.xml.findall(
            "{{{schema}}}VirtualSystem".format(schema=self.OVF_SCHEMA)
        )

    def getXML(self):
        return lxml.etree.tostring(self.xml, pretty_print=True).decode("utf8")

    def getVirtualHardware(self, virtual_system):
        hardware_list = virtual_system.findall(
            "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(
                schema=self.OVF_SCHEMA
            )
        )
        virtual_hardware = {}

        for item in hardware_list:
            try:
                item_type = self.RASD_TYPE[
                    item.find(
                        "{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)
                    ).text
                ]
            except Exception:
                continue
            quantity = item.find(
                "{{{rasd}}}VirtualQuantity".format(rasd=self.RASD_SCHEMA)
            )
            if quantity is None:
                virtual_hardware[item_type] = 1
            else:
                virtual_hardware[item_type] = quantity.text

        return virtual_hardware
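
    # Example getVirtualHardware() result (illustrative): a 2-vCPU, 2048 MiB
    # guest with a graphics controller might yield something like
    #   {"vcpus": "2", "vram": "2048", "graphics-controller": 1, ...}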

    def getDiskMap(self, virtual_system):
        # OVF v2 uses the StorageItem field, while v1 uses the normal Item field
        if self.ovf_version < 2:
            hardware_list = virtual_system.findall(
                "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(
                    schema=self.OVF_SCHEMA
                )
            )
        else:
            hardware_list = virtual_system.findall(
                "{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(
                    schema=self.OVF_SCHEMA
                )
            )
        disk_list = []

        for item in hardware_list:
            item_type = None

            if self.SASD_SCHEMA is not None:
                item_type = self.RASD_TYPE[
                    item.find(
                        "{{{sasd}}}ResourceType".format(sasd=self.SASD_SCHEMA)
                    ).text
                ]
            else:
                item_type = self.RASD_TYPE[
                    item.find(
                        "{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)
                    ).text
                ]

            if item_type != "disk":
                continue

            hostref = None
            if self.SASD_SCHEMA is not None:
                hostref = item.find(
                    "{{{sasd}}}HostResource".format(sasd=self.SASD_SCHEMA)
                )
            else:
                hostref = item.find(
                    "{{{rasd}}}HostResource".format(rasd=self.RASD_SCHEMA)
                )
            if hostref is None:
                continue
            disk_res = hostref.text

            # Determine which file this disk_res ultimately represents
            (disk_id, disk_ref, disk_capacity, disk_capacity_unit) = [
                x for x in self.disklist if x[0] == disk_res.split("/")[-1]
            ][0]
            (file_id, disk_src) = [x for x in self.filelist if x[0] == disk_ref][0]

            if disk_capacity_unit is not None:
                # Handle the unit conversion
                base_unit, action, multiple = disk_capacity_unit.split()
                multiple_base, multiple_exponent = multiple.split("^")
                disk_capacity = int(disk_capacity) * (
                    int(multiple_base) ** int(multiple_exponent)
                )
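                # For example (illustrative), a capacityAllocationUnits value
                # of "byte * 2^30" with a capacity of 20 yields
                # 20 * 2**30 = 21474836480 bytes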

            # Append the disk with all details to the list
            disk_list.append(
                {
                    "id": disk_id,
                    "ref": disk_ref,
                    "capacity": disk_capacity,
                    "src": disk_src,
                }
            )

        return disk_list
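

# A minimal OVFParser usage sketch (illustrative only; assumes a local OVF
# descriptor file named "example.ovf"):
#
#   with open("example.ovf", "rb") as ovf:
#       parser = OVFParser(ovf)
#       system = parser.getVirtualSystems()[0]
#       hardware = parser.getVirtualHardware(system)
#       disks = parser.getDiskMap(system)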