Complete implementation of OVA handling

Add functions to the API and CLI interfaces for uploading, listing, and
removing OVA images. Includes improved parsing of the OVF and creation of
a system_template and profile for each OVA.

Also modifies some behaviour around profiles, making most components
optional at creation to support both profile types (and incomplete
profiles generally).

Implementation part 2/3 - remaining: OVA VM creation

References #71
Joshua Boniface 2020-02-17 22:52:49 -05:00
parent 7c99618752
commit db558ec91f
7 changed files with 1165 additions and 310 deletions
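For illustration only (not part of this commit), a minimal client-side sketch of driving the new upload endpoint added below. The base URL, API key header, pool name, and file name are placeholders; the route, query parameters, and multipart 'file' field come from the API changes in this commit:

    import os
    import requests

    ova_path = "debian10.ova"                      # placeholder OVA file
    params = {
        "pool": "vms",                             # placeholder storage pool
        # The client must determine ova_size itself; the API cannot size the
        # upload before it proceeds (see the endpoint docstring below).
        "ova_size": os.path.getsize(ova_path),
    }
    with open(ova_path, "rb") as fh:
        resp = requests.post(
            "http://pvc-api.local/api/v1/provisioner/ova/debian10",  # placeholder base URL
            headers={"X-Api-Key": "..."},          # assumed token auth header
            params=params,
            files={"file": fh},
        )
    print(resp.status_code, resp.json().get("message"))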


@@ -5193,6 +5193,228 @@ class API_Provisioner_Script_Element(Resource):
)
api.add_resource(API_Provisioner_Script_Element, '/provisioner/script/<script>')
# /provisioner/ova
class API_Provisioner_OVA_Root(Resource):
@RequestParser([
{ 'name': 'limit' }
])
@Authenticator
def get(self, reqargs):
"""
Return a list of OVA sources
---
tags:
- provisioner
definitions:
- schema:
type: object
id: ova
properties:
id:
type: integer
description: Internal provisioner OVA ID
name:
type: string
description: OVA name
volumes:
type: list
items:
type: object
id: ova_volume
properties:
disk_id:
type: string
description: Disk identifier
disk_size_gb:
type: string
description: Disk size in GB
pool:
type: string
description: Pool containing the OVA volume
volume_name:
type: string
description: Storage volume containing the OVA image
volume_format:
type: string
description: OVA image format
parameters:
- in: query
name: limit
type: string
required: false
description: An OVA name search limit; fuzzy by default, use ^/$ to force exact matches
responses:
200:
description: OK
schema:
type: list
items:
$ref: '#/definitions/ova'
"""
return api_ova.list_ova(
reqargs.get('limit', None)
)
@RequestParser([
{ 'name': 'pool', 'required': True, 'helpmsg': "A storage pool must be specified" },
{ 'name': 'name', 'required': True, 'helpmsg': "A VM name must be specified" },
{ 'name': 'ova_size', 'required': True, 'helpmsg': "An OVA size must be specified" },
])
@Authenticator
def post(self, reqargs):
"""
Upload an OVA image to the cluster
The API client is responsible for determining and setting the ova_size value, as this value cannot be determined dynamically before the upload proceeds.
---
tags:
- provisioner
parameters:
- in: query
name: pool
type: string
required: true
description: Storage pool name
- in: query
name: name
type: string
required: true
description: OVA name on the cluster (usually identical to the OVA file name)
- in: query
name: ova_size
type: string
required: true
description: Size of the OVA file in bytes
responses:
200:
description: OK
schema:
type: object
id: Message
400:
description: Bad request
schema:
type: object
id: Message
"""
from flask_restful import reqparse
from werkzeug.datastructures import FileStorage
parser = reqparse.RequestParser()
parser.add_argument('file', type=FileStorage, location='files')
data = parser.parse_args()
ova_data = data.get('file', None)
if not ova_data:
return {'message':'An OVA file contents must be specified'}, 400
return api_ova.upload_ova(
ova_data,
reqargs.get('pool', None),
reqargs.get('name', None),
reqargs.get('ova_size', None),
)
api.add_resource(API_Provisioner_OVA_Root, '/provisioner/ova')
# /provisioner/ova/<ova>
class API_Provisioner_OVA_Element(Resource):
@Authenticator
def get(self, ova):
"""
Return information about OVA image {ova}
---
tags:
- provisioner
responses:
200:
description: OK
schema:
$ref: '#/definitions/ova'
404:
description: Not found
schema:
type: object
id: Message
"""
return api_ova.list_ova(
ova,
is_fuzzy=False
)
@RequestParser([
{ 'name': 'pool', 'required': True, 'helpmsg': "A storage pool must be specified" },
{ 'name': 'ova_size', 'required': True, 'helpmsg': "An OVA size must be specified" },
])
@Authenticator
def post(self, ova, reqargs):
"""
Upload an OVA image to the cluster
The API client is responsible for determining and setting the ova_size value, as this value cannot be determined dynamically before the upload proceeds.
---
tags:
- provisioner
parameters:
- in: query
name: pool
type: string
required: true
description: Storage pool name
- in: query
name: ova_size
type: string
required: true
description: Size of the OVA file in bytes
responses:
200:
description: OK
schema:
type: object
id: Message
400:
description: Bad request
schema:
type: object
id: Message
"""
from flask_restful import reqparse
from werkzeug.datastructures import FileStorage
parser = reqparse.RequestParser()
parser.add_argument('file', type=FileStorage, location='files')
data = parser.parse_args()
ova_data = data.get('file', None)
if not ova_data:
return {'message':'An OVA file contents must be specified'}, 400
return api_ova.upload_ova(
ova_data,
reqargs.get('pool', None),
ova,
reqargs.get('ova_size', None),
)
@Authenticator
def delete(self, ova):
"""
Remove ova {ova}
---
tags:
- provisioner
responses:
200:
description: OK
schema:
type: object
id: Message
404:
description: Not found
schema:
type: object
id: Message
"""
return api_ova.delete_ova(
ova
)
api.add_resource(API_Provisioner_OVA_Element, '/provisioner/ova/<ova>')
# /provisioner/profile
class API_Provisioner_Profile_Root(Resource):
@RequestParser([
@@ -5256,11 +5478,13 @@ class API_Provisioner_Profile_Root(Resource):
@RequestParser([
{ 'name': 'name', 'required': True, 'helpmsg': "A profile name must be specified" },
- { 'name': 'system_template', 'required': True, 'helpmsg': "A system template name must be specified" },
- { 'name': 'network_template', 'required': True, 'helpmsg': "A network template name must be specified" },
- { 'name': 'storage_template', 'required': True, 'helpmsg': "A storage template name must be specified" },
- { 'name': 'userdata', 'required': True, 'helpmsg': "A userdata document name must be specified (use 'empty' if no template is desired)" },
- { 'name': 'script', 'required': True, 'helpmsg': "A system name must be specified" },
+ { 'name': 'profile_type', 'required': True, 'helpmsg': "A profile type must be specified" },
+ { 'name': 'system_template' },
+ { 'name': 'network_template' },
+ { 'name': 'storage_template' },
+ { 'name': 'userdata' },
+ { 'name': 'script' },
+ { 'name': 'ova' },
{ 'name': 'arg', 'action': 'append' }
])
@Authenticator
@@ -5277,30 +5501,43 @@ class API_Provisioner_Profile_Root(Resource):
    required: true
    description: Profile name
  - in: query
-     name: script
+     name: profile_type
    type: string
    required: true
+     description: Profile type
+     enum:
+       - provisioner
+       - ova
+   - in: query
+     name: script
+     type: string
+     required: false
    description: Script name
  - in: query
    name: system_template
    type: string
-     required: true
+     required: false
    description: System template name
  - in: query
    name: network_template
    type: string
-     required: true
+     required: false
    description: Network template name
  - in: query
    name: storage_template
    type: string
-     required: true
+     required: false
    description: Storage template name
  - in: query
    name: userdata
    type: string
-     required: true
+     required: false
    description: Userdata template name
+   - in: query
+     name: ova
+     type: string
+     required: false
+     description: OVA image source
  - in: query
    name: arg
    type: string
@@ -5319,11 +5556,13 @@ class API_Provisioner_Profile_Root(Resource):
"""
return api_provisioner.create_profile(
reqargs.get('name', None),
+ reqargs.get('profile_type', None),
reqargs.get('system_template', None),
reqargs.get('network_template', None),
reqargs.get('storage_template', None),
reqargs.get('userdata', None),
reqargs.get('script', None),
+ reqargs.get('ova', None),
reqargs.get('arg', [])
)
api.add_resource(API_Provisioner_Profile_Root, '/provisioner/profile')
@@ -5354,11 +5593,13 @@ class API_Provisioner_Profile_Element(Resource):
)
@RequestParser([
- { 'name': 'system_template', 'required': True, 'helpmsg': "A system template name must be specified" },
- { 'name': 'network_template', 'required': True, 'helpmsg': "A network template name must be specified" },
- { 'name': 'storage_template', 'required': True, 'helpmsg': "A storage template name must be specified" },
- { 'name': 'userdata', 'required': True, 'helpmsg': "A userdata document name must be specified (use 'empty' if no template is desired)" },
- { 'name': 'script', 'required': True, 'helpmsg': "A system name must be specified" },
+ { 'name': 'profile_type', 'required': True, 'helpmsg': "A profile type must be specified" },
+ { 'name': 'system_template' },
+ { 'name': 'network_template' },
+ { 'name': 'storage_template' },
+ { 'name': 'userdata' },
+ { 'name': 'script' },
+ { 'name': 'ova' },
{ 'name': 'arg', 'action': 'append' }
])
@Authenticator
@@ -5369,6 +5610,14 @@ class API_Provisioner_Profile_Element(Resource):
  tags:
    - provisioner
  parameters:
+   - in: query
+     name: profile_type
+     type: string
+     required: true
+     description: Profile type
+     enum:
+       - provisioner
+       - ova
  - in: query
    name: script
    type: string
@@ -5394,6 +5643,11 @@ class API_Provisioner_Profile_Element(Resource):
    type: string
    required: true
    description: Userdata template name
+   - in: query
+     name: ova
+     type: string
+     required: false
+     description: OVA image source
  - in: query
    name: arg
    type: string
@@ -5412,11 +5666,13 @@ class API_Provisioner_Profile_Element(Resource):
"""
return api_provisioner.create_profile(
profile,
+ reqargs.get('profile_type', None),
reqargs.get('system_template', None),
reqargs.get('network_template', None),
reqargs.get('storage_template', None),
reqargs.get('userdata', None),
reqargs.get('script', None),
+ reqargs.get('ova', None),
reqargs.get('arg', [])
)
@@ -5479,12 +5735,14 @@ class API_Provisioner_Profile_Element(Resource):
"""
return api_provisioner.modify_profile(
profile,
+ None, # Can't modify the profile type
reqargs.get('system_template', None),
reqargs.get('network_template', None),
reqargs.get('storage_template', None),
reqargs.get('userdata', None),
reqargs.get('script', None),
- reqargs.get('arg', [])
+ None, # Can't modify the OVA
+ reqargs.get('arg', []),
)
@Authenticator
@@ -5587,98 +5845,6 @@ class API_Provisioner_Create_Root(Resource):
return { "task_id": task.id }, 202, { 'Location': Api.url_for(api, API_Provisioner_Status_Element, task_id=task.id) }
api.add_resource(API_Provisioner_Create_Root, '/provisioner/create')
# /provisioner/upload
class API_Provisioner_Upload(Resource):
@RequestParser([
{ 'name': 'ova_size', 'required': True, 'helpmsg': "An OVA size must be specified" },
{ 'name': 'pool', 'required': True, 'helpmsg': "A storage pool must be specified" },
{ 'name': 'name', 'required': True, 'helpmsg': "A VM name must be specified" },
{ 'name': 'define_vm' },
{ 'name': 'start_vm' }
])
@Authenticator
def post(self, reqargs):
"""
Upload an OVA image to a new virtual machine
The API client is responsible for determining and setting the ova_size value, as this value cannot be determined dynamically before the upload proceeds.
Even if define_vm is false, the name will be used to name the resulting disk volumes as it would with a normally-provisioned VM.
The resulting VM, even if defined, will not have an attached provisioner profile.
---
tags:
- provisioner
parameters:
- in: query
name: ova_size
type: string
required: true
description: Size of the OVA file in bytes
- in: query
name: pool
type: string
required: true
description: Storage pool name
- in: query
name: name
type: string
required: true
description: Virtual machine name
- in: query
name: define_vm
type: boolean
required: false
description: Whether to define the VM on the cluster during provisioning
- in: query
name: start_vm
type: boolean
required: false
description: Whether to start the VM after provisioning
responses:
200:
description: OK
schema:
type: object
properties:
task_id:
type: string
description: Task ID for the provisioner Celery worker
400:
description: Bad request
schema:
type: object
id: Message
"""
from flask_restful import reqparse
from werkzeug.datastructures import FileStorage
parser = reqparse.RequestParser()
parser.add_argument('file', type=FileStorage, location='files')
data = parser.parse_args()
ova_data = data.get('file', None)
if not ova_data:
return {'message':'An OVA file contents must be specified'}, 400
if bool(strtobool(reqargs.get('define_vm', 'true'))):
define_vm = True
else:
define_vm = False
if bool(strtobool(reqargs.get('start_vm', 'true'))):
start_vm = True
else:
start_vm = False
return api_ova.upload_ova(
ova_data,
reqargs.get('ova_size', None),
reqargs.get('pool', None),
reqargs.get('name', None),
define_vm,
start_vm
)
api.add_resource(API_Provisioner_Upload, '/provisioner/upload')
# /provisioner/status
class API_Provisioner_Status_Root(Resource):
@Authenticator


@@ -0,0 +1,46 @@
#
# TEMP
#
def tempstuff():
# Verify that the cluster has enough space to store all OVA disk volumes
total_size_bytes = 0
for disk in disk_map:
# Normalize the dev size to MB
# The function always return XXXXB, so strip off the B and convert to an integer
dev_size_bytes = int(pvc_ceph.format_bytes_fromhuman(disk.get('capacity', 0))[:-1])
ova_size_bytes = int(pvc_ceph.format_bytes_fromhuman(ova_size)[:-1])
# Get the actual image size
total_size_bytes += dev_size_bytes
# Add on the OVA size to account for the VMDK
total_size_bytes += ova_size_bytes
zk_conn = pvc_common.startZKConnection(config['coordinators'])
pool_information = pvc_ceph.getPoolInformation(zk_conn, pool)
pvc_common.stopZKConnection(zk_conn)
pool_free_space_bytes = int(pool_information['stats']['free_bytes'])
if total_size_bytes >= pool_free_space_bytes:
output = {
'message': "ERROR: The cluster does not have enough free space ({}) to store the VM ({}).".format(
pvc_ceph.format_bytes_tohuman(pool_free_space_bytes),
pvc_ceph.format_bytes_tohuman(total_size_bytes)
)
}
retcode = 400
cleanup_ova_maps_and_volumes()
return output, retcode
# Convert from the temporary to destination format on the blockdevs
retcode, stdout, stderr = pvc_common.run_os_command(
'qemu-img convert -C -f {} -O raw {} {}'.format(img_type, temp_blockdev, dest_blockdev)
)
if retcode:
output = {
'message': "ERROR: Failed to convert image '{}' format from '{}' to 'raw': {}".format(disk.get('src'), img_type, stderr)
}
retcode = 400
cleanup_img_maps_and_volumes()
cleanup_ova_maps_and_volumes()
return output, retcode


@@ -42,47 +42,167 @@ import daemon_lib.network as pvc_network
import daemon_lib.ceph as pvc_ceph
import pvcapid.libvirt_schema as libvirt_schema
+ import pvcapid.provisioner as provisioner
#
- # OVA upload function
+ # Common functions
#
- def upload_ova(ova_data, ova_size, pool, name, define_vm, start_vm):
- # Upload flow is as follows:
- # 1. Create temporary volume of ova_size
- # 2. Map the temporary volume for reading
- # 3. Write OVA upload file to temporary volume
- # 4. Read tar from temporary volume, extract OVF
- # 5. Parse OVF, obtain disk list and VM details
- # 6. Extract and "upload" via API each disk image to Ceph
- # 7. Unmap and remove the temporary volume
- # 8. Define VM (if applicable)
- # 9. Start VM (if applicable)
- ###########################################################
+ # Database connections
+ def open_database(config):
+ conn = psycopg2.connect(
+ host=config['database_host'],
+ port=config['database_port'],
+ dbname=config['database_name'],
+ user=config['database_user'],
+ password=config['database_password']
+ )
+ cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
+ return conn, cur
+ def close_database(conn, cur, failed=False):
+ if not failed:
+ conn.commit()
+ cur.close()
+ conn.close()
#
# OVA functions
#
def list_ova(limit, is_fuzzy=True):
if limit:
if is_fuzzy:
# Handle fuzzy vs. non-fuzzy limits
if not re.match('\^.*', limit):
limit = '%' + limit
else:
limit = limit[1:]
if not re.match('.*\$', limit):
limit = limit + '%'
else:
limit = limit[:-1]
query = "SELECT id, name FROM {} WHERE name LIKE %s;".format('ova')
args = (limit, )
else:
query = "SELECT id, name FROM {};".format('ova')
args = ()
conn, cur = open_database(config)
cur.execute(query, args)
data = cur.fetchall()
close_database(conn, cur)
ova_data = list()
for ova in data:
ova_id = ova.get('id')
ova_name = ova.get('name')
query = "SELECT pool, volume_name, volume_format, disk_id, disk_size_gb FROM {} WHERE ova = %s;".format('ova_volume')
args = (ova_id,)
conn, cur = open_database(config)
cur.execute(query, args)
volumes = cur.fetchall()
close_database(conn, cur)
ova_data.append({'id': ova_id, 'name': ova_name, 'volumes': volumes})
if ova_data:
return ova_data, 200
else:
return {'message': 'No OVAs found'}, 404
def delete_ova(name):
ova_data, retcode = list_ova(name, is_fuzzy=False)
if retcode != 200:
retmsg = { 'message': 'The OVA "{}" does not exist'.format(name) }
retcode = 400
return retmsg, retcode
conn, cur = open_database(config)
ova_id = ova_data[0].get('id')
try:
# Get the list of volumes for this OVA
query = "SELECT pool, volume_name FROM ova_volume WHERE ova = %s;"
args = (ova_id,)
cur.execute(query, args)
volumes = cur.fetchall()
# Remove each volume for this OVA
zk_conn = pvc_common.startZKConnection(config['coordinators'])
for volume in volumes:
pvc_ceph.remove_volume(zk_conn, volume.get('pool'), volume.get('volume_name'))
# Delete the volume entries from the database
query = "DELETE FROM ova_volume WHERE ova = %s;"
args = (ova_id,)
cur.execute(query, args)
# Delete the profile entries from the database
query = "DELETE FROM profile WHERE ova = %s;"
args = (ova_id,)
cur.execute(query, args)
# Delete the system_template entries from the database
query = "DELETE FROM system_template WHERE ova = %s;"
args = (ova_id,)
cur.execute(query, args)
# Delete the OVA entry from the database
query = "DELETE FROM ova WHERE id = %s;"
args = (ova_id,)
cur.execute(query, args)
retmsg = { "message": 'Removed OVA image "{}"'.format(name) }
retcode = 200
except Exception as e:
retmsg = { 'message': 'Failed to remove OVA "{}": {}'.format(name, e) }
retcode = 400
close_database(conn, cur)
return retmsg, retcode
+ def upload_ova(ova_data, pool, name, ova_size):
+ ova_archive = None
# Cleanup function
def cleanup_ova_maps_and_volumes():
# Close the OVA archive
+ if ova_archive:
ova_archive.close()
zk_conn = pvc_common.startZKConnection(config['coordinators'])
# Unmap the OVA temporary blockdev
- retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, "{}_ova".format(name))
+ retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, "ova_{}".format(name))
# Remove the OVA temporary blockdev
- retflag, retdata = pvc_ceph.remove_volume(zk_conn, pool, "{}_ova".format(name))
+ retflag, retdata = pvc_ceph.remove_volume(zk_conn, pool, "ova_{}".format(name))
pvc_common.stopZKConnection(zk_conn)
# Normalize the OVA size to MB
- print("Normalize the OVA size to MB")
# The function always return XXXXB, so strip off the B and convert to an integer
ova_size_bytes = int(pvc_ceph.format_bytes_fromhuman(ova_size)[:-1])
# Put the size into KB which rbd --size can understand
ova_size_kb = math.ceil(ova_size_bytes / 1024)
ova_size = "{}K".format(ova_size_kb)
- # Create a temporary OVA blockdev
- print("Create a temporary OVA blockdev")
+ # Verify that the cluster has enough space to store the OVA volumes (2x OVA size, temporarily, 1x permanently)
zk_conn = pvc_common.startZKConnection(config['coordinators'])
- print(ova_size)
- retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, "{}_ova".format(name), ova_size)
+ pool_information = pvc_ceph.getPoolInformation(zk_conn, pool)
+ pvc_common.stopZKConnection(zk_conn)
pool_free_space_bytes = int(pool_information['stats']['free_bytes'])
if ova_size_bytes * 2 >= pool_free_space_bytes:
output = {
'message': "ERROR: The cluster does not have enough free space ({}) to store the OVA volume ({}).".format(
pvc_ceph.format_bytes_tohuman(pool_free_space_bytes),
pvc_ceph.format_bytes_tohuman(ova_size_bytes)
)
}
retcode = 400
cleanup_ova_maps_and_volumes()
return output, retcode
# Create a temporary OVA blockdev
zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, "ova_{}".format(name), ova_size)
pvc_common.stopZKConnection(zk_conn)
if not retflag:
output = {
@@ -93,9 +213,8 @@ def upload_ova(ova_data, ova_size, pool, name, define_vm, start_vm):
return output, retcode
# Map the temporary OVA blockdev
- print("Map the temporary OVA blockdev")
zk_conn = pvc_common.startZKConnection(config['coordinators'])
- retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, "{}_ova".format(name))
+ retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, "ova_{}".format(name))
pvc_common.stopZKConnection(zk_conn)
if not retflag:
output = {
@@ -107,7 +226,6 @@ def upload_ova(ova_data, ova_size, pool, name, define_vm, start_vm):
ova_blockdev = retdata
# Save the OVA data to the temporary blockdev directly
- print("Save the OVA data to the temporary blockdev directly")
try:
ova_data.save(ova_blockdev)
except:
@@ -120,10 +238,8 @@ def upload_ova(ova_data, ova_size, pool, name, define_vm, start_vm):
try:
# Set up the TAR reader for the OVA temporary blockdev
- print("Set up the TAR reader for the OVA temporary blockdev")
ova_archive = tarfile.open(name=ova_blockdev)
# Determine the files in the OVA
- print("Determine the files in the OVA")
members = ova_archive.getmembers()
except tarfile.TarError:
output = {
@@ -134,109 +250,54 @@ def upload_ova(ova_data, ova_size, pool, name, define_vm, start_vm):
return output, retcode
# Parse through the members list and extract the OVF file
- print("Parse through the members list and extract the OVF file")
for element in set(x for x in members if re.match('.*\.ovf$', x.name)):
ovf_file = ova_archive.extractfile(element)
- print(ovf_file)
# Parse the OVF file to get our VM details
- print("Parse the OVF file to get our VM details")
ovf_parser = OVFParser(ovf_file)
+ ovf_xml_raw = ovf_parser.getXML()
virtual_system = ovf_parser.getVirtualSystems()[0]
virtual_hardware = ovf_parser.getVirtualHardware(virtual_system)
disk_map = ovf_parser.getDiskMap(virtual_system)
# Close the OVF file
- print("Close the OVF file")
ovf_file.close()
- print(virtual_hardware)
- print(disk_map)
- # Verify that the cluster has enough space to store all OVA disk volumes
- total_size_bytes = 0
- for disk in disk_map:
- # Normalize the dev size to MB
- print("Normalize the dev size to MB")
- # The function always return XXXXB, so strip off the B and convert to an integer
- dev_size_bytes = int(pvc_ceph.format_bytes_fromhuman(disk.get('capacity', 0))[:-1])
- ova_size_bytes = int(pvc_ceph.format_bytes_fromhuman(ova_size)[:-1])
- # Get the actual image size
- total_size_bytes += dev_size_bytes
- # Add on the OVA size to account for the VMDK
- total_size_bytes += ova_size_bytes
- zk_conn = pvc_common.startZKConnection(config['coordinators'])
- pool_information = pvc_ceph.getPoolInformation(zk_conn, pool)
- pvc_common.stopZKConnection(zk_conn)
- pool_free_space_bytes = int(pool_information['stats']['free_bytes'])
- if total_size_bytes >= pool_free_space_bytes:
- output = {
- 'message': "ERROR: The cluster does not have enough free space ({}) to store the VM ({}).".format(
- pvc_ceph.format_bytes_tohuman(pool_free_space_bytes),
- pvc_ceph.format_bytes_tohuman(total_size_bytes)
- )
- }
- retcode = 400
- cleanup_ova_maps_and_volumes()
- return output, retcode
# Create and upload each disk volume
for idx, disk in enumerate(disk_map):
disk_identifier = "sd{}".format(chr(ord('a') + idx))
- volume = "{}_{}".format(name, disk_identifier)
+ volume = "ova_{}_{}".format(name, disk_identifier)
- dev_size = disk.get('capacity')
+ dev_src = disk.get('src')
+ dev_type = dev_src.split('.')[-1]
+ dev_size_raw = ova_archive.getmember(dev_src).size
+ vm_volume_size = disk.get('capacity')
- # Normalize the dev size to MB
- print("Normalize the dev size to MB")
+ # Normalize the dev size to KB
# The function always return XXXXB, so strip off the B and convert to an integer
- dev_size_bytes = int(pvc_ceph.format_bytes_fromhuman(dev_size)[:-1])
+ dev_size_bytes = int(pvc_ceph.format_bytes_fromhuman(dev_size_raw)[:-1])
- dev_size_mb = math.ceil(dev_size_bytes / 1024 / 1024)
+ dev_size_kb = math.ceil(dev_size_bytes / 1024)
- dev_size = "{}M".format(dev_size_mb)
+ dev_size = "{}K".format(dev_size_kb)
- def cleanup_img_maps_and_volumes():
+ def cleanup_img_maps():
zk_conn = pvc_common.startZKConnection(config['coordinators'])
- # Unmap the target blockdev
- retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, volume)
# Unmap the temporary blockdev
- retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, "{}_tmp".format(volume))
+ retflag, retdata = pvc_ceph.unmap_volume(zk_conn, pool, volume)
- # Remove the temporary blockdev
- retflag, retdata = pvc_ceph.remove_volume(zk_conn, pool, "{}_tmp".format(volume))
pvc_common.stopZKConnection(zk_conn)
- # Create target blockdev
+ # Create the blockdev
zk_conn = pvc_common.startZKConnection(config['coordinators'])
- pool_information = pvc_ceph.add_volume(zk_conn, pool, volume, dev_size)
+ retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, volume, dev_size)
- pvc_common.stopZKConnection(zk_conn)
- # Create a temporary blockdev
- zk_conn = pvc_common.startZKConnection(config['coordinators'])
- retflag, retdata = pvc_ceph.add_volume(zk_conn, pool, "{}_tmp".format(volume), ova_size)
pvc_common.stopZKConnection(zk_conn)
if not retflag:
output = {
'message': retdata.replace('\"', '\'')
}
retcode = 400
- cleanup_img_maps_and_volumes()
+ cleanup_img_maps()
cleanup_ova_maps_and_volumes()
return output, retcode
- # Map the temporary target blockdev
+ # Map the blockdev
- zk_conn = pvc_common.startZKConnection(config['coordinators'])
- retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, "{}_tmp".format(volume))
- pvc_common.stopZKConnection(zk_conn)
- if not retflag:
- output = {
- 'message': retdata.replace('\"', '\'')
- }
- retcode = 400
- cleanup_img_maps_and_volumes()
- cleanup_ova_maps_and_volumes()
- return output, retcode
- temp_blockdev = retdata
- # Map the target blockdev
zk_conn = pvc_common.startZKConnection(config['coordinators'])
retflag, retdata = pvc_ceph.map_volume(zk_conn, pool, volume)
pvc_common.stopZKConnection(zk_conn)
@@ -245,13 +306,10 @@ def upload_ova(ova_data, ova_size, pool, name, define_vm, start_vm):
'message': retdata.replace('\"', '\'')
}
retcode = 400
- cleanup_img_maps_and_volumes()
+ cleanup_img_maps()
cleanup_ova_maps_and_volumes()
return output, retcode
- dest_blockdev = retdata
+ temp_blockdev = retdata
- # Save the data to the temporary blockdev directly
- img_type = disk.get('src').split('.')[-1]
try:
# Open (extract) the TAR archive file and seek to byte 0
@@ -268,41 +326,86 @@ def upload_ova(ova_data, ova_size, pool, name, define_vm, start_vm):
vmdk_file.close()
# Perform an OS-level sync
pvc_common.run_os_command('sync')
+ # Shrink the tmp RBD image to the exact size of the written file
+ # This works around a bug in this method where an EOF is never written to the end of the
+ # target blockdev, thus causing an "Invalid footer" error. Instead, if we just shrink the
+ # RBD volume to the exact size, this is treated as an EOF
+ pvc_common.run_os_command('rbd resize {}/{}_{}_tmp --size {}B --allow-shrink'.format(pool, name, disk_identifier, bytes_written))
except:
output = {
'message': "ERROR: Failed to write image file '{}' to temporary volume.".format(disk.get('src'))
}
retcode = 400
- cleanup_img_maps_and_volumes()
+ cleanup_img_maps()
cleanup_ova_maps_and_volumes()
return output, retcode
- # Convert from the temporary to destination format on the blockdevs
- retcode, stdout, stderr = pvc_common.run_os_command(
- 'qemu-img convert -C -f {} -O raw {} {}'.format(img_type, temp_blockdev, dest_blockdev)
- )
- if retcode:
+ cleanup_img_maps()
+ cleanup_ova_maps_and_volumes()
+ # Prepare the database entries
query = "INSERT INTO ova (name, ovf) VALUES (%s, %s);"
args = (name, ovf_xml_raw)
conn, cur = open_database(config)
try:
cur.execute(query, args)
close_database(conn, cur)
except Exception as e:
output = {
- 'message': "ERROR: Failed to convert image '{}' format from '{}' to 'raw': {}".format(disk.get('src'), img_type, stderr)
+ 'message': 'Failed to create OVA entry "{}": {}'.format(name, e)
}
retcode = 400
- cleanup_img_maps_and_volumes()
- cleanup_ova_maps_and_volumes()
+ close_database(conn, cur)
return output, retcode
- cleanup_img_maps_and_volumes()
+ # Get the OVA database id
query = "SELECT id FROM ova WHERE name = %s;"
args = (name, )
conn, cur = open_database(config)
cur.execute(query, args)
ova_id = cur.fetchone()['id']
close_database(conn, cur)
- cleanup_ova_maps_and_volumes()
+ # Prepare disk entries in ova_volume
for idx, disk in enumerate(disk_map):
disk_identifier = "sd{}".format(chr(ord('a') + idx))
volume_type = disk.get('src').split('.')[-1]
volume = "ova_{}_{}".format(name, disk_identifier)
vm_volume_size = disk.get('capacity')
- # Prepare a VM configuration
+ # The function always return XXXXB, so strip off the B and convert to an integer
vm_volume_size_bytes = int(pvc_ceph.format_bytes_fromhuman(vm_volume_size)[:-1])
vm_volume_size_gb = math.ceil(vm_volume_size_bytes / 1024 / 1024 / 1024)
query = "INSERT INTO ova_volume (ova, pool, volume_name, volume_format, disk_id, disk_size_gb) VALUES (%s, %s, %s, %s, %s, %s);"
args = (ova_id, pool, volume, volume_type, disk_identifier, vm_volume_size_gb)
conn, cur = open_database(config)
try:
cur.execute(query, args)
close_database(conn, cur)
except Exception as e:
output = {
'message': 'Failed to create OVA volume entry "{}": {}'.format(volume, e)
}
retcode = 400
close_database(conn, cur)
return output, retcode
# Prepare a system_template for the OVA
vcpu_count = virtual_hardware.get('vcpus')
vram_mb = virtual_hardware.get('vram')
if virtual_hardware.get('graphics-controller') == 1:
vnc = True
serial = False
else:
vnc = False
serial = True
retdata, retcode = provisioner.create_template_system(name, vcpu_count, vram_mb, serial, vnc, vnc_bind=None, ova=ova_id)
system_template, retcode = provisioner.list_template_system(name, is_fuzzy=False)
system_template_name = system_template[0].get('name')
# Prepare a barebones profile for the OVA
retdata, retcode = provisioner.create_profile(name, 'ova', system_template_name, None, None, userdata=None, script=None, ova=name, arguments=None)
output = {
- 'message': "Imported OVA file to new VM '{}'".format(name)
+ 'message': "Imported OVA image '{}'".format(name)
}
retcode = 200
return output, retcode
@@ -310,13 +413,9 @@ def upload_ova(ova_data, ova_size, pool, name, define_vm, start_vm):
#
# OVF parser
#
- OVF_SCHEMA = "http://schemas.dmtf.org/ovf/envelope/2"
- RASD_SCHEMA = "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
- SASD_SCHEMA = "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_StorageAllocationSettingData.xsd"
- VSSD_SCHEMA = "http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
- XML_SCHEMA = "http://www.w3.org/2001/XMLSchema-instance"
- RASD_TYPE = {
+ class OVFParser(object):
+ RASD_TYPE = {
+ "1": "vmci",
"3": "vcpus",
"4": "vram",
"5": "ide-controller",
@@ -328,28 +427,24 @@ RASD_TYPE = {
"23": "usb-controller",
"24": "graphics-controller",
"35": "sound-controller"
}
- SASD_TYPE = {
- "15": "cdrom",
- "17": "disk"
- }
- class OVFParser(object):
def _getFilelist(self):
- path = "{{{schema}}}References/{{{schema}}}File".format(schema=OVF_SCHEMA)
+ path = "{{{schema}}}References/{{{schema}}}File".format(schema=self.OVF_SCHEMA)
- id_attr = "{{{schema}}}id".format(schema=OVF_SCHEMA)
+ id_attr = "{{{schema}}}id".format(schema=self.OVF_SCHEMA)
- href_attr = "{{{schema}}}href".format(schema=OVF_SCHEMA)
+ href_attr = "{{{schema}}}href".format(schema=self.OVF_SCHEMA)
current_list = self.xml.findall(path)
results = [(x.get(id_attr), x.get(href_attr)) for x in current_list]
return results
def _getDisklist(self):
- path = "{{{schema}}}DiskSection/{{{schema}}}Disk".format(schema=OVF_SCHEMA)
+ path = "{{{schema}}}DiskSection/{{{schema}}}Disk".format(schema=self.OVF_SCHEMA)
- id_attr = "{{{schema}}}diskId".format(schema=OVF_SCHEMA)
+ id_attr = "{{{schema}}}diskId".format(schema=self.OVF_SCHEMA)
- ref_attr = "{{{schema}}}fileRef".format(schema=OVF_SCHEMA)
+ ref_attr = "{{{schema}}}fileRef".format(schema=self.OVF_SCHEMA)
- cap_attr = "{{{schema}}}capacity".format(schema=OVF_SCHEMA)
+ cap_attr = "{{{schema}}}capacity".format(schema=self.OVF_SCHEMA)
+ cap_units = "{{{schema}}}capacityAllocationUnits".format(schema=self.OVF_SCHEMA)
current_list = self.xml.findall(path)
- results = [(x.get(id_attr), x.get(ref_attr), x.get(cap_attr)) for x in current_list]
+ results = [(x.get(id_attr), x.get(ref_attr), x.get(cap_attr), x.get(cap_units)) for x in current_list]
return results
def _getAttributes(self, virtual_system, path, attribute):
@@ -359,60 +454,88 @@ class OVFParser(object):
def __init__(self, ovf_file):
self.xml = lxml.etree.parse(ovf_file)
# Define our schemas
envelope_tag = self.xml.find(".")
self.XML_SCHEMA = envelope_tag.nsmap.get('xsi')
self.OVF_SCHEMA = envelope_tag.nsmap.get('ovf')
self.RASD_SCHEMA = envelope_tag.nsmap.get('rasd')
self.SASD_SCHEMA = envelope_tag.nsmap.get('sasd')
self.VSSD_SCHEMA = envelope_tag.nsmap.get('vssd')
self.ovf_version = int(self.OVF_SCHEMA.split('/')[-1])
# Get the file and disk lists
self.filelist = self._getFilelist()
self.disklist = self._getDisklist()
def getVirtualSystems(self):
- return self.xml.findall("{{{schema}}}VirtualSystem".format(schema=OVF_SCHEMA))
+ return self.xml.findall("{{{schema}}}VirtualSystem".format(schema=self.OVF_SCHEMA))
+ def getXML(self):
+ return lxml.etree.tostring(self.xml, pretty_print=True).decode('utf8')
def getVirtualHardware(self, virtual_system):
hardware_list = virtual_system.findall(
- "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(schema=OVF_SCHEMA)
+ "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(schema=self.OVF_SCHEMA)
)
virtual_hardware = {}
for item in hardware_list:
try:
- item_type = RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=RASD_SCHEMA)).text]
+ item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text]
except:
continue
- quantity = item.find("{{{rasd}}}VirtualQuantity".format(rasd=RASD_SCHEMA))
+ quantity = item.find("{{{rasd}}}VirtualQuantity".format(rasd=self.RASD_SCHEMA))
if quantity is None:
- continue
- print(item_type)
+ virtual_hardware[item_type] = 1
+ else:
virtual_hardware[item_type] = quantity.text
return virtual_hardware
def getDiskMap(self, virtual_system):
+ # OVF v2 uses the StorageItem field, while v1 uses the normal Item field
+ if self.ovf_version < 2:
hardware_list = virtual_system.findall(
- "{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(schema=OVF_SCHEMA)
+ "{{{schema}}}VirtualHardwareSection/{{{schema}}}Item".format(schema=self.OVF_SCHEMA)
+ )
+ else:
+ hardware_list = virtual_system.findall(
+ "{{{schema}}}VirtualHardwareSection/{{{schema}}}StorageItem".format(schema=self.OVF_SCHEMA)
)
disk_list = []
for item in hardware_list:
item_type = None
- try:
- item_type = SASD_TYPE[item.find("{{{sasd}}}ResourceType".format(sasd=SASD_SCHEMA)).text]
- except:
- item_type = RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=RASD_SCHEMA)).text]
+ if self.SASD_SCHEMA is not None:
+ item_type = self.RASD_TYPE[item.find("{{{sasd}}}ResourceType".format(sasd=self.SASD_SCHEMA)).text]
+ else:
+ item_type = self.RASD_TYPE[item.find("{{{rasd}}}ResourceType".format(rasd=self.RASD_SCHEMA)).text]
if item_type != 'disk':
continue
hostref = None
- try:
- hostref = item.find("{{{sasd}}}HostResource".format(sasd=SASD_SCHEMA))
- except:
- hostref = item.find("{{{rasd}}}HostResource".format(rasd=RASD_SCHEMA))
+ if self.SASD_SCHEMA is not None:
+ hostref = item.find("{{{sasd}}}HostResource".format(sasd=self.SASD_SCHEMA))
+ else:
+ hostref = item.find("{{{rasd}}}HostResource".format(rasd=self.RASD_SCHEMA))
if hostref is None:
continue
disk_res = hostref.text
# Determine which file this disk_res ultimately represents
- (disk_id, disk_ref, disk_capacity) = [x for x in self.disklist if x[0] == disk_res.split('/')[-1]][0]
+ (disk_id, disk_ref, disk_capacity, disk_capacity_unit) = [x for x in self.disklist if x[0] == disk_res.split('/')[-1]][0]
(file_id, disk_src) = [x for x in self.filelist if x[0] == disk_ref][0]
+ if disk_capacity_unit is not None:
+ # Handle the unit conversion
+ base_unit, action, multiple = disk_capacity_unit.split()
+ multiple_base, multiple_exponent = multiple.split('^')
+ disk_capacity = int(disk_capacity) * ( int(multiple_base) ** int(multiple_exponent) )
# Append the disk with all details to the list
disk_list.append({
"id": disk_id,

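As a side note on the getDiskMap() change above: the OVF capacityAllocationUnits attribute is handled by splitting strings of the form "byte * 2^30". A standalone sketch of the same conversion, for reference only (the function name is illustrative, not from the commit):

    def parse_ovf_capacity(capacity, capacity_units):
        # Convert an OVF Disk capacity plus its allocation-units string to bytes.
        # A units string such as "byte * 2^30" means the capacity is given in GiB.
        if capacity_units is None:
            return int(capacity)
        base_unit, action, multiple = capacity_units.split()
        multiple_base, multiple_exponent = multiple.split('^')
        return int(capacity) * (int(multiple_base) ** int(multiple_exponent))

    # parse_ovf_capacity("10", "byte * 2^30") -> 10737418240 (10 GiB in bytes)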

@@ -38,6 +38,8 @@ import daemon_lib.ceph as pvc_ceph
import pvcapid.libvirt_schema as libvirt_schema
+ from pvcapid.ova import list_ova
#
# Exceptions (used by Celery tasks)
#
@@ -197,14 +199,14 @@ def template_list(limit):
#
# Template Create functions
#
- def create_template_system(name, vcpu_count, vram_mb, serial=False, vnc=False, vnc_bind=None, node_limit=None, node_selector=None, node_autostart=False):
+ def create_template_system(name, vcpu_count, vram_mb, serial=False, vnc=False, vnc_bind=None, node_limit=None, node_selector=None, node_autostart=False, ova=None):
if list_template_system(name, is_fuzzy=False)[-1] != 404:
retmsg = { 'message': 'The system template "{}" already exists'.format(name) }
retcode = 400
return retmsg, retcode
- query = "INSERT INTO system_template (name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, node_autostart) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
+ query = "INSERT INTO system_template (name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, node_autostart, ova) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"
- args = (name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, node_autostart)
+ args = (name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, node_autostart, ova)
conn, cur = open_database(config)
try:
@@ -661,10 +663,6 @@ def delete_script(name):
close_database(conn, cur)
return retmsg, retcode
- #
- # OVA functions
- #
#
# Profile functions
#
@@ -703,7 +701,8 @@ def list_profile(limit, is_fuzzy=True):
cur.execute(query, args)
try:
name = cur.fetchone()['name']
- except:
+ except Exception as e:
+ print(e)
name = "N/A"
profile_data[etype] = name
# Split the arguments back into a list
@@ -722,8 +721,8 @@ def create_profile(name, profile_type, system_template, network_template, storag
retcode = 400
return retmsg, retcode
- if profile_type not in ['script', 'clone', 'ova']:
+ if profile_type not in ['provisioner', 'ova']:
- retmsg = { 'message': 'A valid profile type (script, clone, ova) must be specified' }
+ retmsg = { 'message': 'A valid profile type (provisioner, ova) must be specified' }
retcode = 400
return retmsg, retcode
@@ -742,7 +741,7 @@ def create_profile(name, profile_type, system_template, network_template, storag
for template in network_templates:
if template['name'] == network_template:
network_template_id = template['id']
- if not network_template_id:
+ if not network_template_id and profile_type != 'ova':
retmsg = { 'message': 'The network template "{}" for profile "{}" does not exist'.format(network_template, name) }
retcode = 400
return retmsg, retcode
@@ -752,7 +751,7 @@ def create_profile(name, profile_type, system_template, network_template, storag
for template in storage_templates:
if template['name'] == storage_template:
storage_template_id = template['id']
- if not storage_template_id:
+ if not storage_template_id and profile_type != 'ova':
retmsg = { 'message': 'The storage template "{}" for profile "{}" does not exist'.format(storage_template, name) }
retcode = 400
return retmsg, retcode
@@ -782,7 +781,7 @@ def create_profile(name, profile_type, system_template, network_template, storag
conn, cur = open_database(config)
try:
- query = "INSERT INTO profile (name, type, system_template, network_template, storage_template, userdata, script, ova, arguments) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
+ query = "INSERT INTO profile (name, profile_type, system_template, network_template, storage_template, userdata, script, ova, arguments) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
args = (name, profile_type, system_template_id, network_template_id, storage_template_id, userdata_id, script_id, ova_id, arguments_formatted)
cur.execute(query, args)
retmsg = { "message": 'Created VM profile "{}"'.format(name) }
@@ -802,8 +801,8 @@ def modify_profile(name, profile_type, system_template, network_template, storag
fields = []
if profile_type is not None:
- if profile_type not in ['script', 'clone', 'ova']:
+ if profile_type not in ['provisioner', 'ova']:
- retmsg = { 'message': 'A valid profile type (script, clone, ova) must be specified' }
+ retmsg = { 'message': 'A valid profile type (provisioner, ova) must be specified' }
retcode = 400
return retmsg, retcode
fields.append({'field': 'type', 'data': profile_type})


@@ -340,6 +340,77 @@ def script_remove(config, name):
return retvalue, response.json()['message']
def ova_info(config, name):
"""
Get information about OVA image {name}
API endpoint: GET /api/v1/provisioner/ova/{name}
API arguments:
API schema: {json_data_object}
"""
response = call_api(config, 'get', '/provisioner/ova/{name}'.format(name=name))
if response.status_code == 200:
return True, response.json()[0]
else:
return False, response.json()['message']
def ova_list(config, limit):
"""
Get list information about OVA images (limited by {limit})
API endpoint: GET /api/v1/provisioner/ova
API arguments: limit={limit}
API schema: [{json_data_object},{json_data_object},etc.]
"""
params = dict()
if limit:
params['limit'] = limit
response = call_api(config, 'get', '/provisioner/ova', params=params)
if response.status_code == 200:
return True, response.json()
else:
return False, response.json()['message']
def ova_upload(config, name, ova_file, params):
"""
Upload an OVA image to the cluster
API endpoint: POST /api/v1/provisioner/ova/{name}
API arguments: pool={pool}, ova_size={ova_size}
API schema: {"message":"{data}"}
"""
files = {
'file': open(ova_file,'rb')
}
response = call_api(config, 'post', '/provisioner/ova/{}'.format(name), params=params, files=files)
if response.status_code == 200:
retstatus = True
else:
retstatus = False
return retstatus, response.json()['message']
def ova_remove(config, name):
"""
Remove OVA image {name}
API endpoint: DELETE /api/v1/provisioner/ova/{name}
API_arguments:
API schema: {message}
"""
response = call_api(config, 'delete', '/provisioner/ova/{name}'.format(name=name))
if response.status_code == 200:
retvalue = True
else:
retvalue = False
return retvalue, response.json()['message']
def profile_info(config, profile):
"""
Get information about profile
@@ -1069,6 +1140,119 @@ def format_list_script(script_data, lines=None):
return '\n'.join([script_list_output_header] + script_list_output)
def format_list_ova(ova_data):
if isinstance(ova_data, dict):
ova_data = [ ova_data ]
ova_list_output = []
# Determine optimal column widths
ova_name_length = 5
ova_id_length = 3
ova_disk_id_length = 8
ova_disk_size_length = 10
ova_disk_pool_length = 5
ova_disk_volume_format_length = 7
ova_disk_volume_name_length = 13
for ova in ova_data:
# ova_name column
_ova_name_length = len(str(ova['name'])) + 1
if _ova_name_length > ova_name_length:
ova_name_length = _ova_name_length
# ova_id column
_ova_id_length = len(str(ova['id'])) + 1
if _ova_id_length > ova_id_length:
ova_id_length = _ova_id_length
for disk in ova['volumes']:
# ova_disk_id column
_ova_disk_id_length = len(str(disk['disk_id'])) + 1
if _ova_disk_id_length > ova_disk_id_length:
ova_disk_id_length = _ova_disk_id_length
# ova_disk_size column
_ova_disk_size_length = len(str(disk['disk_size_gb'])) + 1
if _ova_disk_size_length > ova_disk_size_length:
ova_disk_size_length = _ova_disk_size_length
# ova_disk_pool column
_ova_disk_pool_length = len(str(disk['pool'])) + 1
if _ova_disk_pool_length > ova_disk_pool_length:
ova_disk_pool_length = _ova_disk_pool_length
# ova_disk_volume_format column
_ova_disk_volume_format_length = len(str(disk['volume_format'])) + 1
if _ova_disk_volume_format_length > ova_disk_volume_format_length:
ova_disk_volume_format_length = _ova_disk_volume_format_length
# ova_disk_volume_name column
_ova_disk_volume_name_length = len(str(disk['volume_name'])) + 1
if _ova_disk_volume_name_length > ova_disk_volume_name_length:
ova_disk_volume_name_length = _ova_disk_volume_name_length
# Format the string (header)
ova_list_output_header = '{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}} \
{ova_disk_id: <{ova_disk_id_length}} \
{ova_disk_size: <{ova_disk_size_length}} \
{ova_disk_pool: <{ova_disk_pool_length}} \
{ova_disk_volume_format: <{ova_disk_volume_format_length}} \
{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}'.format(
ova_name_length=ova_name_length,
ova_id_length=ova_id_length,
ova_disk_id_length=ova_disk_id_length,
ova_disk_pool_length=ova_disk_pool_length,
ova_disk_size_length=ova_disk_size_length,
ova_disk_volume_format_length=ova_disk_volume_format_length,
ova_disk_volume_name_length=ova_disk_volume_name_length,
bold=ansiprint.bold(),
end_bold=ansiprint.end(),
ova_name='Name',
ova_id='ID',
ova_disk_id='Disk ID',
ova_disk_size='Size [GB]',
ova_disk_pool='Pool',
ova_disk_volume_format='Format',
ova_disk_volume_name='Source Volume',
)
# Format the string (elements)
for ova in sorted(ova_data, key=lambda i: i.get('name', None)):
ova_list_output.append(
'{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}}{end_bold}'.format(
ova_name_length=ova_name_length,
ova_id_length=ova_id_length,
bold='',
end_bold='',
ova_name=str(ova['name']),
ova_id=str(ova['id'])
)
)
for disk in sorted(ova['volumes'], key=lambda i: i.get('disk_id', None)):
ova_list_output.append(
'{bold}{ova_name: <{ova_name_length}} {ova_id: <{ova_id_length}} \
{ova_disk_id: <{ova_disk_id_length}} \
{ova_disk_size: <{ova_disk_size_length}} \
{ova_disk_pool: <{ova_disk_pool_length}} \
{ova_disk_volume_format: <{ova_disk_volume_format_length}} \
{ova_disk_volume_name: <{ova_disk_volume_name_length}}{end_bold}'.format(
ova_name_length=ova_name_length,
ova_id_length=ova_id_length,
ova_disk_id_length=ova_disk_id_length,
ova_disk_size_length=ova_disk_size_length,
ova_disk_pool_length=ova_disk_pool_length,
ova_disk_volume_format_length=ova_disk_volume_format_length,
ova_disk_volume_name_length=ova_disk_volume_name_length,
bold='',
end_bold='',
ova_name='',
ova_id='',
ova_disk_id=str(disk['disk_id']),
ova_disk_size=str(disk['disk_size_gb']),
ova_disk_pool=str(disk['pool']),
ova_disk_volume_format=str(disk['volume_format']),
ova_disk_volume_name=str(disk['volume_name']),
)
)
return '\n'.join([ova_list_output_header] + ova_list_output)
def format_list_profile(profile_data):
if isinstance(profile_data, dict):
profile_data = [ profile_data ]
@@ -1078,7 +1262,7 @@ def format_list_profile(profile_data):
profile_type = profile['type']
if 'ova' in profile_type:
# Set the source to the name of the OVA:
- profile['source'] = profile_data['ova']
+ profile['source'] = 'OVA {}'.format(profile['ova'])
else:
# Set the source to be the type
profile['source'] = profile_type


@@ -2833,6 +2833,99 @@ def provisioner_script_remove(name, confirm_flag):
cleanup(retcode, retdata)
###############################################################################
# pvc provisioner ova
###############################################################################
@click.group(name='ova', short_help='Manage PVC provisioner OVA images.', context_settings=CONTEXT_SETTINGS)
def provisioner_ova():
"""
Manage ovas in the PVC provisioner.
"""
# Abort commands under this group if config is bad
if config.get('badcfg', None):
click.echo('No cluster specified and no local pvcapid.yaml configuration found. Use "pvc cluster" to add a cluster API to connect to.')
exit(1)
###############################################################################
# pvc provisioner ova list
###############################################################################
@click.command(name='list', short_help='List all OVA images.')
@click.argument(
'limit', default=None, required=False
)
def provisioner_ova_list(limit):
"""
List all OVA images in the PVC cluster provisioner.
"""
retcode, retdata = pvc_provisioner.ova_list(config, limit)
if retcode:
retdata = pvc_provisioner.format_list_ova(retdata)
cleanup(retcode, retdata)
###############################################################################
# pvc provisioner ova upload
###############################################################################
@click.command(name='upload', short_help='Upload OVA file.')
@click.argument(
'name'
)
@click.argument(
'filename'
)
@click.option(
'-p', '--pool', 'pool',
required=True,
help='The storage pool for the OVA images.'
)
def provisioner_ova_upload(name, filename, pool):
"""
Upload a new OVA image NAME from FILENAME.
Only single-file (.ova) OVA/OVF images are supported. For multi-file (.ovf + .vmdk) OVF images, concatenate them with "tar" then upload the resulting file.
Once uploaded, a provisioner system template and OVA-type profile, each named NAME, will be created to store the configuration of the OVA.
Note that the provisioner profile for the OVA will not contain any network template definitions, and will ignore network definitions from the OVA itself. The administrator must assign a network template to the profile to provide the desired network configuration.
Storage templates, provisioning scripts, and arguments for OVA-type profiles will be ignored and should not be set.
"""
if not os.path.exists(filename):
click.echo("ERROR: File '{}' does not exist!".format(filename))
exit(1)
params = dict()
params['pool'] = pool
params['ova_size'] = os.path.getsize(filename)
retcode, retdata = pvc_provisioner.ova_upload(config, name, filename, params)
cleanup(retcode, retdata)
###############################################################################
# pvc provisioner ova remove
###############################################################################
@click.command(name='remove', short_help='Remove OVA image.')
@click.argument(
'name'
)
@click.option(
'-y', '--yes', 'confirm_flag',
is_flag=True, default=False,
help='Confirm the removal'
)
def provisioner_ova_remove(name, confirm_flag):
"""
Remove OVA image NAME from the PVC cluster provisioner.
"""
if not confirm_flag:
try:
click.confirm('Remove OVA {}'.format(name), prompt_suffix='? ', abort=True)
except:
exit(0)
retcode, retdata = pvc_provisioner.ova_remove(config, name)
cleanup(retcode, retdata)
###############################################################################
# pvc provisioner profile
###############################################################################
@@ -2869,47 +2962,54 @@ def provisioner_profile_list(limit):
@click.argument(
'name'
)
@click.option(
'-p', '--profile-type', 'profile_type',
default='provisioner', show_default=True,
type=click.Choice(['provisioner', 'ova'], case_sensitive=False),
help='The type of profile.'
)
@click.option(
'-s', '--system-template', 'system_template',
help='The system template for the profile.'
)
@click.option(
'-n', '--network-template', 'network_template',
help='The network template for the profile.'
)
@click.option(
'-t', '--storage-template', 'storage_template',
help='The storage template for the profile.'
)
@click.option(
'-u', '--userdata', 'userdata',
help='The userdata document for the profile.'
)
@click.option(
'-x', '--script', 'script',
help='The script for the profile.'
)
@click.option(
'-o', '--ova', 'ova',
help='The OVA image for the profile.'
)
@click.option(
'-a', '--script-arg', 'script_args',
default=[], multiple=True,
help='Additional argument to the script install() function in key=value format.'
)
def provisioner_profile_add(name, profile_type, system_template, network_template, storage_template, userdata, script, ova, script_args):
"""
Add a new provisioner profile NAME.
"""
params = dict()
params['name'] = name
params['profile_type'] = profile_type
params['system_template'] = system_template
params['network_template'] = network_template
params['storage_template'] = storage_template
params['userdata'] = userdata
params['script'] = script
params['ova'] = ova
params['arg'] = script_args
retcode, retdata = pvc_provisioner.profile_add(config, params)
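To make the two profile types concrete, the parameter sets this command assembles differ mainly in which components are supplied. A sketch with hypothetical template, script, and OVA names follows; an ova-type profile relies on the system template created at OVA upload, and its storage template, script, and arguments are ignored.
# Hypothetical parameter sets as assembled by provisioner_profile_add();
# all names below are illustrative.
provisioner_type_params = {
    'name': 'debian-small',
    'profile_type': 'provisioner',
    'system_template': '1cpu-1gb',
    'network_template': 'dual-net',
    'storage_template': 'single-disk',
    'userdata': 'basic-cloudinit',
    'script': 'debootstrap-script',
    'ova': None,
    'arg': ['deb_release=buster'],
}
ova_type_params = {
    'name': 'debian-10.ova',
    'profile_type': 'ova',
    'ova': 'debian-10.ova',
    # Remaining components may be left unset; the system template named after
    # the OVA is created automatically at upload time.
}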
@@ -3362,6 +3462,10 @@ provisioner_script.add_command(provisioner_script_add)
provisioner_script.add_command(provisioner_script_modify)
provisioner_script.add_command(provisioner_script_remove)
provisioner_ova.add_command(provisioner_ova_list)
provisioner_ova.add_command(provisioner_ova_upload)
provisioner_ova.add_command(provisioner_ova_remove)
provisioner_profile.add_command(provisioner_profile_list)
provisioner_profile.add_command(provisioner_profile_add)
provisioner_profile.add_command(provisioner_profile_modify)
@@ -3370,6 +3474,7 @@ provisioner_profile.add_command(provisioner_profile_remove)
cli_provisioner.add_command(provisioner_template)
cli_provisioner.add_command(provisioner_userdata)
cli_provisioner.add_command(provisioner_script)
cli_provisioner.add_command(provisioner_ova)
cli_provisioner.add_command(provisioner_profile)
cli_provisioner.add_command(provisioner_create)
cli_provisioner.add_command(provisioner_status)
@@ -554,6 +554,48 @@
},
"type": "object"
},
"ova": {
"properties": {
"id": {
"description": "Internal provisioner OVA ID",
"type": "integer"
},
"name": {
"description": "OVA name",
"type": "string"
},
"volumes": {
"items": {
"id": "ova_volume",
"properties": {
"disk_id": {
"description": "Disk identifier",
"type": "string"
},
"disk_size_gb": {
"description": "Disk size in GB",
"type": "string"
},
"pool": {
"description": "Pool containing the OVA volume",
"type": "string"
},
"volume_format": {
"description": "OVA image format",
"type": "string"
},
"volume_name": {
"description": "Storage volume containing the OVA image",
"type": "string"
}
},
"type": "object"
},
"type": "list"
}
},
"type": "object"
},
"pool": { "pool": {
"properties": { "properties": {
"name": { "name": {
@ -2190,6 +2232,160 @@
] ]
} }
}, },
"/api/v1/provisioner/ova": {
"get": {
"description": "",
"parameters": [
{
"description": "An OVA name search limit; fuzzy by default, use ^/$ to force exact matches",
"in": "query",
"name": "limit",
"required": false,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"items": {
"$ref": "#/definitions/ova"
},
"type": "list"
}
}
},
"summary": "Return a list of OVA sources",
"tags": [
"provisioner"
]
},
"post": {
"description": "<br/>The API client is responsible for determining and setting the ova_size value, as this value cannot be determined dynamically before the upload proceeds.",
"parameters": [
{
"description": "Storage pool name",
"in": "query",
"name": "pool",
"required": true,
"type": "string"
},
{
"description": "OVA name on the cluster (usually identical to the OVA file name)",
"in": "query",
"name": "name",
"required": true,
"type": "string"
},
{
"description": "Size of the OVA file in bytes",
"in": "query",
"name": "ova_size",
"required": true,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"400": {
"description": "Bad request",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Upload an OVA image to the cluster",
"tags": [
"provisioner"
]
}
},
"/api/v1/provisioner/ova/{ova}": {
"delete": {
"description": "",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"404": {
"description": "Not found",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Remove ova {ova}",
"tags": [
"provisioner"
]
},
"get": {
"description": "",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/ova"
}
},
"404": {
"description": "Not found",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Return information about OVA image {ova}",
"tags": [
"provisioner"
]
},
"post": {
"description": "<br/>The API client is responsible for determining and setting the ova_size value, as this value cannot be determined dynamically before the upload proceeds.",
"parameters": [
{
"description": "Storage pool name",
"in": "query",
"name": "pool",
"required": true,
"type": "string"
},
{
"description": "Size of the OVA file in bytes",
"in": "query",
"name": "ova_size",
"required": true,
"type": "string"
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/Message"
}
},
"400": {
"description": "Bad request",
"schema": {
"$ref": "#/definitions/Message"
}
}
},
"summary": "Upload an OVA image to the cluster",
"tags": [
"provisioner"
]
}
},
"/api/v1/provisioner/profile": { "/api/v1/provisioner/profile": {
"get": { "get": {
"description": "", "description": "",
@ -2228,39 +2424,57 @@
"required": true, "required": true,
"type": "string" "type": "string"
}, },
{
"description": "Profile type",
"enum": [
"provisioner",
"ova"
],
"in": "query",
"name": "profile_type",
"required": true,
"type": "string"
},
{
"description": "Script name",
"in": "query",
"name": "script",
"required": false,
"type": "string"
},
{
"description": "System template name",
"in": "query",
"name": "system_template",
"required": false,
"type": "string"
},
{
"description": "Network template name",
"in": "query",
"name": "network_template",
"required": false,
"type": "string"
},
{
"description": "Storage template name",
"in": "query",
"name": "storage_template",
"required": false,
"type": "string"
},
{
"description": "Userdata template name",
"in": "query",
"name": "userdata",
"required": false,
"type": "string"
},
{
"description": "OVA image source",
"in": "query",
"name": "ova",
"required": false,
"type": "string" "type": "string"
}, },
{ {
@ -2336,6 +2550,17 @@
"post": { "post": {
"description": "", "description": "",
"parameters": [ "parameters": [
{
"description": "Profile type",
"enum": [
"provisioner",
"ova"
],
"in": "query",
"name": "profile_type",
"required": true,
"type": "string"
},
{
"description": "Script name",
"in": "query",
@@ -2371,6 +2596,13 @@
"required": true,
"type": "string"
},
{
"description": "OVA image source",
"in": "query",
"name": "ova",
"required": false,
"type": "string"
},
{
"description": "Script install() function keyword argument in \"arg=data\" format; may be specified multiple times to add multiple arguments",
"in": "query",