diff --git a/client-api/api_lib/libvirt_schema.py b/client-api/api_lib/libvirt_schema.py new file mode 100755 index 00000000..fa3b6fae --- /dev/null +++ b/client-api/api_lib/libvirt_schema.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 + +# libvirt_schema.py - Libvirt schema elements +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2019 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+# +############################################################################### + +# File header, containing default values for various non-device components +# Variables: +# * vm_name +# * vm_uuid +# * vm_description +# * vm_memory +# * vm_vcpus +# * vm_architecture +libvirt_header = """ + {vm_name} + {vm_uuid} + {vm_description} + {vm_memory} + {vm_vcpus} + + + + + hvm + + + + + + + + + + + destroy + restart + restart + +""" + +# File footer, closing devices and domain elements +libvirt_footer = """ +""" + +# Default devices for all VMs +devices_default = """ /usr/bin/kvm + + + + + /dev/random + +""" + +# Serial device +# Variables: +# * vm_name +devices_serial = """ + + + +""" + +# VNC device +# Variables: +# * vm_vncport +# * vm_vnc_autoport +# * vm_vnc_bind +devices_vnc = """ +""" + +# VirtIO SCSI device +devices_scsi_controller = """ +""" + +# Disk device header +# Variables: +# * ceph_storage_secret +# * disk_pool +# * vm_name +# * disk_id +devices_disk_header = """ + + + + + + +""" + +# Disk device coordinator element +# Variables: +# * coordinator_name +# * coordinator_ceph_mon_port +devices_disk_coordinator = """ +""" + +# Disk device footer +devices_disk_footer = """ + +""" + +# vhostmd virtualization passthrough device +devices_vhostmd = """ + + + + + +""" + +# Network interface device +# Variables: +# * eth_macaddr +# * eth_bridge +devices_net_interface = """ + + + + +""" diff --git a/client-api/api_lib/api.py b/client-api/api_lib/pvcapi_helper.py similarity index 99% rename from client-api/api_lib/api.py rename to client-api/api_lib/pvcapi_helper.py index fa7942e2..7d41ef30 100755 --- a/client-api/api_lib/api.py +++ b/client-api/api_lib/pvcapi_helper.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 -# api.py - PVC HTTP API functions +# pvcapi_helper.py - PVC HTTP API functions # Part of the Parallel Virtual Cluster (PVC) system # # Copyright (C) 2018-2019 Joshua M. 
#!/usr/bin/env python3

# pvcapi_provisioner.py - PVC Provisioner functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2019 Joshua M. Boniface
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import flask
import json
import psycopg2
import psycopg2.extras
import os
import re
import time
import shlex
import subprocess

import client_lib.common as pvc_common
import client_lib.node as pvc_node
import client_lib.vm as pvc_vm
import client_lib.network as pvc_network
import client_lib.ceph as pvc_ceph

import api_lib.libvirt_schema as libvirt_schema

#
# Exceptions (used by Celery tasks)
#
class ValidationError(Exception):
    """
    Raised when a required value is un- or mis-defined.
    """
    pass

class ClusterError(Exception):
    """
    Raised when the PVC cluster state is out of alignment with the requested action.
    """
    pass

class ProvisioningError(Exception):
    """
    Raised when a provisioning command fails.
    """
    pass
+ """ + pass + +# +# Common functions +# + +# Database connections +def open_database(config): + conn = psycopg2.connect( + host=config['database_host'], + port=config['database_port'], + dbname=config['database_name'], + user=config['database_user'], + password=config['database_password'] + ) + cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) + return conn, cur + +def close_database(conn, cur, failed=False): + if not failed: + conn.commit() + cur.close() + conn.close() + +# +# Template List functions +# +def list_template(limit, table, is_fuzzy=True): + if limit: + if is_fuzzy: + # Handle fuzzy vs. non-fuzzy limits + if not re.match('\^.*', limit): + limit = '%' + limit + else: + limit = limit[1:] + if not re.match('.*\$', limit): + limit = limit + '%' + else: + limit = limit[:-1] + + args = (limit, ) + query = "SELECT * FROM {} WHERE name LIKE %s;".format(table) + else: + args = () + query = "SELECT * FROM {};".format(table) + + conn, cur = open_database(config) + cur.execute(query, args) + data = cur.fetchall() + + if table == 'network_template': + for template_id, template_data in enumerate(data): + # Fetch list of VNIs from network table + query = "SELECT vni FROM network WHERE network_template = %s;" + args = (template_data['id'],) + cur.execute(query, args) + vnis = cur.fetchall() + data[template_id]['networks'] = vnis + + if table == 'storage_template': + for template_id, template_data in enumerate(data): + # Fetch list of VNIs from network table + query = "SELECT * FROM storage WHERE storage_template = %s;" + args = (template_data['id'],) + cur.execute(query, args) + disks = cur.fetchall() + data[template_id]['disks'] = disks + + close_database(conn, cur) + return data + +def list_template_system(limit, is_fuzzy=True): + """ + Obtain a list of system templates. + """ + data = list_template(limit, 'system_template', is_fuzzy) + return data + +def list_template_network(limit, is_fuzzy=True): + """ + Obtain a list of network templates. 
+ """ + data = list_template(limit, 'network_template', is_fuzzy) + return data + +def list_template_network_vnis(name): + """ + Obtain a list of network template VNIs. + """ + data = list_template(name, 'network_template', is_fuzzy=False)[0] + networks = data['networks'] + return networks + +def list_template_storage(limit, is_fuzzy=True): + """ + Obtain a list of storage templates. + """ + data = list_template(limit, 'storage_template', is_fuzzy) + return data + +def list_template_storage_disks(name): + """ + Obtain a list of storage template disks. + """ + data = list_template(name, 'storage_template', is_fuzzy=False)[0] + disks = data['disks'] + return disks + +def list_template_userdata(limit, is_fuzzy=True): + """ + Obtain a list of userdata templates. + """ + data = list_template(limit, 'userdata_template', is_fuzzy) + return data + +def template_list(limit): + system_templates = list_template_system(limit) + network_templates = list_template_network(limit) + storage_templates = list_template_storage(limit) + userdata_templates = list_template_userdata(limit) + + return { "system_templates": system_templates, "network_templates": network_templates, "storage_templates": storage_templates, "userdata_templates": userdata_templates } + +# +# Template Create functions +# +def create_template_system(name, vcpu_count, vram_mb, serial=False, vnc=False, vnc_bind=None, node_limit=None, node_selector=None, start_with_node=False): + if list_template_system(name, is_fuzzy=False): + retmsg = { "message": "The system template {} already exists".format(name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + query = "INSERT INTO system_template (name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, start_with_node) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);" + args = (name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, start_with_node) + + conn, cur = open_database(config) + try: + cur.execute(query, args) + 
retmsg = { "name": name } + retcode = 200 + except psycopg2.IntegrityError as e: + retmsg = { "message": "Failed to create entry {}".format(name), "error": e } + retcode = 400 + close_database(conn, cur) + return flask.jsonify(retmsg), retcode + +def create_template_network(name, mac_template=None): + if list_template_network(name, is_fuzzy=False): + retmsg = { "message": "The network template {} already exists".format(name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + conn, cur = open_database(config) + try: + query = "INSERT INTO network_template (name, mac_template) VALUES (%s, %s);" + args = (name, mac_template) + cur.execute(query, args) + retmsg = { "name": name } + retcode = 200 + except psycopg2.IntegrityError as e: + retmsg = { "message": "Failed to create entry {}".format(name), "error": e } + retcode = 400 + close_database(conn, cur) + return flask.jsonify(retmsg), retcode + +def create_template_network_element(name, vni): + if not list_template_network(name, is_fuzzy=False): + retmsg = { "message": "The network template {} does not exist".format(name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + networks = list_template_network_vnis(name) + found_vni = False + for network in networks: + if int(network['vni']) == vni: + found_vni = True + if found_vni: + retmsg = { "message": "The VNI {} in network template {} already exists".format(vni, name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + conn, cur = open_database(config) + try: + query = "SELECT id FROM network_template WHERE name = %s;" + args = (name,) + cur.execute(query, args) + template_id = cur.fetchone()['id'] + query = "INSERT INTO network (network_template, vni) VALUES (%s, %s);" + args = (template_id, vni) + cur.execute(query, args) + retmsg = { "name": name, "vni": vni } + retcode = 200 + except psycopg2.IntegrityError as e: + retmsg = { "message": "Failed to create entry {}".format(vni), "error": e } + retcode = 400 + close_database(conn, 
cur) + return flask.jsonify(retmsg), retcode + +def create_template_storage(name): + if list_template_storage(name, is_fuzzy=False): + retmsg = { "message": "The storage template {} already exists".format(name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + conn, cur = open_database(config) + try: + query = "INSERT INTO storage_template (name) VALUES (%s);" + args = (name,) + cur.execute(query, args) + retmsg = { "name": name } + retcode = 200 + except psycopg2.IntegrityError as e: + retmsg = { "message": "Failed to create entry {}".format(name), "error": e } + retcode = 400 + close_database(conn, cur) + return flask.jsonify(retmsg), retcode + +def create_template_storage_element(name, pool, disk_id, disk_size_gb, filesystem=None, filesystem_args=[], mountpoint=None): + if not list_template_storage(name, is_fuzzy=False): + retmsg = { "message": "The storage template {} does not exist".format(name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + disks = list_template_storage_disks(name) + found_disk = False + for disk in disks: + if disk['disk_id'] == disk_id: + found_disk = True + if found_disk: + retmsg = { "message": "The disk {} in storage template {} already exists".format(disk_id, name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + if mountpoint and not filesystem: + retmsg = { "message": "A filesystem must be specified along with a mountpoint." 
} + retcode = 400 + return flask.jsonify(retmsg), retcode + + conn, cur = open_database(config) + try: + query = "SELECT id FROM storage_template WHERE name = %s;" + args = (name,) + cur.execute(query, args) + template_id = cur.fetchone()['id'] + query = "INSERT INTO storage (storage_template, pool, disk_id, disk_size_gb, mountpoint, filesystem, filesystem_args) VALUES (%s, %s, %s, %s, %s, %s, %s);" + args = (template_id, pool, disk_id, disk_size_gb, mountpoint, filesystem, ' '.join(filesystem_args)) + cur.execute(query, args) + retmsg = { "name": name, "disk_id": disk_id } + retcode = 200 + except psycopg2.IntegrityError as e: + retmsg = { "message": "Failed to create entry {}".format(disk_id), "error": e } + retcode = 400 + close_database(conn, cur) + return flask.jsonify(retmsg), retcode + +def create_template_userdata(name, userdata): + if list_template_userdata(name, is_fuzzy=False): + retmsg = { "message": "The userdata template {} already exists".format(name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + conn, cur = open_database(config) + try: + query = "INSERT INTO userdata_template (name, userdata) VALUES (%s, %s);" + args = (name, userdata) + cur.execute(query, args) + retmsg = { "name": name } + retcode = 200 + except psycopg2.IntegrityError as e: + retmsg = { "message": "Failed to create entry {}".format(name), "error": e } + retcode = 400 + close_database(conn, cur) + return flask.jsonify(retmsg), retcode + +# +# Template update functions +# +def update_template_userdata(name, userdata): + if not list_template_userdata(name, is_fuzzy=False): + retmsg = { "message": "The userdata template {} does not exist".format(name) } + retcode = 400 + return flask.jsonify(retmsg), retcode + + tid = list_template_userdata(name, is_fuzzy=False)[0]['id'] + + conn, cur = open_database(config) + try: + query = "UPDATE userdata_template SET userdata = %s WHERE id = %s;" + args = (userdata, tid) + cur.execute(query, args) + retmsg = { "name": name } + 
#
# Template Delete functions
#
def delete_template_system(name):
    """
    Delete the system template `name`; returns a (flask.Response, status) tuple.
    """
    if not list_template_system(name, is_fuzzy=False):
        retmsg = { "message": "The system template {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "DELETE FROM system_template WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        # str(e): the raw exception object is not JSON-serializable
        retmsg = { "message": "Failed to delete entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

def delete_template_network(name):
    """
    Delete the network template `name` and all of its network (VNI) rows.
    """
    if not list_template_network(name, is_fuzzy=False):
        retmsg = { "message": "The network template {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "SELECT id FROM network_template WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        template_id = cur.fetchone()['id']
        # Remove child rows first, then the template itself
        query = "DELETE FROM network WHERE network_template = %s;"
        args = (template_id,)
        cur.execute(query, args)
        query = "DELETE FROM network_template WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to delete entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

def delete_template_network_element(name, vni):
    """
    Remove the VNI `vni` from the network template `name`.
    """
    if not list_template_network(name, is_fuzzy=False):
        retmsg = { "message": "The network template {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    networks = list_template_network_vnis(name)
    found_vni = False
    for network in networks:
        if network['vni'] == int(vni):
            found_vni = True
    if not found_vni:
        retmsg = { "message": "The VNI {} in network template {} does not exist".format(vni, name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "SELECT id FROM network_template WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        template_id = cur.fetchone()['id']
        query = "DELETE FROM network WHERE network_template = %s and vni = %s;"
        args = (template_id, vni)
        cur.execute(query, args)
        retmsg = { "name": name, "vni": vni }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to delete entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

def delete_template_storage(name):
    """
    Delete the storage template `name` and all of its disk rows.
    """
    if not list_template_storage(name, is_fuzzy=False):
        retmsg = { "message": "The storage template {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "SELECT id FROM storage_template WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        template_id = cur.fetchone()['id']
        # Remove child rows first, then the template itself
        query = "DELETE FROM storage WHERE storage_template = %s;"
        args = (template_id,)
        cur.execute(query, args)
        query = "DELETE FROM storage_template WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to delete entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

def delete_template_storage_element(name, disk_id):
    """
    Remove the disk `disk_id` from the storage template `name`.
    """
    if not list_template_storage(name, is_fuzzy=False):
        retmsg = { "message": "The storage template {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    disks = list_template_storage_disks(name)
    found_disk = False
    for disk in disks:
        if disk['disk_id'] == disk_id:
            found_disk = True
    if not found_disk:
        retmsg = { "message": "The disk {} in storage template {} does not exist".format(disk_id, name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "SELECT id FROM storage_template WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        template_id = cur.fetchone()['id']
        query = "DELETE FROM storage WHERE storage_template = %s and disk_id = %s;"
        args = (template_id, disk_id)
        cur.execute(query, args)
        retmsg = { "name": name, "disk_id": disk_id }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to delete entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

def delete_template_userdata(name):
    """
    Delete the userdata template `name`; returns a (flask.Response, status) tuple.
    """
    if not list_template_userdata(name, is_fuzzy=False):
        retmsg = { "message": "The userdata template {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "DELETE FROM userdata_template WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to delete entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

#
# Script functions
#
def list_script(limit, is_fuzzy=True):
    """
    Return all provisioning scripts whose name matches `limit` (all rows
    if limit is falsy), using the same fuzzy-match rules as list_template.
    """
    if limit:
        if is_fuzzy:
            # Handle fuzzy vs. non-fuzzy limits; raw strings avoid
            # invalid-escape warnings on modern Python
            if not re.match(r'\^.*', limit):
                limit = '%' + limit
            else:
                limit = limit[1:]
            if not re.match(r'.*\$', limit):
                limit = limit + '%'
            else:
                limit = limit[:-1]

        query = "SELECT * FROM {} WHERE name LIKE %s;".format('script')
        args = (limit, )
    else:
        query = "SELECT * FROM {};".format('script')
        args = ()

    conn, cur = open_database(config)
    cur.execute(query, args)
    data = cur.fetchall()
    close_database(conn, cur)
    return data
def create_script(name, script):
    """
    Create a new provisioning script; returns a (flask.Response, status) tuple.
    """
    if list_script(name, is_fuzzy=False):
        retmsg = { "message": "The script {} already exists".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "INSERT INTO script (name, script) VALUES (%s, %s);"
        args = (name, script)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        # str(e): the raw exception object is not JSON-serializable
        retmsg = { "message": "Failed to create entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

def update_script(name, script):
    """
    Replace the content of the existing provisioning script `name`.
    """
    # Single lookup serves both the existence check and the id fetch
    scripts = list_script(name, is_fuzzy=False)
    if not scripts:
        retmsg = { "message": "The script {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    tid = scripts[0]['id']

    conn, cur = open_database(config)
    try:
        query = "UPDATE script SET script = %s WHERE id = %s;"
        args = (script, tid)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to update entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

def delete_script(name):
    """
    Delete the provisioning script `name`; returns a (flask.Response, status) tuple.
    """
    if not list_script(name, is_fuzzy=False):
        retmsg = { "message": "The script {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "DELETE FROM script WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to delete entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

#
# Profile functions
#
def list_profile(limit, is_fuzzy=True):
    """
    Return all profiles matching `limit` (all if falsy), with each
    sub-element id resolved to its name and the stored '|'-joined
    arguments split back into a list.
    """
    if limit:
        if is_fuzzy:
            # Handle fuzzy vs. non-fuzzy limits; raw strings avoid
            # invalid-escape warnings on modern Python
            if not re.match(r'\^.*', limit):
                limit = '%' + limit
            else:
                limit = limit[1:]
            if not re.match(r'.*\$', limit):
                limit = limit + '%'
            else:
                limit = limit[:-1]

        query = "SELECT * FROM {} WHERE name LIKE %s;".format('profile')
        args = (limit, )
    else:
        query = "SELECT * FROM {};".format('profile')
        args = ()

    conn, cur = open_database(config)
    cur.execute(query, args)
    orig_data = cur.fetchall()
    data = list()
    for profile in orig_data:
        profile_data = dict()
        profile_data['name'] = profile['name']
        # Parse the name of each subelement
        for etype in 'system_template', 'network_template', 'storage_template', 'userdata_template', 'script':
            query = 'SELECT name from {} WHERE id = %s'.format(etype)
            args = (profile[etype],)
            cur.execute(query, args)
            name = cur.fetchone()['name']
            profile_data[etype] = name
        # Split the arguments back into a list
        profile_data['arguments'] = profile['arguments'].split('|')
        # Append the new data to our actual output structure
        data.append(profile_data)
    close_database(conn, cur)
    return data

def _find_id_by_name(entries, name):
    """
    Return the 'id' of the first entry whose 'name' equals `name`, or None.
    """
    for entry in entries:
        if entry['name'] == name:
            return entry['id']
    return None

def create_profile(name, system_template, network_template, storage_template, userdata_template, script, arguments=None):
    """
    Create a new profile tying together one template of each type and a
    script, plus optional script arguments (stored '|'-joined).
    """
    # None default instead of a mutable [] default (shared across calls)
    if arguments is None:
        arguments = []

    if list_profile(name, is_fuzzy=False):
        retmsg = { "message": "The profile {} already exists".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    system_template_id = _find_id_by_name(list_template_system(None), system_template)
    if not system_template_id:
        retmsg = { "message": "The system template {} for profile {} does not exist".format(system_template, name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    network_template_id = _find_id_by_name(list_template_network(None), network_template)
    if not network_template_id:
        retmsg = { "message": "The network template {} for profile {} does not exist".format(network_template, name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    storage_template_id = _find_id_by_name(list_template_storage(None), storage_template)
    if not storage_template_id:
        retmsg = { "message": "The storage template {} for profile {} does not exist".format(storage_template, name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    userdata_template_id = _find_id_by_name(list_template_userdata(None), userdata_template)
    if not userdata_template_id:
        retmsg = { "message": "The userdata template {} for profile {} does not exist".format(userdata_template, name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    script_id = _find_id_by_name(list_script(None), script)
    if not script_id:
        retmsg = { "message": "The script {} for profile {} does not exist".format(script, name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    arguments_formatted = '|'.join(arguments)

    conn, cur = open_database(config)
    try:
        query = "INSERT INTO profile (name, system_template, network_template, storage_template, userdata_template, script, arguments) VALUES (%s, %s, %s, %s, %s, %s, %s);"
        args = (name, system_template_id, network_template_id, storage_template_id, userdata_template_id, script_id, arguments_formatted)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to create entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

def delete_profile(name):
    """
    Delete the profile `name`; returns a (flask.Response, status) tuple.
    """
    if not list_profile(name, is_fuzzy=False):
        retmsg = { "message": "The profile {} does not exist".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

    conn, cur = open_database(config)
    try:
        query = "DELETE FROM profile WHERE name = %s;"
        args = (name,)
        cur.execute(query, args)
        retmsg = { "name": name }
        retcode = 200
    except psycopg2.IntegrityError as e:
        retmsg = { "message": "Failed to delete entry {}".format(name), "error": str(e) }
        retcode = 400
    close_database(conn, cur)
    return flask.jsonify(retmsg), retcode

#
# VM provisioning helper functions
#
def run_os_command(command_string, background=False, environment=None, timeout=None):
    """
    Run an OS command and return (retcode, stdout, stderr) as strings.

    On timeout, retcode is 128 and any output captured before the
    timeout is returned. Output that cannot be decoded as ASCII is
    returned as ''. The `background` parameter is currently unused and
    kept for interface compatibility.
    """
    command = shlex.split(command_string)
    stdout_bytes = b''
    stderr_bytes = b''
    try:
        command_output = subprocess.run(
            command,
            env=environment,
            timeout=timeout,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        retcode = command_output.returncode
        stdout_bytes = command_output.stdout or b''
        stderr_bytes = command_output.stderr or b''
    except subprocess.TimeoutExpired as e:
        # Previously this path left command_output unbound; handle it
        # explicitly and keep whatever output was captured
        retcode = 128
        stdout_bytes = e.stdout or b''
        stderr_bytes = e.stderr or b''

    try:
        stdout = stdout_bytes.decode('ascii')
    except UnicodeDecodeError:
        stdout = ''
    try:
        stderr = stderr_bytes.decode('ascii')
    except UnicodeDecodeError:
        stderr = ''
    return retcode, stdout, stderr

#
# Cloned VM provisioning function - executed by the Celery worker
#
def clone_vm(self, vm_name, vm_profile, source_volumes):
    """
    Provision a VM by cloning existing volumes. Not yet implemented.
    """
    pass

#
# Main VM provisioning function - executed by the Celery worker
#
def create_vm(self, vm_name, vm_profile, define_vm=True, start_vm=True):
    # Runtime imports
    import time
    import importlib
    import uuid
    import datetime
    import random

    time.sleep(2)
    # NOTE(review): the remainder of create_vm continues beyond this
    # chunk of the original source and is left unmodified.
print("Starting provisioning of VM '{}' with profile '{}'".format(vm_name, vm_profile)) + + # Phase 0 - connect to databases + try: + db_conn, db_cur = open_database(config) + except: + print('FATAL - failed to connect to Postgres') + raise Exception + + try: + zk_conn = pvc_common.startZKConnection(config['coordinators']) + except: + print('FATAL - failed to connect to Zookeeper') + raise Exception + + # Phase 1 - setup + # * Get the profile elements + # * Get the details from these elements + # * Assemble a VM configuration dictionary + self.update_state(state='RUNNING', meta={'current': 1, 'total': 10, 'status': 'Collecting configuration'}) + time.sleep(1) + + vm_id = re.findall(r'/(\d+)$/', vm_name) + if not vm_id: + vm_id = 0 + else: + vm_id = vm_id[0] + + vm_data = dict() + + # Get the profile information + query = "SELECT system_template, network_template, storage_template, script, arguments FROM profile WHERE name = %s" + args = (vm_profile,) + db_cur.execute(query, args) + profile_data = db_cur.fetchone() + vm_data['script_arguments'] = profile_data['arguments'].split('|') + + # Get the system details + query = 'SELECT vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, start_with_node FROM system_template WHERE id = %s' + args = (profile_data['system_template'],) + db_cur.execute(query, args) + vm_data['system_details'] = db_cur.fetchone() + + # Get the MAC template + query = 'SELECT mac_template FROM network_template WHERE id = %s' + args = (profile_data['network_template'],) + db_cur.execute(query, args) + vm_data['mac_template'] = db_cur.fetchone()['mac_template'] + + # Get the networks + query = 'SELECT vni FROM network WHERE network_template = %s' + args = (profile_data['network_template'],) + db_cur.execute(query, args) + vm_data['networks'] = db_cur.fetchall() + + # Get the storage volumes + query = 'SELECT pool, disk_id, disk_size_gb, mountpoint, filesystem, filesystem_args FROM storage WHERE storage_template = %s' + args = 
(profile_data['storage_template'],) + db_cur.execute(query, args) + vm_data['volumes'] = db_cur.fetchall() + + # Get the script + query = 'SELECT script FROM script WHERE id = %s' + args = (profile_data['script'],) + db_cur.execute(query, args) + vm_data['script'] = db_cur.fetchone()['script'] + + close_database(db_conn, db_cur) + + print("VM configuration data:\n{}".format(json.dumps(vm_data, sort_keys=True, indent=2))) + + # Phase 2 - verification + # * Ensure that at least one node has enough free RAM to hold the VM (becomes main host) + # * Ensure that all networks are valid + # * Ensure that there is enough disk space in the Ceph cluster for the disks + # This is the "safe fail" step when an invalid configuration will be caught + self.update_state(state='RUNNING', meta={'current': 2, 'total': 10, 'status': 'Verifying configuration against cluster'}) + time.sleep(1) + + # Verify that a VM with this name does not already exist + if pvc_vm.searchClusterByName(zk_conn, vm_name): + raise ClusterError("A VM with the name '{}' already exists in the cluster".format(vm_name)) + + # Verify that at least one host has enough free RAM to run the VM + _discard, nodes = pvc_node.get_list(zk_conn, None) + target_node = None + last_free = 0 + for node in nodes: + # Skip the node if it is not ready to run VMs + if node ['daemon_state'] != "run" or node['domain_state'] != "ready": + continue + # Skip the node if its free memory is less than the new VM's size, plus a 512MB buffer + if node['memory']['free'] < (vm_data['system_details']['vram_mb'] + 512): + continue + # If this node has the most free, use it + if node['memory']['free'] > last_free: + last_free = node['memory']['free'] + target_node = node['name'] + # Raise if no node was found + if not target_node: + raise ClusterError("No ready cluster node contains at least {}+512 MB of free RAM".format(vm_data['system_details']['vram_mb'])) + + print("Selecting target node {} with {} MB free RAM".format(target_node, last_free)) 
+ + # Verify that all configured networks are present on the cluster + cluster_networks, _discard = pvc_network.getClusterNetworkList(zk_conn) + for network in vm_data['networks']: + vni = str(network['vni']) + if not vni in cluster_networks: + raise ClusterError("The network VNI {} is not present on the cluster".format(vni)) + + print("All configured networks for VM are valid") + + # Verify that there is enough disk space free to provision all VM disks + pools = dict() + for volume in vm_data['volumes']: + if not volume['pool'] in pools: + pools[volume['pool']] = volume['disk_size_gb'] + else: + pools[volume['pool']] += volume['disk_size_gb'] + + for pool in pools: + pool_information = pvc_ceph.getPoolInformation(zk_conn, pool) + if not pool_information: + raise ClusterError("Pool {} is not present on the cluster".format(pool)) + pool_free_space_gb = int(pool_information['stats']['free_bytes'] / 1024 / 1024 / 1024) + pool_vm_usage_gb = int(pools[pool]) + + if pool_vm_usage_gb >= pool_free_space_gb: + raise ClusterError("Pool {} has only {} GB free and VM requires {} GB".format(pool, pool_free_space_gb, pool_vm_usage_gb)) + + print("There is enough space on cluster to store VM volumes") + + # Verify that every specified filesystem is valid + used_filesystems = list() + for volume in vm_data['volumes']: + if volume['filesystem'] and volume['filesystem'] not in used_filesystems: + used_filesystems.append(volume['filesystem']) + + for filesystem in used_filesystems: + retcode, stdout, stderr = run_os_command("which mkfs.{}".format(filesystem)) + if retcode: + raise ProvisioningError("Failed to find binary for mkfs.{}: {}".format(filesystem, stderr)) + + print("All selected filesystems are valid") + + # Phase 3 - provisioning script preparation + # * Import the provisioning script as a library with importlib + # * Ensure the required function(s) are present + self.update_state(state='RUNNING', meta={'current': 3, 'total': 10, 'status': 'Preparing provisioning script'}) 
+ time.sleep(1) + + # Write the script out to a temporary file + retcode, stdout, stderr = run_os_command("mktemp") + if retcode: + raise ProvisioningError("Failed to create a temporary file: {}".format(stderr)) + script_file = stdout.strip() + with open(script_file, 'w') as fh: + fh.write(vm_data['script']) + fh.write('\n') + + # Import the script file + loader = importlib.machinery.SourceFileLoader('installer_script', script_file) + spec = importlib.util.spec_from_loader(loader.name, loader) + installer_script = importlib.util.module_from_spec(spec) + loader.exec_module(installer_script) + + # Verify that the install() function is valid + if not "install" in dir(installer_script): + raise ProvisioningError("Specified script does not contain an install() function") + + print("Provisioning script imported successfully") + + # Phase 4 - disk creation + # * Create each Ceph storage volume for the disks + self.update_state(state='RUNNING', meta={'current': 4, 'total': 10, 'status': 'Creating storage volumes'}) + time.sleep(1) + + for volume in vm_data['volumes']: + success, message = pvc_ceph.add_volume(zk_conn, volume['pool'], "{}_{}".format(vm_name, volume['disk_id']), "{}G".format(volume['disk_size_gb'])) + print(message) + if not success: + raise ClusterError("Failed to create volume {}".format(volume['disk_id'])) + + # Phase 5 - disk mapping + # * Map each volume to the local host in order + # * Format each volume with any specified filesystems + # * If any mountpoints are specified, create a temporary mount directory + # * Mount any volumes to their respective mountpoints + self.update_state(state='RUNNING', meta={'current': 5, 'total': 10, 'status': 'Mapping, formatting, and mounting storage volumes locally'}) + time.sleep(1) + + for volume in reversed(vm_data['volumes']): + if not volume['filesystem']: + continue + + rbd_volume = "{}/{}_{}".format(volume['pool'], vm_name, volume['disk_id']) + + filesystem_args_list = list() + for arg in 
volume['filesystem_args'].split(' '): + arg_entry, arg_data = arg.split('=') + filesystem_args_list.append(arg_entry) + filesystem_args_list.append(arg_data) + filesystem_args = ' '.join(filesystem_args_list) + + # Map the RBD device + retcode, stdout, stderr = run_os_command("rbd map {}".format(rbd_volume)) + if retcode: + raise ProvisioningError("Failed to map volume {}: {}".format(rbd_volume, stderr)) + + # Create the filesystem + retcode, stdout, stderr = run_os_command("mkfs.{} {} /dev/rbd/{}".format(volume['filesystem'], filesystem_args, rbd_volume)) + if retcode: + raise ProvisioningError("Failed to create {} filesystem on {}: {}".format(volume['filesystem'], rbd_volume, stderr)) + + print("Created {} filesystem on {}:\n{}".format(volume['filesystem'], rbd_volume, stdout)) + + # Create temporary directory + retcode, stdout, stderr = run_os_command("mktemp -d") + if retcode: + raise ProvisioningError("Failed to create a temporary directory: {}".format(stderr)) + temp_dir = stdout.strip() + + for volume in vm_data['volumes']: + if not volume['mountpoint']: + continue + + mapped_rbd_volume = "/dev/rbd/{}/{}_{}".format(volume['pool'], vm_name, volume['disk_id']) + mount_path = "{}{}".format(temp_dir, volume['mountpoint']) + + # Ensure the mount path exists (within the filesystems) + retcode, stdout, stderr = run_os_command("mkdir -p {}".format(mount_path)) + if retcode: + raise ProvisioningError("Failed to create mountpoint {}: {}".format(mount_path, stderr)) + + # Mount filesystems to temporary directory + retcode, stdout, stderr = run_os_command("mount {} {}".format(mapped_rbd_volume, mount_path)) + if retcode: + raise ProvisioningError("Failed to mount {} on {}: {}".format(mapped_rbd_volume, mount_path, stderr)) + + print("Successfully mounted {} on {}".format(mapped_rbd_volume, mount_path)) + + # Phase 6 - provisioning script execution + # * Execute the provisioning script main function ("install") passing any custom arguments + 
self.update_state(state='RUNNING', meta={'current': 6, 'total': 10, 'status': 'Executing provisioning script'}) + time.sleep(1) + + print("Running installer script") + + # Parse the script arguments + script_arguments = dict() + for argument in vm_data['script_arguments']: + argument_name, argument_data = argument.split('=') + script_arguments[argument_name] = argument_data + + # Run the script + installer_script.install( + vm_name=vm_name, + vm_id=vm_id, + temporary_directory=temp_dir, + disks=vm_data['volumes'], + networks=vm_data['networks'], + **script_arguments + ) + + # Phase 7 - install cleanup + # * Unmount any mounted volumes + # * Remove any temporary directories + self.update_state(state='RUNNING', meta={'current': 7, 'total': 10, 'status': 'Cleaning up local mounts and directories'}) + time.sleep(1) + + for volume in list(reversed(vm_data['volumes'])): + # Unmount the volume + if volume['mountpoint']: + print("Cleaning up mount {}{}".format(temp_dir, volume['mountpoint'])) + + mount_path = "{}{}".format(temp_dir, volume['mountpoint']) + retcode, stdout, stderr = run_os_command("umount {}".format(mount_path)) + if retcode: + raise ProvisioningError("Failed to unmount {}: {}".format(mount_path, stderr)) + + # Unmap the RBD device + if volume['filesystem']: + print("Cleaning up RBD mapping /dev/rbd/{}/{}_{}".format(volume['pool'], vm_name, volume['disk_id'])) + + rbd_volume = "/dev/rbd/{}/{}_{}".format(volume['pool'], vm_name, volume['disk_id']) + retcode, stdout, stderr = run_os_command("rbd unmap {}".format(rbd_volume)) + if retcode: + raise ProvisioningError("Failed to unmap volume {}: {}".format(rbd_volume, stderr)) + + print("Cleaning up temporary directories and files") + + # Remove temporary mount directory (don't fail if not removed) + retcode, stdout, stderr = run_os_command("rmdir {}".format(temp_dir)) + if retcode: + print("Failed to delete temporary directory {}: {}".format(temp_dir, stderr)) + + # Remote temporary script (don't fail if not 
removed) + retcode, stdout, stderr = run_os_command("rm -f {}".format(script_file)) + if retcode: + print("Failed to delete temporary script file {}: {}".format(script_file, stderr)) + + # Phase 8 - configuration creation + # * Create the libvirt XML configuration + self.update_state(state='RUNNING', meta={'current': 8, 'total': 10, 'status': 'Preparing Libvirt XML configuration'}) + time.sleep(1) + + print("Creating Libvirt configuration") + + # Get information about VM + vm_uuid = uuid.uuid4() + vm_description = "PVC provisioner @ {}, profile '{}'".format(datetime.datetime.now(), vm_profile) + + retcode, stdout, stderr = run_os_command("uname -m") + system_architecture = stdout.strip() + + # Begin assembling libvirt schema + vm_schema = "" + + vm_schema += libvirt_schema.libvirt_header.format( + vm_name=vm_name, + vm_uuid=vm_uuid, + vm_description=vm_description, + vm_memory=vm_data['system_details']['vram_mb'], + vm_vcpus=vm_data['system_details']['vcpu_count'], + vm_architecture=system_architecture + ) + + # Add network devices + network_id = 0 + for network in vm_data['networks']: + vni = network['vni'] + eth_bridge = "vmbr{}".format(vni) + + vm_id_hex = '{:x}'.format(int(vm_id % 16)) + net_id_hex = '{:x}'.format(int(network_id % 16)) + mac_prefix = '52:54:00' + + if vm_data['mac_template']: + mactemplate = "{prefix}:ff:f6:{vmid}{netid}" + macgen_template = vm_data['mac_template'] + eth_macaddr = macgen_template.format( + prefix=mac_prefix, + vmid=vm_id_hex, + netid=net_id_hex, + ) + else: + random_octet_A = '{:x}'.format(random.randint(16,238)) + random_octet_B = '{:x}'.format(random.randint(16,238)) + random_octet_C = '{:x}'.format(random.randint(16,238)) + + macgen_template = '{prefix}:{octetA}:{octetB}:{octetC}' + eth_macaddr = macgen_template.format( + prefix=mac_prefix, + octetA=random_octet_A, + octetB=random_octet_B, + octetC=random_octet_C + ) + + vm_schema += libvirt_schema.devices_net_interface.format( + eth_macaddr=eth_macaddr, + 
eth_bridge=eth_bridge + ) + + network_id += 1 + + # Add disk devices + monitor_list = list() + coordinator_names = config['storage_hosts'] + for coordinator in coordinator_names: + monitor_list.append("{}.{}".format(coordinator, config['storage_domain'])) + + ceph_storage_secret = config['ceph_storage_secret_uuid'] + + for volume in vm_data['volumes']: + vm_schema += libvirt_schema.devices_disk_header.format( + ceph_storage_secret=ceph_storage_secret, + disk_pool=volume['pool'], + vm_name=vm_name, + disk_id=volume['disk_id'] + ) + for monitor in monitor_list: + vm_schema += libvirt_schema.devices_disk_coordinator.format( + coordinator_name=monitor, + coordinator_ceph_mon_port=config['ceph_monitor_port'] + ) + vm_schema += libvirt_schema.devices_disk_footer + + vm_schema += libvirt_schema.devices_vhostmd + + # Add default devices + vm_schema += libvirt_schema.devices_default + + # Add serial device + if vm_data['system_details']['serial']: + vm_schema += libvirt_schema.devices_serial.format( + vm_name=vm_name + ) + + # Add VNC device + if vm_data['system_details']['vnc']: + if vm_data['system_details']['vnc_bind']: + vm_vnc_bind = vm_data['system_details']['vnc_bind'] + else: + vm_vnc_bind = "127.0.0.1" + + vm_vncport = 5900 + vm_vnc_autoport = "yes" + + vm_schema += libvirt_schema.devices_vnc.format( + vm_vncport=vm_vncport, + vm_vnc_autoport=vm_vnc_autoport, + vm_vnc_bind=vm_vnc_bind + ) + + # Add SCSI controller + vm_schema += libvirt_schema.devices_scsi_controller + + # Add footer + vm_schema += libvirt_schema.libvirt_footer + + print("Final VM schema:\n{}\n".format(vm_schema)) + + # Phase 9 - definition + # * Create the VM in the PVC cluster + # * Start the VM in the PVC cluster + self.update_state(state='RUNNING', meta={'current': 9, 'total': 10, 'status': 'Defining and starting VM on the cluster'}) + time.sleep(1) + + if start_vm and not define_vm: + start_vm = False + + if define_vm or start_vm: + print("Defining and starting VM on cluster") + + if 
define_vm: + retcode, retmsg = pvc_vm.define_vm(zk_conn, vm_schema, target_node, vm_data['system_details']['node_limit'].split(','), vm_data['system_details']['node_selector'], vm_data['system_details']['start_with_node'], vm_profile) + print(retmsg) + + if start_vm: + retcode, retmsg = pvc_vm.start_vm(zk_conn, vm_name) + print(retmsg) + + pvc_common.stopZKConnection(zk_conn) + return {"status": "VM '{}' with profile '{}' has been provisioned and started successfully".format(vm_name, vm_profile), "current": 10, "total": 10} + diff --git a/client-api/provisioner/examples/debootstrap_script.py b/client-api/provisioner/examples/debootstrap_script.py new file mode 100644 index 00000000..da5bc666 --- /dev/null +++ b/client-api/provisioner/examples/debootstrap_script.py @@ -0,0 +1,232 @@ +#!/usr/bin/env python3 + +# debootstrap_script.py - PVC Provisioner example script for Debootstrap +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2019 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +############################################################################### + +# This script provides an example of a PVC provisioner script. 
It will install +# a Debian system, of the release specified in the keyword argument `deb_release` +# and from the mirror specified in the keyword argument `deb_mirror`, and +# including the packages specified in the keyword argument `deb_packages` (a list +# of strings, which is then joined together as a CSV and passed to debootstrap), +# to the configured disks, configure fstab, and install GRUB. Any later config +# should be done within the VM, for instance via cloud-init. + +# This script can thus be used as an example or reference implementation of a +# PVC provisioner script and expanded upon as required. + +# This script will run under root privileges as the provisioner does. Be careful +# with that. + +import os + +# Installation function - performs a debootstrap install of a Debian system +# Note that the only arguments are keyword arguments. +def install(**kwargs): + # The provisioner has already mounted the disks on kwargs['temporary_directory']. + # by this point, so we can get right to running the debootstrap after setting + # some nicer variable names; you don't necessarily have to do this. + vm_name = kwargs['vm_name'] + temporary_directory = kwargs['temporary_directory'] + disks = kwargs['disks'] + networks = kwargs['networks'] + # Our own required arguments. We should, though are not required to, handle + # failures of these gracefully, should administrators forget to specify them. 
+ try: + deb_release = kwargs['deb_release'] + except: + deb_release = "stable" + try: + deb_mirror = kwargs['deb_mirror'] + except: + deb_mirror = "http://ftp.debian.org/debian" + try: + deb_packages = kwargs['deb_packages'].split(',') + except: + deb_packages = ["linux-image-amd64", "grub-pc", "cloud-init", "python3-cffi-backend"] + + # We need to know our root disk + root_disk = None + for disk in disks: + if disk['mountpoint'] == '/': + root_disk = disk + if not root_disk: + return + + # Ensure we have debootstrap intalled on the provisioner system; this is a + # good idea to include if you plan to use anything that is not part of the + # base Debian host system, just in case the provisioner host is not properly + # configured already. + os.system( + "apt-get install -y debootstrap" + ) + + # Perform a deboostrap installation + os.system( + "debootstrap --include={pkgs} {suite} {target} {mirror}".format( + suite=deb_release, + target=temporary_directory, + mirror=deb_mirror, + pkgs=','.join(deb_packages) + ) + ) + + # Bind mount the devfs + os.system( + "mount --bind /dev {}/dev".format( + temporary_directory + ) + ) + + # Create an fstab entry for each disk + fstab_file = "{}/etc/fstab".format(temporary_directory) + for disk in disks: + # We assume SSD-based/-like storage, and dislike atimes + options = "defaults,discard,noatime,nodiratime" + + # The root and var volumes have specific values + if disk['mountpoint'] == "/": + dump = 0 + cpass = 1 + elif disk['mountpoint'] == '/var': + dump = 0 + cpass = 2 + else: + dump = 0 + cpass = 0 + + # Append the fstab line + with open(fstab_file, 'a') as fh: + data = "/dev/{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format( + disk=disk['disk_id'], + mountpoint=disk['mountpoint'], + filesystem=disk['filesystem'], + options=options, + dump=dump, + cpass=cpass + ) + fh.write(data) + + # Write the hostname + hostname_file = "{}/etc/hostname".format(temporary_directory) + with open(hostname_file, 'w') as fh: 
+ fh.write("{}".format(vm_name)) + + # Fix the cloud-init.target since it's broken + cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(temporary_directory) + with open(cloudinit_target_file, 'w') as fh: + data = """[Install] +WantedBy=multi-user.target +[Unit] +Description=Cloud-init target +After=multi-user.target +""" + fh.write(data) + + # NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface + # will always be on PCI bus ID 2, hence the name "ens2". + # Write a DHCP stanza for ens2 + ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(temporary_directory) + with open(ens2_network_file, 'w') as fh: + data = """auto ens2 +iface ens2 inet dhcp +""" + fh.write(data) + + # Write the DHCP config for ens2 + dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory) + with open(dhclient_file, 'w') as fh: + data = """# DHCP client configuration +# Created by vminstall for host web1.i.bonilan.net +option rfc3442-classless-static-routes code 121 = array of unsigned integer 8; +interface "ens2" { + send host-name = "web1"; + send fqdn.fqdn = "web1"; + request subnet-mask, broadcast-address, time-offset, routers, + domain-name, domain-name-servers, domain-search, host-name, + dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers, + netbios-name-servers, netbios-scope, interface-mtu, + rfc3442-classless-static-routes, ntp-servers; +} +""" + fh.write(data) + + # Write the GRUB configuration + grubcfg_file = "{}/etc/default/grub".format(temporary_directory) + with open(grubcfg_file, 'w') as fh: + data = """# Written by the PVC provisioner +GRUB_DEFAULT=0 +GRUB_TIMEOUT=1 +GRUB_DISTRIBUTOR="PVC Virtual Machine" +GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/{root_disk} console=tty0 console=ttyS0,115200n8" +GRUB_CMDLINE_LINUX="" +GRUB_TERMINAL=console +GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1" +GRUB_DISABLE_LINUX_UUID=false 
+""".format(root_disk=root_disk['disk_id']) + fh.write(data) + + # Chroot, do some in-root tasks, then exit the chroot + # EXITING THE CHROOT IS VERY IMPORTANT OR THE FOLLOWING STAGES OF THE PROVISIONER + # WILL FAIL IN UNEXPECTED WAYS! Keep this in mind when using chroot in your scripts. + real_root = os.open("/", os.O_RDONLY) + os.chroot(temporary_directory) + fake_root = os.open("/", os.O_RDONLY) + os.fchdir(fake_root) + + # Install and update GRUB + os.system( + "grub-install --force /dev/rbd/{}/{}_{}".format(root_disk['pool'], vm_name, root_disk['disk_id']) + ) + os.system( + "update-grub" + ) + # Set a really dumb root password [TEMPORARY] + os.system( + "echo root:test123 | chpasswd" + ) + # Enable cloud-init target on (first) boot + # NOTE: Your user-data should handle this and disable it once done, or things get messy. + # That cloud-init won't run without this hack seems like a bug... but even the official + # Debian cloud images are affected, so who knows. + os.system( + "systemctl enable cloud-init.target" + ) + + # Restore our original root/exit the chroot + # EXITING THE CHROOT IS VERY IMPORTANT OR THE FOLLOWING STAGES OF THE PROVISIONER + # WILL FAIL IN UNEXPECTED WAYS! Keep this in mind when using chroot in your scripts. 
+ os.fchdir(real_root) + os.chroot(".") + os.fchdir(real_root) + os.close(fake_root) + os.close(real_root) + + # Unmount the bound devfs + os.system( + "umount {}/dev".format( + temporary_directory + ) + ) + + # Clean up file handles so paths can be unmounted + del fake_root + del real_root + + # Everything else is done via cloud-init user-data diff --git a/client-api/provisioner/examples/dummy_script.py b/client-api/provisioner/examples/dummy_script.py new file mode 100644 index 00000000..7ac207fa --- /dev/null +++ b/client-api/provisioner/examples/dummy_script.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +# dummy_script.py - PVC Provisioner example script for noop +# Part of the Parallel Virtual Cluster (PVC) system +# +# Copyright (C) 2018-2019 Joshua M. Boniface +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +############################################################################### + +# This script provides an example of a PVC provisioner script. It will do +# nothing and return back to the provisioner without taking any action, and +# expecting no special arguments. + +# This script can thus be used as an example or reference implementation of a +# PVC provisioner script and expanded upon as required. + +# This script will run under root privileges as the provisioner does. Be careful +# with that. 
+ +import os + +# Installation function - performs a debootstrap install of a Debian system +# Note that the only arguments are keyword arguments. +def install(**kwargs): + # The provisioner has already mounted the disks on kwargs['temporary_directory']. + # by this point, so we can get right to running the debootstrap after setting + # some nicer variable names; you don't necessarily have to do this. + vm_name = kwargs['vm_name'] + temporary_directory = kwargs['temporary_directory'] + disks = kwargs['disks'] + networks = kwargs['networks'] + # No operation - this script just returns + pass diff --git a/client-api/provisioner/examples/multipart-userdata.yaml b/client-api/provisioner/examples/multipart-userdata.yaml new file mode 100644 index 00000000..3db3abee --- /dev/null +++ b/client-api/provisioner/examples/multipart-userdata.yaml @@ -0,0 +1,16 @@ +Content-Type: multipart/mixed; boundary="==BOUNDARY==" +MIME-Version: 1.0 + +--==BOUNDARY== +Content-Type: text/cloud-config; charset="us-ascii" + +users: + - blah + +--==BOUNDARY== +Content-Type: text/x-shellscript; charset="us-ascii" + +#!/bin/bash +echo "koz is koz" >> /etc/motd + +--==BOUNDARY==-- diff --git a/client-api/provisioner/examples/userdata.yaml b/client-api/provisioner/examples/userdata.yaml new file mode 100644 index 00000000..faf1276d --- /dev/null +++ b/client-api/provisioner/examples/userdata.yaml @@ -0,0 +1,27 @@ +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 + +#cloud-config +# Example user-data file to set up an alternate /var/home, a first user and some SSH keys, and some packages +bootcmd: + - "mv /home /var/" + - "locale-gen" +package_update: true +packages: + - openssh-server + - sudo +users: + - name: deploy + gecos: Deploy User + homedir: /var/home/deploy + sudo: "ALL=(ALL) NOPASSWD: ALL" + groups: adm, sudo + lock_passwd: true + ssh_authorized_keys: + - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBRBGPzlbh5xYD6k8DMZdPNEwemZzKSSpWGOuU72ehfN joshua@bonifacelabs.net 2017-04 
+runcmd: + - "userdel debian" + - "groupmod -g 200 deploy" + - "usermod -u 200 deploy" + - "systemctl disable cloud-init.target" + - "reboot" diff --git a/client-api/pvc-api.py b/client-api/pvc-api.py index e618825f..ac8065a5 100755 --- a/client-api/pvc-api.py +++ b/client-api/pvc-api.py @@ -25,9 +25,13 @@ import json import yaml import os +import distutils.util import gevent.pywsgi -import api_lib.api as pvcapi +import celery as Celery + +import api_lib.pvcapi_helper as api_helper +import api_lib.pvcapi_provisioner as api_provisioner # Parse the configuration file try: @@ -58,16 +62,36 @@ try: 'auth_tokens': o_config['pvc']['api']['authentication']['tokens'], 'ssl_enabled': o_config['pvc']['api']['ssl']['enabled'], 'ssl_key_file': o_config['pvc']['api']['ssl']['key_file'], - 'ssl_cert_file': o_config['pvc']['api']['ssl']['cert_file'] + 'ssl_cert_file': o_config['pvc']['api']['ssl']['cert_file'], + 'database_host': o_config['pvc']['provisioner']['database']['host'], + 'database_port': int(o_config['pvc']['provisioner']['database']['port']), + 'database_name': o_config['pvc']['provisioner']['database']['name'], + 'database_user': o_config['pvc']['provisioner']['database']['user'], + 'database_password': o_config['pvc']['provisioner']['database']['pass'], + 'queue_host': o_config['pvc']['provisioner']['queue']['host'], + 'queue_port': o_config['pvc']['provisioner']['queue']['port'], + 'queue_path': o_config['pvc']['provisioner']['queue']['path'], + 'storage_hosts': o_config['pvc']['provisioner']['ceph_cluster']['storage_hosts'], + 'storage_domain': o_config['pvc']['provisioner']['ceph_cluster']['storage_domain'], + 'ceph_monitor_port': o_config['pvc']['provisioner']['ceph_cluster']['ceph_monitor_port'], + 'ceph_storage_secret_uuid': o_config['pvc']['provisioner']['ceph_cluster']['ceph_storage_secret_uuid'] } - # Set the config object in the pvcapi namespace - pvcapi.config = config + # Use coordinators as storage hosts if not explicitly specified + if not 
config['storage_hosts']: + config['storage_hosts'] = config['coordinators'] + + # Set the config object in the api_helper namespace + api_helper.config = config + # Set the config object in the api_provisioner namespace + api_provisioner.config = config except Exception as e: print('ERROR: {}.'.format(e)) exit(1) api = flask.Flask(__name__) +api.config['CELERY_BROKER_URL'] = 'redis://{}:{}{}'.format(config['queue_host'], config['queue_port'], config['queue_path']) +api.config['CELERY_RESULT_BACKEND'] = 'redis://{}:{}{}'.format(config['queue_host'], config['queue_port'], config['queue_path']) if config['debug']: api.config['DEBUG'] = True @@ -75,6 +99,9 @@ if config['debug']: if config['auth_enabled']: api.config["SECRET_KEY"] = config['auth_secret_key'] +celery = Celery.Celery(api.name, broker=api.config['CELERY_BROKER_URL']) +celery.conf.update(api.config) + # Authentication decorator function def authenticator(function): def authenticate(*args, **kwargs): @@ -99,6 +126,18 @@ def authenticator(function): authenticate.__name__ = function.__name__ return authenticate +# +# Job functions +# + +@celery.task(bind=True) +def create_vm(self, vm_name, profile_name): + return api_provisioner.create_vm(self, vm_name, profile_name) + +########################################################## +# API Root/Authentication +########################################################## + @api.route('/api/v1', methods=['GET']) def api_root(): return flask.jsonify({"message":"PVC API version 1"}), 209 @@ -137,6 +176,10 @@ def api_auth_logout(): flask.session.pop('token', None) return flask.redirect(flask.url_for('api_root')) +########################################################## +# Cluster API +########################################################## + # # Node endpoints # @@ -149,50 +192,50 @@ def api_node_root(): else: limit = None - return pvcapi.node_list(limit) + return api_helper.node_list(limit) @api.route('/api/v1/node/', methods=['GET']) @authenticator def 
api_node_element(node): # Same as specifying /node?limit=NODE - return pvcapi.node_list(node) + return api_helper.node_list(node) @api.route('/api/v1/node//daemon-state', methods=['GET']) @authenticator def api_node_daemon_state(node): if flask.request.method == 'GET': - return pvcapi.node_daemon_state(node) + return api_helper.node_daemon_state(node) @api.route('/api/v1/node//coordinator-state', methods=['GET', 'POST']) @authenticator def api_node_coordinator_state(node): if flask.request.method == 'GET': - return pvcapi.node_coordinator_state(node) + return api_helper.node_coordinator_state(node) if flask.request.method == 'POST': if not 'coordinator-state' in flask.request.values: flask.abort(400) new_state = flask.request.values['coordinator-state'] if new_state == 'primary': - return pvcapi.node_primary(node) + return api_helper.node_primary(node) if new_state == 'secondary': - return pvcapi.node_secondary(node) + return api_helper.node_secondary(node) flask.abort(400) @api.route('/api/v1/node//domain-state', methods=['GET', 'POST']) @authenticator def api_node_domain_state(node): if flask.request.method == 'GET': - return pvcapi.node_domain_state(node) + return api_helper.node_domain_state(node) if flask.request.method == 'POST': if not 'domain-state' in flask.request.values: flask.abort(400) new_state = flask.request.values['domain-state'] if new_state == 'ready': - return pvcapi.node_ready(node) + return api_helper.node_ready(node) if new_state == 'flush': - return pvcapi.node_flush(node) + return api_helper.node_flush(node) flask.abort(400) # @@ -220,7 +263,7 @@ def api_vm_root(): else: limit = None - return pvcapi.vm_list(node, state, limit) + return api_helper.vm_list(node, state, limit) if flask.request.method == 'POST': # Get XML data @@ -253,14 +296,14 @@ def api_vm_root(): else: autostart = False - return pvcapi.vm_define(vm, libvirt_xml, node, selector) + return api_helper.vm_define(vm, libvirt_xml, node, selector) @api.route('/api/v1/vm/', 
methods=['GET', 'POST', 'PUT', 'DELETE']) @authenticator def api_vm_element(vm): if flask.request.method == 'GET': # Same as specifying /vm?limit=VM - return pvcapi.vm_list(None, None, vm, is_fuzzy=False) + return api_helper.vm_list(None, None, vm, is_fuzzy=False) if flask.request.method == 'POST': # Set target limit metadata @@ -283,7 +326,7 @@ def api_vm_element(vm): else: autostart = None - return pvcapi.vm_meta(vm, limit, selector, autostart) + return api_helper.vm_meta(vm, limit, selector, autostart) if flask.request.method == 'PUT': libvirt_xml = flask.request.data @@ -293,39 +336,39 @@ def api_vm_element(vm): else: flag_restart = False - return pvcapi.vm_modify(vm, flag_restart, libvirt_xml) + return api_helper.vm_modify(vm, flag_restart, libvirt_xml) if flask.request.method == 'DELETE': if 'delete_disks' in flask.request.values and flask.request.values['delete_disks']: - return pvcapi.vm_remove(vm) + return api_helper.vm_remove(vm) else: - return pvcapi.vm_undefine(vm) + return api_helper.vm_undefine(vm) @api.route('/api/v1/vm//state', methods=['GET', 'POST']) @authenticator def api_vm_state(vm): if flask.request.method == 'GET': - return pvcapi.vm_state(vm) + return api_helper.vm_state(vm) if flask.request.method == 'POST': if not 'state' in flask.request.values: flask.abort(400) new_state = flask.request.values['state'] if new_state == 'start': - return pvcapi.vm_start(vm) + return api_helper.vm_start(vm) if new_state == 'shutdown': - return pvcapi.vm_shutdown(vm) + return api_helper.vm_shutdown(vm) if new_state == 'stop': - return pvcapi.vm_stop(vm) + return api_helper.vm_stop(vm) if new_state == 'restart': - return pvcapi.vm_restart(vm) + return api_helper.vm_restart(vm) flask.abort(400) @api.route('/api/v1/vm//node', methods=['GET', 'POST']) @authenticator def api_vm_node(vm): if flask.request.method == 'GET': - return pvcapi.vm_node(vm) + return api_helper.vm_node(vm) if flask.request.method == 'POST': if 'action' in flask.request.values: @@ -350,14 
+393,14 @@ def api_vm_node(vm): flag_force = False # Check if VM is presently migrated - is_migrated = pvcapi.vm_is_migrated(vm) + is_migrated = api_helper.vm_is_migrated(vm) if action == 'migrate' and not flag_permanent: - return pvcapi.vm_migrate(vm, node, flag_force) + return api_helper.vm_migrate(vm, node, flag_force) if action == 'migrate' and flag_permanent: - return pvcapi.vm_move(vm, node) + return api_helper.vm_move(vm, node) if action == 'unmigrate' and is_migrated: - return pvcapi.vm_unmigrate(vm) + return api_helper.vm_unmigrate(vm) flask.abort(400) @@ -368,7 +411,7 @@ def api_vm_locks(vm): return "Not implemented", 400 if flask.request.method == 'POST': - return pvcapi.vm_flush_locks(vm) + return api_helper.vm_flush_locks(vm) # @@ -384,7 +427,7 @@ def api_net_root(): else: limit = None - return pvcapi.net_list(limit) + return api_helper.net_list(limit) if flask.request.method == 'POST': # Get network VNI @@ -461,7 +504,7 @@ def api_net_root(): else: dhcp4_end = None - return pvcapi.net_add(vni, description, nettype, domain, name_servers, + return api_helper.net_add(vni, description, nettype, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end) @@ -470,7 +513,7 @@ def api_net_root(): def api_net_element(network): # Same as specifying /network?limit=NETWORK if flask.request.method == 'GET': - return pvcapi.net_list(network) + return api_helper.net_list(network) if flask.request.method == 'PUT': # Get network description @@ -533,13 +576,13 @@ def api_net_element(network): else: dhcp4_end = None - return pvcapi.net_modify(network, description, domain, name_servers, + return api_helper.net_modify(network, description, domain, name_servers, ip4_network, ip4_gateway, ip6_network, ip6_gateway, dhcp4_flag, dhcp4_start, dhcp4_end) if flask.request.method == 'DELETE': - return pvcapi.net_remove(network) + return api_helper.net_remove(network) @api.route('/api/v1/network//lease', methods=['GET', 'POST']) 
@authenticator @@ -557,7 +600,7 @@ def api_net_lease_root(network): else: flag_static = False - return pvcapi.net_dhcp_list(network, limit. flag_static) + return api_helper.net_dhcp_list(network, limit, flag_static) if flask.request.method == 'POST': # Get lease macaddr @@ -577,17 +620,17 @@ def api_net_lease_root(network): else: hostname = None - return pvcapi.net_dhcp_add(network, ipaddress, lease, hostname) + return api_helper.net_dhcp_add(network, ipaddress, lease, hostname) @api.route('/api/v1/network//lease/', methods=['GET', 'DELETE']) @authenticator def api_net_lease_element(network, lease): if flask.request.method == 'GET': # Same as specifying /network?limit=NETWORK - return pvcapi.net_dhcp_list(network, lease, False) + return api_helper.net_dhcp_list(network, lease, False) if flask.request.method == 'DELETE': - return pvcapi.net_dhcp_remove(network, lease) + return api_helper.net_dhcp_remove(network, lease) @api.route('/api/v1/network//acl', methods=['GET', 'POST']) @authenticator @@ -607,7 +650,7 @@ def api_net_acl_root(network): else: direction = None - return pvcapi.net_acl_list(network, limit, direction) + return api_helper.net_acl_list(network, limit, direction) if flask.request.method == 'POST': # Get ACL description @@ -636,14 +679,14 @@ def api_net_acl_root(network): else: order = None - return pvcapi.net_acl_add(network, direction, acl, rule, order) + return api_helper.net_acl_add(network, direction, acl, rule, order) @api.route('/api/v1/network//acl/', methods=['GET', 'DELETE']) @authenticator def api_net_acl_element(network, acl): if flask.request.method == 'GET': # Same as specifying /network?limit=NETWORK - return pvcapi.net_acl_list(network, acl, None) + return api_helper.net_acl_list(network, acl, None) if flask.request.method == 'DELETE': # Get rule direction @@ -654,7 +697,7 @@ def api_net_acl_element(network, acl): else: return flask.jsonify({"message":"ERROR: A direction must be specified for the ACL."}), 400 - return 
pvcapi.net_acl_remove(network, direction, acl) + return api_helper.net_acl_remove(network, direction, acl) # # Storage (Ceph) endpoints @@ -672,12 +715,12 @@ def api_storage(): @api.route('/api/v1/storage/ceph/status', methods=['GET']) @authenticator def api_ceph_status(): - return pvcapi.ceph_status() + return api_helper.ceph_status() @api.route('/api/v1/storage/ceph/df', methods=['GET']) @authenticator def api_ceph_radosdf(): - return pvcapi.ceph_radosdf() + return api_helper.ceph_radosdf() @api.route('/api/v1/storage/ceph/cluster-option', methods=['POST']) @authenticator @@ -697,9 +740,9 @@ def api_ceph_cluster_option(): return flask.jsonify({"message":"ERROR: An option must be specified."}), 400 if action == 'set': - return pvcapi.ceph_osd_set(option) + return api_helper.ceph_osd_set(option) if action == 'unset': - return pvcapi.ceph_osd_unset(option) + return api_helper.ceph_osd_unset(option) @api.route('/api/v1/storage/ceph/osd', methods=['GET', 'POST']) @authenticator @@ -711,7 +754,7 @@ def api_ceph_osd_root(): else: limit = None - return pvcapi.ceph_osd_list(limit) + return api_helper.ceph_osd_list(limit) if flask.request.method == 'POST': # Get OSD node @@ -732,27 +775,27 @@ def api_ceph_osd_root(): else: return flask.jsonify({"message":"ERROR: An OSD weight must be specified."}), 400 - return pvcapi.ceph_osd_add(node, device, weight) + return api_helper.ceph_osd_add(node, device, weight) @api.route('/api/v1/storage/ceph/osd/', methods=['GET', 'DELETE']) @authenticator def api_ceph_osd_element(osd): if flask.request.method == 'GET': # Same as specifying /osd?limit=OSD - return pvcapi.ceph_osd_list(osd) + return api_helper.ceph_osd_list(osd) if flask.request.method == 'DELETE': # Verify yes-i-really-mean-it flag if not 'yes_i_really_mean_it' in flask.request.values: return flask.jsonify({"message":"ERROR: This command can have unintended consequences and should not be automated; if you're sure you know what you're doing, resend with the argument 
'yes_i_really_mean_it'."}), 400 - return pvcapi.ceph_osd_remove(osd) + return api_helper.ceph_osd_remove(osd) @api.route('/api/v1/storage/ceph/osd//state', methods=['GET', 'POST']) @authenticator def api_ceph_osd_state(osd): if flask.request.method == 'GET': - return pvcapi.ceph_osd_state(osd) + return api_helper.ceph_osd_state(osd) if flask.request.method == 'POST': if 'state' in flask.request.values: @@ -763,9 +806,9 @@ def api_ceph_osd_state(osd): return flask.jsonify({"message":"ERROR: A state must be specified."}), 400 if state == 'in': - return pvcapi.ceph_osd_in(osd) + return api_helper.ceph_osd_in(osd) if state == 'out': - return pvcapi.ceph_osd_out(osd) + return api_helper.ceph_osd_out(osd) @api.route('/api/v1/storage/ceph/pool', methods=['GET', 'POST']) @authenticator @@ -777,7 +820,7 @@ def api_ceph_pool_root(): else: limit = None - return pvcapi.ceph_pool_list(limit) + return api_helper.ceph_pool_list(limit) if flask.request.method == 'POST': # Get pool name @@ -800,21 +843,21 @@ def api_ceph_pool_root(): # We default to copies=3,mincopies=2 replcfg = 'copies=3,mincopies=2' - return pvcapi.ceph_pool_add(pool, pgs) + return api_helper.ceph_pool_add(pool, pgs) @api.route('/api/v1/storage/ceph/pool/', methods=['GET', 'DELETE']) @authenticator def api_ceph_pool_element(pool): if flask.request.method == 'GET': # Same as specifying /pool?limit=POOL - return pvcapi.ceph_pool_list(pool) + return api_helper.ceph_pool_list(pool) if flask.request.method == 'DELETE': # Verify yes-i-really-mean-it flag if not 'yes_i_really_mean_it' in flask.request.values: return flask.jsonify({"message":"ERROR: This command can have unintended consequences and should not be automated; if you're sure you know what you're doing, resend with the argument 'yes_i_really_mean_it'."}), 400 - return pvcapi.ceph_pool_remove(pool) + return api_helper.ceph_pool_remove(pool) @api.route('/api/v1/storage/ceph/volume', methods=['GET', 'POST']) @authenticator @@ -832,7 +875,7 @@ def 
api_ceph_volume_root(): else: limit = None - return pvcapi.ceph_volume_list(pool, limit) + return api_helper.ceph_volume_list(pool, limit) if flask.request.method == 'POST': # Get volume name @@ -863,16 +906,16 @@ def api_ceph_volume_root(): return flask.jsonify({"message":"ERROR: A volume size in bytes (or with an M/G/T suffix) must be specified."}), 400 if source_volume: - return pvcapi.ceph_volume_clone(pool, volume, source_volume) + return api_helper.ceph_volume_clone(pool, volume, source_volume) else: - return pvcapi.ceph_volume_add(pool, volume, size) + return api_helper.ceph_volume_add(pool, volume, size) @api.route('/api/v1/storage/ceph/volume//', methods=['GET', 'PUT', 'DELETE']) @authenticator def api_ceph_volume_element(pool, volume): if flask.request.method == 'GET': # Same as specifying /volume?limit=VOLUME - return pvcapi.ceph_volume_list(pool, volume) + return api_helper.ceph_volume_list(pool, volume) if flask.request.method == 'PUT': if 'size' in flask.request.values: @@ -882,15 +925,15 @@ def api_ceph_volume_element(pool, volume): name = flask.request.values['name'] if size and not name: - return pvcapi.ceph_volume_resize(pool, volume, size) + return api_helper.ceph_volume_resize(pool, volume, size) if name and not size: - return pvcapi.ceph_volume_rename(pool, volume, name) + return api_helper.ceph_volume_rename(pool, volume, name) return flask.jsonify({"message":"ERROR: No name or size specified, or both specified; not changing anything."}), 400 if flask.request.method == 'DELETE': - return pvcapi.ceph_volume_remove(pool, volume) + return api_helper.ceph_volume_remove(pool, volume) @api.route('/api/v1/storage/ceph/volume/snapshot', methods=['GET', 'POST']) @authenticator @@ -914,7 +957,7 @@ def api_ceph_volume_snapshot_root(): else: limit = None - return pvcapi.ceph_volume_snapshot_list(pool, volume, limit) + return api_helper.ceph_volume_snapshot_list(pool, volume, limit) if flask.request.method == 'POST': # Get snapshot name @@ -935,7 +978,7 @@ 
def api_ceph_volume_snapshot_root(): else: return flask.jsonify({"message":"ERROR: A pool name must be spcified."}), 400 - return pvcapi.ceph_volume_snapshot_add(pool, volume, snapshot) + return api_helper.ceph_volume_snapshot_add(pool, volume, snapshot) @api.route('/api/v1/storage/ceph/volume/snapshot///', methods=['GET', 'PUT', 'DELETE']) @@ -943,7 +986,7 @@ def api_ceph_volume_snapshot_root(): def api_ceph_volume_snapshot_element(pool, volume, snapshot): if flask.request.method == 'GET': # Same as specifying /snapshot?limit=VOLUME - return pvcapi.ceph_volume_snapshot_list(pool, volume, snapshot) + return api_helper.ceph_volume_snapshot_list(pool, volume, snapshot) if flask.request.method == 'PUT': if 'name' in flask.request.values: @@ -951,32 +994,1132 @@ def api_ceph_volume_snapshot_element(pool, volume, snapshot): else: return flask.jsonify({"message":"ERROR: A new name must be specified."}), 400 - return pvcapi.ceph_volume_snapshot_rename(pool, volume, snapshot, name) + return api_helper.ceph_volume_snapshot_rename(pool, volume, snapshot, name) if flask.request.method == 'DELETE': - return pvcapi.ceph_volume_snapshot_remove(pool, volume, snapshot) + return api_helper.ceph_volume_snapshot_remove(pool, volume, snapshot) + +########################################################## +# Provisioner API +########################################################## + +# +# Template endpoints +# +@api.route('/api/v1/provisioner/template', methods=['GET']) +@authenticator +def api_template_root(): + """ + /template - Manage provisioning templates for VM creation. + + GET: List all templates in the provisioning system. + ?limit: Specify a limit to queries. Fuzzy by default; use ^ and $ to force exact matches. 
+ """ + # Get name limit + if 'limit' in flask.request.values: + limit = flask.request.values['limit'] + else: + limit = None + + return flask.jsonify(api_provisioner.template_list(limit)), 200 + +@api.route('/api/v1/provisioner/template/system', methods=['GET', 'POST']) +@authenticator +def api_template_system_root(): + """ + /template/system - Manage system provisioning templates for VM creation. + + GET: List all system templates in the provisioning system. + ?limit: Specify a limit to queries. Fuzzy by default; use ^ and $ to force exact matches. + * type: text + * optional: true + * requires: N/A + + POST: Add new system template. + ?name: The name of the template. + * type: text + * optional: false + * requires: N/A + ?vcpus: The number of VCPUs. + * type: integer + * optional: false + * requires: N/A + ?vram: The amount of RAM in MB. + * type: integer, Megabytes (MB) + * optional: false + * requires: N/A + ?serial: Enable serial console. + * type: boolean + * optional: false + * requires: N/A + ?vnc: True/False, enable VNC console. + * type: boolean + * optional: false + * requires: N/A + ?vnc_bind: Address to bind VNC to. 
+ * default: '127.0.0.1' + * type: IP Address (or '0.0.0.0' wildcard) + * optional: true + * requires: vnc=True + ?node_limit: CSV list of node(s) to limit VM operation to + * type: CSV of valid PVC nodes + * optional: true + * requires: N/A + ?node_selector: Selector to use for node migrations after initial provisioning + * type: Valid PVC node selector + * optional: true + * requires: N/A + ?start_with_node: Whether to start limited node with the parent node + * default: false + * type: boolean + * optional: true + * requires: N/A + """ + if flask.request.method == 'GET': + # Get name limit + if 'limit' in flask.request.values: + limit = flask.request.values['limit'] + else: + limit = None + + return flask.jsonify(api_provisioner.list_template_system(limit)), 200 + + if flask.request.method == 'POST': + # Get name data + if 'name' in flask.request.values: + name = flask.request.values['name'] + else: + return flask.jsonify({"message": "A name must be specified."}), 400 + + # Get vcpus data + if 'vcpus' in flask.request.values: + try: + vcpu_count = int(flask.request.values['vcpus']) + except: + return flask.jsonify({"message": "A vcpus value must be an integer."}), 400 + else: + return flask.jsonify({"message": "A vcpus value must be specified."}), 400 + + # Get vram data + if 'vram' in flask.request.values: + try: + vram_mb = int(flask.request.values['vram']) + except: + return flask.jsonify({"message": "A vram integer value in Megabytes must be specified."}), 400 + else: + return flask.jsonify({"message": "A vram integer value in Megabytes must be specified."}), 400 + + # Get serial configuration + if 'serial' in flask.request.values and bool(distutils.util.strtobool(flask.request.values['serial'])): + serial = True + else: + serial = False + + # Get VNC configuration + if 'vnc' in flask.request.values and bool(distutils.util.strtobool(flask.request.values['vnc'])): + vnc = True + + if 'vnc_bind' in flask.request.values: + vnc_bind = 
flask.request.values['vnc_bind'] + else: + vnc_bind = None + else: + vnc = False + vnc_bind = None + + # Get metadata + if 'node_limit' in flask.request.values: + node_limit = flask.request.values['node_limit'] + else: + node_limit = None + + if 'node_selector' in flask.request.values: + node_selector = flask.request.values['node_selector'] + else: + node_selector = None + + if 'start_with_node' in flask.request.values and bool(distutils.util.strtobool(flask.request.values['start_with_node'])): + start_with_node = True + else: + start_with_node = False + + return api_provisioner.create_template_system(name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, start_with_node) + +@api.route('/api/v1/provisioner/template/system/