Further work on provisioner
parent 3471f4e57a
commit 2dd6247d7b
@@ -122,6 +122,11 @@ def install(**kwargs):
        cpass=cpass
    ))

    # Write the hostname
    hostname_file = "{}/etc/hostname".format(temporary_directory)
    with open(hostname_file, 'w') as fh:
        fh.write("{}".format(vm_name))

    # Write the GRUB configuration
    grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
    with open(grubcfg_file, 'w') as fh:
@@ -149,6 +154,9 @@ GRUB_DISABLE_LINUX_UUID=false
    os.system(
        "update-grub"
    )
    os.system(
        "echo root:test123 | chpasswd"
    )
    # Restore our original root
    os.fchdir(real_root)
    os.chroot(".")
@@ -163,4 +171,8 @@ GRUB_DISABLE_LINUX_UUID=false
        )
    )

    # Clean up file handles so paths can be unmounted
    del fake_root
    del real_root

    # Everything else is done via cloud-init
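The install() hunks above run inside a chroot of the freshly provisioned volume, and the real_root/fake_root handles are what let the task escape that chroot again before the volumes are unmounted. A minimal sketch of the enter/restore pattern, assuming the handles are os.open() file descriptors (the enter side is not part of these hunks, so its exact form here is an assumption):

import os

def run_inside_chroot(temporary_directory):
    # Keep a handle on the real root so the chroot can be reversed later
    # (this mirrors the real_root/fake_root handles in the diff above).
    real_root = os.open("/", os.O_RDONLY)
    fake_root = os.open(temporary_directory, os.O_RDONLY)

    # Enter the chroot of the freshly installed system
    os.chdir(temporary_directory)
    os.chroot(".")

    # ... run update-grub, chpasswd, and similar commands here ...

    # Restore the original root, as the diff does with os.fchdir(real_root)
    os.fchdir(real_root)
    os.chroot(".")

    # Release the handles so the temporary paths can be unmounted
    os.close(fake_root)
    os.close(real_root)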
@@ -0,0 +1,150 @@
#!/usr/bin/env python3

# libvirt_schema.py - Libvirt schema elements
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2019 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# File header, containing default values for various non-device components
# Variables:
#  * vm_name
#  * vm_uuid
#  * vm_description
#  * vm_memory
#  * vm_vcpus
#  * vm_architecture
libvirt_header = """
<domain type='kvm'>
  <name>{vm_name}</name>
  <uuid>{vm_uuid}</uuid>
  <description>{vm_description}</description>
  <memory unit='MiB'>{vm_memory}</memory>
  <vcpu>{vm_vcpus}</vcpu>
  <cpu>
    <topology sockets='1' cores='{vm_vcpus}' threads='1'/>
  </cpu>
  <os>
    <type arch='{vm_architecture}' machine='pc-i440fx-2.7'>hvm</type>
    <bootmenu enable='yes'/>
    <boot dev='cdrom'/>
    <boot dev='hd'/>
  </os>
  <features>
    <acpi/>
    <apic/>
    <pae/>
  </features>
  <clock offset='utc'/>
  <on_poweroff>destroy</on_poweroff>
  <on_reboot>restart</on_reboot>
  <on_crash>restart</on_crash>
  <devices>
"""

# File footer, closing devices and domain elements
libvirt_footer = """
  </devices>
</domain>
"""

# Default devices for all VMs
devices_default = """
    <emulator>/usr/bin/kvm</emulator>
    <controller type='usb' index='0'/>
    <controller type='pci' index='0' model='pci-root'/>
    <rng model='virtio'>
      <rate period="1000" bytes="2048"/>
      <backend model='random'>/dev/random</backend>
    </rng>
"""

# Serial device
# Variables:
#  * vm_name
devices_serial = """
    <serial type='pty'>
      <log file='/var/log/libvirt/{vm_name}.log' append='on'/>
    </serial>
    <console type='pty'/>
"""

# VNC device
# Variables:
#  * vm_vncport
#  * vm_vnc_autoport
#  * vm_vnc_bind
devices_vnc = """
    <graphics type='vnc' port='{vm_vncport}' autoport='{vm_vnc_autoport}' listen='{vm_vnc_bind}'/>
"""

# VirtIO SCSI device
devices_scsi_controller = """
    <controller type='scsi' index='0' model='virtio-scsi'/>
"""

# Disk device header
# Variables:
#  * ceph_storage_secret
#  * disk_pool
#  * vm_name
#  * disk_id
devices_disk_header = """
    <disk type='network' device='disk'>
      <driver name='qemu' discard='unmap'/>
      <target dev='{disk_id}' bus='scsi'/>
      <auth username='libvirt'>
        <secret type='ceph' uuid='{ceph_storage_secret}'/>
      </auth>
      <source protocol='rbd' name='{disk_pool}/{vm_name}_{disk_id}'>
"""

# Disk device coordinator element
# Variables:
#  * coordinator_name
#  * coordinator_ceph_mon_port
devices_disk_coordinator = """
        <host name='{coordinator_name}' port='{coordinator_ceph_mon_port}'/>
"""

# Disk device footer
devices_disk_footer = """
      </source>
    </disk>
"""

# vhostmd virtualization passthrough device
devices_vhostmd = """
    <disk type='file' device='disk'>
      <driver name='qemu' type='raw'/>
      <source file='/dev/shm/vhostmd0'/>
      <target dev='sdz' bus='usb'/>
      <readonly/>
    </disk>
"""

# Network interface device
# Variables:
#  * eth_macaddr
#  * eth_bridge
devices_net_interface = """
    <interface type='bridge'>
      <mac address='{eth_macaddr}'/>
      <source bridge='{eth_bridge}'/>
      <model type='virtio'/>
    </interface>
"""
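libvirt_schema only provides string fragments; the provisioner concatenates them and fills the placeholders with str.format(), as the create_vm task further down shows. As a small illustration of the disk pattern (header, one coordinator element per Ceph monitor, footer), with made-up example values:

import provisioner_lib.libvirt_schema as libvirt_schema

# Example values only; the real ones come from the VM profile and daemon config
monitors = ['hv1.s.bonilan.net', 'hv2.s.bonilan.net', 'hv3.s.bonilan.net']

disk_xml = libvirt_schema.devices_disk_header.format(
    ceph_storage_secret='c416032b-2ce9-457f-a5c2-18704a3485f4',
    disk_pool='vms',
    vm_name='test1',
    disk_id='sda'
)
for monitor in monitors:
    disk_xml += libvirt_schema.devices_disk_coordinator.format(
        coordinator_name=monitor,
        coordinator_ceph_mon_port=6789
    )
disk_xml += libvirt_schema.devices_disk_footer

print(disk_xml)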
@@ -36,6 +36,8 @@ import client_lib.vm as pvc_vm
import client_lib.network as pvc_network
import client_lib.ceph as pvc_ceph

import provisioner_lib.libvirt_schema as libvirt_schema

#
# Exceptions (used by Celery tasks)
#
@@ -173,14 +175,14 @@ def template_list(limit):
#
# Template Create functions
#
-def create_template_system(name, vcpu_count, vram_mb, serial=False, vnc=False, vnc_bind=None):
+def create_template_system(name, vcpu_count, vram_mb, serial=False, vnc=False, vnc_bind=None, node_limit=None, node_selector=None, start_with_node=False):
    if list_template_system(name, is_fuzzy=False):
        retmsg = { "message": "The system template {} already exists".format(name) }
        retcode = 400
        return flask.jsonify(retmsg), retcode

-    query = "INSERT INTO system_template (name, vcpu_count, vram_mb, serial, vnc, vnc_bind) VALUES (%s, %s, %s, %s, %s, %s);"
-    args = (name, vcpu_count, vram_mb, serial, vnc, vnc_bind)
+    query = "INSERT INTO system_template (name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, start_with_node) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
+    args = (name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, start_with_node)

    conn, cur = open_database(config)
    try:
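The handler leans on open_database()/close_database() helpers and %s placeholders, and create_vm later reads the fetched rows by column name. That points at psycopg2-style parameter binding with a dict-style cursor; a sketch of what those helpers could look like under that assumption (the connection parameters are placeholders, not keys from this commit):

import psycopg2
import psycopg2.extras

def open_database(config):
    # Assumed helper: return a connection plus a dict-style cursor so rows
    # can be read as row['vcpu_count'], row['node_limit'], and so on.
    conn = psycopg2.connect(
        host=config['database_host'],          # placeholder config keys
        dbname=config['database_name'],
        user=config['database_user'],
        password=config['database_password']
    )
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    return conn, cur

def close_database(conn, cur):
    conn.commit()
    cur.close()
    conn.close()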
@@ -666,6 +668,9 @@ def create_vm(self, vm_name, vm_profile):
    # Runtime imports
    import time
    import importlib
    import uuid
    import datetime
    import random

    time.sleep(2)
@@ -690,7 +695,13 @@ def create_vm(self, vm_name, vm_profile):
    # * Assemble a VM configuration dictionary
    self.update_state(state='RUNNING', meta={'current': 1, 'total': 10, 'status': 'Collecting configuration'})
    time.sleep(1)

    vm_id = re.findall(r'/(\d+)$/', vm_name)
    if not vm_id:
        vm_id = 0
    else:
        vm_id = vm_id[0]

    vm_data = dict()

    # Get the profile information
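The added block derives a numeric vm_id from a trailing digit group in the VM name (for example test4 gives 4, with a fallback of 0 when there is no suffix). Note that the pattern in the diff keeps JavaScript-style slashes inside the raw string, so it can never match; a plain Python version of the same idea, as a hypothetical helper rather than code from the commit, would be:

import re

def vm_name_to_id(vm_name):
    # Extract a trailing decimal ID from the VM name, e.g. 'test4' -> 4;
    # names without a numeric suffix fall back to 0.
    match = re.findall(r'(\d+)$', vm_name)
    if not match:
        return 0
    return int(match[0])

# vm_name_to_id('pvc-test12') == 12; vm_name_to_id('server') == 0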
@@ -701,7 +712,7 @@ def create_vm(self, vm_name, vm_profile):
    vm_data['script_arguments'] = profile_data['arguments'].split('|')

    # Get the system details
-    query = 'SELECT vcpu_count, vram_mb, serial, vnc, vnc_bind FROM system_template WHERE id = %s'
+    query = 'SELECT vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, start_with_node FROM system_template WHERE id = %s'
    args = (profile_data['system_template'],)
    db_cur.execute(query, args)
    vm_data['system_details'] = db_cur.fetchone()
@@ -732,6 +743,8 @@ def create_vm(self, vm_name, vm_profile):

    close_database(db_conn, db_cur)

    print("VM configuration data:\n{}".format(json.dumps(vm_data, sort_keys=True, indent=2)))

    # Phase 2 - verification
    # * Ensure that at least one node has enough free RAM to hold the VM (becomes main host)
    # * Ensure that all networks are valid
@@ -849,7 +862,7 @@ def create_vm(self, vm_name, vm_profile):
    self.update_state(state='RUNNING', meta={'current': 5, 'total': 10, 'status': 'Mapping, formatting, and mounting storage volumes locally'})
    time.sleep(1)

-    for volume in vm_data['volumes']:
+    for volume in reversed(vm_data['volumes']):
        if not volume['filesystem']:
            continue
@@ -915,33 +928,182 @@ def create_vm(self, vm_name, vm_profile):
    # Run the script
    installer_script.install(
        vm_name=vm_name,
-        vm_id=re.findall(r'/(\d+)$/', vm_name),
+        vm_id=vm_id,
        temporary_directory=temp_dir,
        disks=vm_data['volumes'],
        networks=vm_data['networks'],
        **script_arguments
    )

-    return

    # Phase 7 - install cleanup
    # * Unmount any mounted volumes
    # * Remove any temporary directories
    self.update_state(state='RUNNING', meta={'current': 7, 'total': 10, 'status': 'Cleaning up local mounts and directories'})
    time.sleep(1)

    for volume in list(reversed(vm_data['volumes'])):
        # Unmount the volume
        if volume['mountpoint']:
            print("Cleaning up mount {}{}".format(temp_dir, volume['mountpoint']))

            mount_path = "{}{}".format(temp_dir, volume['mountpoint'])
            retcode, stdout, stderr = run_os_command("umount {}".format(mount_path))
            if retcode:
                raise ProvisioningError("Failed to unmount {}: {}".format(mount_path, stderr))

        # Unmap the RBD device
        if volume['filesystem']:
            print("Cleaning up RBD mapping /dev/rbd/{}/{}_{}".format(volume['pool'], vm_name, volume['disk_id']))

            rbd_volume = "/dev/rbd/{}/{}_{}".format(volume['pool'], vm_name, volume['disk_id'])
            retcode, stdout, stderr = run_os_command("rbd unmap {}".format(rbd_volume))
            if retcode:
                raise ProvisioningError("Failed to unmap volume {}: {}".format(rbd_volume, stderr))

    print("Cleaning up temporary directories and files")

    # Remove temporary mount directory (don't fail if not removed)
    retcode, stdout, stderr = run_os_command("rmdir {}".format(temp_dir))
    if retcode:
        print("Failed to delete temporary directory {}: {}".format(temp_dir, stderr))

    # Remove temporary script (don't fail if not removed)
    retcode, stdout, stderr = run_os_command("rm -f {}".format(script_file))
    if retcode:
        print("Failed to delete temporary script file {}: {}".format(script_file, stderr))

    # Phase 8 - configuration creation
    # * Create the libvirt XML configuration
    self.update_state(state='RUNNING', meta={'current': 8, 'total': 10, 'status': 'Preparing Libvirt XML configuration'})
-    time.sleep(5)
+    time.sleep(1)

    print("Creating Libvirt configuration")

    # Get information about VM
    vm_uuid = uuid.uuid4()
    vm_description = "PVC provisioner @ {}, profile '{}'".format(datetime.datetime.now(), vm_profile)

    retcode, stdout, stderr = run_os_command("uname -m")
    system_architecture = stdout.strip()

    # Begin assembling libvirt schema
    vm_schema = ""

    vm_schema += libvirt_schema.libvirt_header.format(
        vm_name=vm_name,
        vm_uuid=vm_uuid,
        vm_description=vm_description,
        vm_memory=vm_data['system_details']['vram_mb'],
        vm_vcpus=vm_data['system_details']['vcpu_count'],
        vm_architecture=system_architecture
    )

    # Add default devices
    vm_schema += libvirt_schema.devices_default

    # Add serial device
    if vm_data['system_details']['serial']:
        vm_schema += libvirt_schema.devices_serial.format(
            vm_name=vm_name
        )

    # Add VNC device
    if vm_data['system_details']['vnc']:
        if vm_data['system_details']['vnc_bind']:
            vm_vnc_bind = vm_data['system_details']['vnc_bind']
        else:
            vm_vnc_bind = "127.0.0.1"

        vm_vncport = 5900
        vm_vnc_autoport = "yes"

        vm_schema += libvirt_schema.devices_vnc.format(
            vm_vncport=vm_vncport,
            vm_vnc_autoport=vm_vnc_autoport,
            vm_vnc_bind=vm_vnc_bind
        )

    # Add SCSI controller
    vm_schema += libvirt_schema.devices_scsi_controller

    # Add disk devices
    monitor_list = list()
    coordinator_names = config['storage_hosts']
    for coordinator in coordinator_names:
        monitor_list.append("{}.{}".format(coordinator, config['storage_domain']))

    ceph_storage_secret = config['ceph_storage_secret_uuid']

    for volume in vm_data['volumes']:
        vm_schema += libvirt_schema.devices_disk_header.format(
            ceph_storage_secret=ceph_storage_secret,
            disk_pool=volume['pool'],
            vm_name=vm_name,
            disk_id=volume['disk_id']
        )
        for monitor in monitor_list:
            vm_schema += libvirt_schema.devices_disk_coordinator.format(
                coordinator_name=monitor,
                coordinator_ceph_mon_port=config['ceph_monitor_port']
            )
        vm_schema += libvirt_schema.devices_disk_footer

    vm_schema += libvirt_schema.devices_vhostmd

    # Add network devices
    network_id = 0
    for network in vm_data['networks']:
        vni = network['vni']
        eth_bridge = "vmbr{}".format(vni)

        vm_id_hex = '{:x}'.format(int(vm_id % 16))
        net_id_hex = '{:x}'.format(int(network_id % 16))
        mac_prefix = '52:54:00'

        if vm_data['mac_template']:
            mactemplate = "{prefix}:ff:f6:{vmid}{netid}"
            macgen_template = vm_data['mac_template']
            eth_macaddr = macgen_template.format(
                prefix=mac_prefix,
                vmid=vm_id_hex,
                netid=net_id_hex,
            )
        else:
            random_octet_A = '{:x}'.format(random.randint(16,238))
            random_octet_B = '{:x}'.format(random.randint(16,238))
            random_octet_C = '{:x}'.format(random.randint(16,238))

            macgen_template = '{prefix}:{octetA}:{octetB}:{octetC}'
            eth_macaddr = macgen_template.format(
                prefix=mac_prefix,
                octetA=random_octet_A,
                octetB=random_octet_B,
                octetC=random_octet_C
            )

        vm_schema += libvirt_schema.devices_net_interface.format(
            eth_macaddr=eth_macaddr,
            eth_bridge=eth_bridge
        )

        network_id += 1

    vm_schema += libvirt_schema.libvirt_footer

    print("Final VM schema:\n{}\n".format(vm_schema))

    # Phase 9 - definition
    # * Create the VM in the PVC cluster
    # * Start the VM in the PVC cluster
    self.update_state(state='RUNNING', meta={'current': 9, 'total': 10, 'status': 'Defining and starting VM on the cluster'})
-    time.sleep(5)
+    time.sleep(1)

    print("Defining and starting VM on cluster")

    retcode, retmsg = pvc_vm.define_vm(zk_conn, vm_schema, target_node, vm_data['system_details']['node_limit'].split(','), vm_data['system_details']['node_selector'], vm_data['system_details']['start_with_node'])
    print(retmsg)
    retcode, retmsg = pvc_vm.start_vm(zk_conn, vm_name)
    print(retmsg)

    pvc_common.stopZKConnection(zk_conn)
    return {"status": "VM '{}' with profile '{}' has been provisioned and started successfully".format(vm_name, vm_profile), "current": 10, "total": 10}
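The network loop above builds one MAC address per interface, either from the template stored on the network template (using the {prefix}, {vmid} and {netid} substitutions) or from three random octets under the 52:54:00 prefix. Pulled out as a standalone helper for clarity, a sketch rather than code from the commit:

import random

def generate_mac(vm_id, network_id, mac_template=None):
    mac_prefix = '52:54:00'
    if mac_template:
        # Template form, e.g. '{prefix}:ff:ff:{vmid}{netid}'
        return mac_template.format(
            prefix=mac_prefix,
            vmid='{:x}'.format(int(vm_id) % 16),
            netid='{:x}'.format(int(network_id) % 16)
        )
    # Random form: three octets in the 0x10-0xee range, as in the diff
    return '{}:{:02x}:{:02x}:{:02x}'.format(
        mac_prefix,
        random.randint(16, 238),
        random.randint(16, 238),
        random.randint(16, 238)
    )

# generate_mac(4, 0, '{prefix}:ff:ff:{vmid}{netid}') == '52:54:00:ff:ff:40'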
@@ -70,8 +70,15 @@ try:
        'queue_host': o_config['pvc']['provisioner']['queue']['host'],
        'queue_port': o_config['pvc']['provisioner']['queue']['port'],
        'queue_path': o_config['pvc']['provisioner']['queue']['path'],
        'storage_hosts': o_config['pvc']['cluster']['storage_hosts'],
        'storage_domain': o_config['pvc']['cluster']['storage_domain'],
        'ceph_monitor_port': o_config['pvc']['cluster']['ceph_monitor_port'],
        'ceph_storage_secret_uuid': o_config['pvc']['cluster']['ceph_storage_secret_uuid']
    }

    if not config['storage_hosts']:
        config['storage_hosts'] = config['coordinators']

    # Set the config object in the pvcapi namespace
    pvcprovisioner.config = config
except Exception as e:
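The daemon flattens the relevant parts of the YAML configuration (shown further down in this commit) into a single config dict and falls back to the coordinator list when storage_hosts is left empty. A condensed sketch of that load path, assuming PyYAML and an inferred path for the coordinators key (the real code wraps this in the try/except shown above):

import yaml

with open('pvc-provisioner.yaml', 'r') as fh:   # file name is an assumption
    o_config = yaml.safe_load(fh)

config = {
    'coordinators': o_config['pvc']['coordinators'],   # key path inferred from the YAML hunk below
    'storage_hosts': o_config['pvc']['cluster']['storage_hosts'],
    'storage_domain': o_config['pvc']['cluster']['storage_domain'],
    'ceph_monitor_port': o_config['pvc']['cluster']['ceph_monitor_port'],
    'ceph_storage_secret_uuid': o_config['pvc']['cluster']['ceph_storage_secret_uuid'],
}

# With no dedicated storage hosts configured, the Ceph monitors are assumed
# to live on the coordinators themselves.
if not config['storage_hosts']:
    config['storage_hosts'] = config['coordinators']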
@@ -228,6 +235,19 @@ def api_template_system_root():
        * type: IP Address (or '0.0.0.0' wildcard)
        * optional: true
        * requires: vnc=True
    ?node_limit: CSV list of node(s) to limit VM operation to
        * type: CSV of valid PVC nodes
        * optional: true
        * requires: N/A
    ?node_selector: Selector to use for node migrations after initial provisioning
        * type: Valid PVC node selector
        * optional: true
        * requires: N/A
    ?start_with_node: Whether to start the VM with its parent node
        * default: false
        * type: boolean
        * optional: true
        * requires: N/A
    """
    if flask.request.method == 'GET':
        # Get name limit
@@ -281,7 +301,22 @@ def api_template_system_root():
            vnc = False
            vnc_bind = None

-        return pvcprovisioner.create_template_system(name, vcpu_count, vram_mb, serial, vnc, vnc_bind)
        if 'node_limit' in flask.request.values:
            node_limit = flask.request.values['node_limit']
        else:
            node_limit = None

        if 'node_selector' in flask.request.values:
            node_selector = flask.request.values['node_selector']
        else:
            node_selector = None

        if 'start_with_node' in flask.request.values and flask.request.values['start_with_node']:
            start_with_node = True
        else:
            start_with_node = False

+        return pvcprovisioner.create_template_system(name, vcpu_count, vram_mb, serial, vnc, vnc_bind, node_limit, node_selector, start_with_node)

@api.route('/api/v1/template/system/<template>', methods=['GET', 'POST', 'DELETE'])
@authenticator
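With the handler above in place, the new scheduling fields travel as ordinary request values when a system template is created. A hypothetical client call; the endpoint host/port, authentication, and the field names other than node_limit, node_selector and start_with_node are assumptions, not taken from this commit:

import requests

resp = requests.post(
    'http://pvc-provisioner.local:7375/api/v1/template/system',   # host and port are assumptions
    params={
        'name': 'small-server',
        'vcpus': 2,              # assumed field names for the pre-existing parameters
        'vram': 2048,
        'serial': True,
        'node_limit': 'hv1,hv2',
        'node_selector': 'mem',
        'start_with_node': True,
    }
)
print(resp.status_code, resp.json())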
@@ -379,9 +414,16 @@ def api_template_network_root():
        * optional: false
        * requires: N/A
    ?mac_template: The MAC address template for the network template.
-        * type: text
+        * type: MAC address template
        * optional: true
        * requires: N/A

    The MAC address template should use the following conventions:
      * use {prefix} to represent the Libvirt MAC prefix, always "52:54:00"
      * use {vmid} to represent the hex value (<16) of the host's ID (e.g. server4 has ID 4, server has ID 0)
      * use {netid} to represent the hex value (<16) of the network's sequential integer ID (first is 0, etc.)

    Example: "{prefix}:ff:ff:{vmid}{netid}"
    """
    if flask.request.method == 'GET':
        # Get name limit
@@ -20,6 +20,21 @@ pvc:
    - hv1
    - hv2
    - hv3
  # cluster: Information about the cluster
  cluster:
    # storage_hosts: The list of hosts that the Ceph monitors are valid on; if empty (the default),
    # uses the list of coordinators
    storage_hosts:
      - ceph1
      - ceph2
      - ceph3
    # storage_domain: The storage domain name, concatenated with the coordinators list names
    # to form monitor access strings
    storage_domain: "s.bonilan.net"
    # ceph_monitor_port: The port that the Ceph monitor on each coordinator listens on
    ceph_monitor_port: 6789
    # ceph_storage_secret_uuid: Libvirt secret UUID for Ceph storage access
    ceph_storage_secret_uuid: "c416032b-2ce9-457f-a5c2-18704a3485f4"
  # provisioner: Configuration of the Provisioner API listener
  provisioner:
    # listen_address: IP address(es) to listen on; use 0.0.0.0 for all interfaces
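As the comments say, each storage host name is concatenated with storage_domain to form a monitor access string, and create_vm turns each one into a <host/> element of the disk definition. A tiny sketch with the example values above:

storage_hosts = ['ceph1', 'ceph2', 'ceph3']
storage_domain = 's.bonilan.net'
ceph_monitor_port = 6789

monitor_list = ['{}.{}'.format(host, storage_domain) for host in storage_hosts]
# ['ceph1.s.bonilan.net', 'ceph2.s.bonilan.net', 'ceph3.s.bonilan.net']

# Each entry ends up as e.g. <host name='ceph1.s.bonilan.net' port='6789'/>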
@@ -1,6 +1,6 @@
create database pvcprov owner pvcprov;
\c pvcprov
-create table system_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, vcpu_count INT NOT NULL, vram_mb INT NOT NULL, serial BOOL NOT NULL, vnc BOOL NOT NULL, vnc_bind TEXT);
+create table system_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, vcpu_count INT NOT NULL, vram_mb INT NOT NULL, serial BOOL NOT NULL, vnc BOOL NOT NULL, vnc_bind TEXT, node_limit TEXT, node_selector TEXT, start_with_node BOOL NOT NULL);
create table network_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, mac_template TEXT);
create table network (id SERIAL PRIMARY KEY, network_template INT REFERENCES network_template(id), vni INT NOT NULL);
create table storage_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE);
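The SQL above recreates the schema from scratch; an existing pvcprov database would instead need the three new columns added in place. A hypothetical migration, not part of this commit, written with psycopg2 to match the other examples:

import psycopg2

conn = psycopg2.connect(dbname='pvcprov', user='pvcprov')   # credentials are assumptions
cur = conn.cursor()

# Bring an existing system_template table up to the new definition
cur.execute("ALTER TABLE system_template ADD COLUMN node_limit TEXT;")
cur.execute("ALTER TABLE system_template ADD COLUMN node_selector TEXT;")
cur.execute("ALTER TABLE system_template ADD COLUMN start_with_node BOOL NOT NULL DEFAULT false;")

conn.commit()
cur.close()
conn.close()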