Move all provisioner API functionality into main

Joshua Boniface 2019-12-14 14:12:55 -05:00
parent 45dbc0eef8
commit 0727a7f6ed
17 changed files with 3001 additions and 128 deletions

@@ -0,0 +1,138 @@
#!/usr/bin/env python3
# libvirt_schema.py - Libvirt schema elements
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2019 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################
# File header, containing default values for various non-device components
# Variables:
# * vm_name
# * vm_uuid
# * vm_description
# * vm_memory
# * vm_vcpus
# * vm_architecture
libvirt_header = """<domain type='kvm'>
    <name>{vm_name}</name>
    <uuid>{vm_uuid}</uuid>
    <description>{vm_description}</description>
    <memory unit='MiB'>{vm_memory}</memory>
    <vcpu>{vm_vcpus}</vcpu>
    <cpu>
        <topology sockets='1' cores='{vm_vcpus}' threads='1'/>
    </cpu>
    <os>
        <type arch='{vm_architecture}' machine='pc-i440fx-2.7'>hvm</type>
        <bootmenu enable='yes'/>
        <boot dev='cdrom'/>
        <boot dev='hd'/>
    </os>
    <features>
        <acpi/>
        <apic/>
        <pae/>
    </features>
    <clock offset='utc'/>
    <on_poweroff>destroy</on_poweroff>
    <on_reboot>restart</on_reboot>
    <on_crash>restart</on_crash>
    <devices>
"""

# File footer, closing devices and domain elements
libvirt_footer = """    </devices>
</domain>"""

# Default devices for all VMs
devices_default = """        <emulator>/usr/bin/kvm</emulator>
        <controller type='usb' index='0'/>
        <controller type='pci' index='0' model='pci-root'/>
        <rng model='virtio'>
            <rate period='1000' bytes='2048'/>
            <backend model='random'>/dev/random</backend>
        </rng>
"""

# Serial device
# Variables:
# * vm_name
devices_serial = """        <serial type='pty'>
            <log file='/var/log/libvirt/{vm_name}.log' append='on'/>
        </serial>
        <console type='pty'/>
"""

# VNC device
# Variables:
# * vm_vncport
# * vm_vnc_autoport
# * vm_vnc_bind
devices_vnc = """        <graphics type='vnc' port='{vm_vncport}' autoport='{vm_vnc_autoport}' listen='{vm_vnc_bind}'/>
"""

# VirtIO SCSI device
devices_scsi_controller = """        <controller type='scsi' index='0' model='virtio-scsi'/>
"""

# Disk device header
# Variables:
# * ceph_storage_secret
# * disk_pool
# * vm_name
# * disk_id
devices_disk_header = """        <disk type='network' device='disk'>
            <driver name='qemu' discard='unmap'/>
            <target dev='{disk_id}' bus='scsi'/>
            <auth username='libvirt'>
                <secret type='ceph' uuid='{ceph_storage_secret}'/>
            </auth>
            <source protocol='rbd' name='{disk_pool}/{vm_name}_{disk_id}'>
"""

# Disk device coordinator element
# Variables:
# * coordinator_name
# * coordinator_ceph_mon_port
devices_disk_coordinator = """                <host name='{coordinator_name}' port='{coordinator_ceph_mon_port}'/>
"""

# Disk device footer
devices_disk_footer = """            </source>
        </disk>
"""

# vhostmd virtualization passthrough device
devices_vhostmd = """        <disk type='file' device='disk'>
            <driver name='qemu' type='raw'/>
            <source file='/dev/shm/vhostmd0'/>
            <target dev='sdz' bus='usb'/>
            <readonly/>
        </disk>
"""

# Network interface device
# Variables:
# * eth_macaddr
# * eth_bridge
devices_net_interface = """        <interface type='bridge'>
            <mac address='{eth_macaddr}'/>
            <source bridge='{eth_bridge}'/>
            <model type='virtio'/>
        </interface>
"""

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-# api.py - PVC HTTP API functions
+# pvcapi_helper.py - PVC HTTP API functions
 # Part of the Parallel Virtual Cluster (PVC) system
 #
 # Copyright (C) 2018-2019 Joshua M. Boniface <joshua@boniface.me>

File diff suppressed because it is too large.

@@ -0,0 +1,232 @@
#!/usr/bin/env python3
# debootstrap_script.py - PVC Provisioner example script for Debootstrap
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2019 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################
# This script provides an example of a PVC provisioner script. It installs a
# Debian system of the release specified in the keyword argument `deb_release`,
# from the mirror specified in the keyword argument `deb_mirror`, including the
# packages specified in the keyword argument `deb_packages` (a comma-separated
# string, which is split into a list and re-joined for debootstrap's --include
# option), onto the configured disks; it then configures fstab and installs GRUB.
# Any later configuration should be done within the VM, for instance via cloud-init.
# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.
# This script will run with root privileges, as the provisioner does. Be careful
# with that.
import os

# Installation function - performs a debootstrap install of a Debian system
# Note that the only arguments are keyword arguments.
def install(**kwargs):
    # The provisioner has already mounted the disks on kwargs['temporary_directory']
    # by this point, so we can get right to running debootstrap after setting
    # some nicer variable names; you don't necessarily have to do this.
    vm_name = kwargs['vm_name']
    temporary_directory = kwargs['temporary_directory']
    disks = kwargs['disks']
    networks = kwargs['networks']

    # Our own optional arguments. We should, though are not required to, handle
    # their absence gracefully, should administrators forget to specify them.
    try:
        deb_release = kwargs['deb_release']
    except KeyError:
        deb_release = "stable"
    try:
        deb_mirror = kwargs['deb_mirror']
    except KeyError:
        deb_mirror = "http://ftp.debian.org/debian"
    try:
        deb_packages = kwargs['deb_packages'].split(',')
    except KeyError:
        deb_packages = ["linux-image-amd64", "grub-pc", "cloud-init", "python3-cffi-backend"]

    # We need to know our root disk
    root_disk = None
    for disk in disks:
        if disk['mountpoint'] == '/':
            root_disk = disk
    if not root_disk:
        return

    # Ensure we have debootstrap installed on the provisioner system; this is a
    # good idea to include if you plan to use anything that is not part of the
    # base Debian host system, just in case the provisioner host is not properly
    # configured already.
    os.system(
        "apt-get install -y debootstrap"
    )

    # Perform a debootstrap installation
    os.system(
        "debootstrap --include={pkgs} {suite} {target} {mirror}".format(
            suite=deb_release,
            target=temporary_directory,
            mirror=deb_mirror,
            pkgs=','.join(deb_packages)
        )
    )

    # Bind mount the devfs
    os.system(
        "mount --bind /dev {}/dev".format(
            temporary_directory
        )
    )

    # Create an fstab entry for each disk
    fstab_file = "{}/etc/fstab".format(temporary_directory)
    for disk in disks:
        # We assume SSD-based/-like storage, and dislike atimes
        options = "defaults,discard,noatime,nodiratime"

        # The root and var volumes have specific values
        if disk['mountpoint'] == "/":
            dump = 0
            cpass = 1
        elif disk['mountpoint'] == '/var':
            dump = 0
            cpass = 2
        else:
            dump = 0
            cpass = 0

        # Append the fstab line
        with open(fstab_file, 'a') as fh:
            data = "/dev/{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
                disk=disk['disk_id'],
                mountpoint=disk['mountpoint'],
                filesystem=disk['filesystem'],
                options=options,
                dump=dump,
                cpass=cpass
            )
            fh.write(data)

    # Write the hostname
    hostname_file = "{}/etc/hostname".format(temporary_directory)
    with open(hostname_file, 'w') as fh:
        fh.write("{}".format(vm_name))

    # Fix the cloud-init.target since it's broken
    cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(temporary_directory)
    with open(cloudinit_target_file, 'w') as fh:
        data = """[Install]
WantedBy=multi-user.target

[Unit]
Description=Cloud-init target
After=multi-user.target
"""
        fh.write(data)

    # NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
    # will always be on PCI bus ID 2, hence the name "ens2".
    # Write a DHCP stanza for ens2
    ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(temporary_directory)
    with open(ens2_network_file, 'w') as fh:
        data = """auto ens2
iface ens2 inet dhcp
"""
        fh.write(data)

    # Write the DHCP config for ens2
    dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
    with open(dhclient_file, 'w') as fh:
        data = """# DHCP client configuration
# Created by vminstall for host web1.i.bonilan.net
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
interface "ens2" {
    send host-name = "web1";
    send fqdn.fqdn = "web1";
    request subnet-mask, broadcast-address, time-offset, routers,
        domain-name, domain-name-servers, domain-search, host-name,
        dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
        netbios-name-servers, netbios-scope, interface-mtu,
        rfc3442-classless-static-routes, ntp-servers;
}
"""
        fh.write(data)

    # Write the GRUB configuration
    grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
    with open(grubcfg_file, 'w') as fh:
        data = """# Written by the PVC provisioner
GRUB_DEFAULT=0
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="PVC Virtual Machine"
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/{root_disk} console=tty0 console=ttyS0,115200n8"
GRUB_CMDLINE_LINUX=""
GRUB_TERMINAL=console
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
GRUB_DISABLE_LINUX_UUID=false
""".format(root_disk=root_disk['disk_id'])
        fh.write(data)

    # Chroot, do some in-root tasks, then exit the chroot
    # EXITING THE CHROOT IS VERY IMPORTANT OR THE FOLLOWING STAGES OF THE PROVISIONER
    # WILL FAIL IN UNEXPECTED WAYS! Keep this in mind when using chroot in your scripts.
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(temporary_directory)
    fake_root = os.open("/", os.O_RDONLY)
    os.fchdir(fake_root)

    # Install and update GRUB
    os.system(
        "grub-install --force /dev/rbd/{}/{}_{}".format(root_disk['pool'], vm_name, root_disk['disk_id'])
    )
    os.system(
        "update-grub"
    )

    # Set a really dumb root password [TEMPORARY]
    os.system(
        "echo root:test123 | chpasswd"
    )

    # Enable cloud-init target on (first) boot
    # NOTE: Your user-data should handle this and disable it once done, or things get messy.
    # That cloud-init won't run without this hack seems like a bug... but even the official
    # Debian cloud images are affected, so who knows.
    os.system(
        "systemctl enable cloud-init.target"
    )

    # Restore our original root/exit the chroot
    # EXITING THE CHROOT IS VERY IMPORTANT OR THE FOLLOWING STAGES OF THE PROVISIONER
    # WILL FAIL IN UNEXPECTED WAYS! Keep this in mind when using chroot in your scripts.
    os.fchdir(real_root)
    os.chroot(".")
    os.fchdir(real_root)
    os.close(fake_root)
    os.close(real_root)

    # Unmount the bound devfs
    os.system(
        "umount {}/dev".format(
            temporary_directory
        )
    )

    # Clean up file handles so paths can be unmounted
    del fake_root
    del real_root

    # Everything else is done via cloud-init user-data
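
Since failing to exit the chroot breaks the provisioner's later stages, the enter/exit sequence above is a natural fit for a context manager. Here is a sketch of the same pattern, not part of this commit, which guarantees the exit steps run even if a step inside raises:

import os
from contextlib import contextmanager

@contextmanager
def chroot_target(target):
    # Enter a chroot at `target`, mirroring the enter/exit sequence used
    # above, with the exit steps guaranteed by the finally block.
    real_root = os.open("/", os.O_RDONLY)
    os.chroot(target)
    fake_root = os.open("/", os.O_RDONLY)
    os.fchdir(fake_root)
    try:
        yield
    finally:
        os.fchdir(real_root)
        os.chroot(".")
        os.fchdir(real_root)
        os.close(fake_root)
        os.close(real_root)

# Hypothetical usage within a script like the one above:
# with chroot_target(temporary_directory):
#     os.system("update-grub")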

@@ -0,0 +1,46 @@
#!/usr/bin/env python3
# dummy_script.py - PVC Provisioner example script for noop
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2019 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################
# This script provides an example of a PVC provisioner script. It does
# nothing, returning to the provisioner without taking any action, and
# expects no special arguments.
# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.
# This script will run with root privileges, as the provisioner does. Be careful
# with that.
import os

# Installation function - does nothing and returns
# Note that the only arguments are keyword arguments.
def install(**kwargs):
    # The provisioner has already mounted the disks on kwargs['temporary_directory']
    # by this point, so a real script could start working immediately after setting
    # some nicer variable names; you don't necessarily have to do this.
    vm_name = kwargs['vm_name']
    temporary_directory = kwargs['temporary_directory']
    disks = kwargs['disks']
    networks = kwargs['networks']

    # No operation - this script just returns
    pass

@@ -0,0 +1,16 @@
Content-Type: multipart/mixed; boundary="==BOUNDARY=="
MIME-Version: 1.0

--==BOUNDARY==
Content-Type: text/cloud-config; charset="us-ascii"

users:
  - blah

--==BOUNDARY==
Content-Type: text/x-shellscript; charset="us-ascii"

#!/bin/bash
echo "koz is koz" >> /etc/motd

--==BOUNDARY==--
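
For reference, an equivalent multipart payload can be generated with Python's standard email package. This sketch, not part of this commit, reproduces the boundary and part types used above:

# Sketch: building a multipart cloud-init user-data payload with the
# standard library; only the boundary and part subtypes are taken from
# the example file above.
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

combined = MIMEMultipart(boundary="==BOUNDARY==")

# The cloud-config part (Content-Type: text/cloud-config)
cloud_config = MIMEText("users:\n  - blah\n", "cloud-config", "us-ascii")
combined.attach(cloud_config)

# The shell script part (Content-Type: text/x-shellscript)
shell_script = MIMEText('#!/bin/bash\necho "koz is koz" >> /etc/motd\n',
                        "x-shellscript", "us-ascii")
combined.attach(shell_script)

print(combined.as_string())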

@@ -0,0 +1,27 @@
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0

#cloud-config
# Example user-data file to set up an alternate /var/home, a first user and some SSH keys, and some packages
bootcmd:
  - "mv /home /var/"
  - "locale-gen"
package_update: true
packages:
  - openssh-server
  - sudo
users:
  - name: deploy
    gecos: Deploy User
    homedir: /var/home/deploy
    sudo: "ALL=(ALL) NOPASSWD: ALL"
    groups: adm, sudo
    lock_passwd: true
    ssh_authorized_keys:
      - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBRBGPzlbh5xYD6k8DMZdPNEwemZzKSSpWGOuU72ehfN joshua@bonifacelabs.net 2017-04
runcmd:
  - "userdel debian"
  - "groupmod -g 200 deploy"
  - "usermod -u 200 deploy"
  - "systemctl disable cloud-init.target"
  - "reboot"

File diff suppressed because it is too large.

@@ -41,3 +41,40 @@ pvc:
            cert_file: ""
            # key_file: SSL certificate key file
            key_file: ""
    # provisioner: Configuration of the Provisioner API listener
    provisioner:
        # database: Backend database configuration
        database:
            # host: PostgreSQL hostname, usually 'localhost'
            host: 10.100.0.252
            # port: PostgreSQL port, invariably '5432'
            port: 5432
            # name: PostgreSQL database name, invariably 'pvcprov'
            name: pvcprov
            # user: PostgreSQL username, invariably 'pvcprov'
            user: pvcprov
            # pass: PostgreSQL user password, randomly generated
            pass: pvcprov
        # queue: Celery task queue backend (Redis)
        queue:
            # host: Redis hostname, usually 'localhost'
            host: localhost
            # port: Redis port, invariably '6379'
            port: 6379
            # path: Redis queue path, invariably '/0'
            path: /0
        # ceph_cluster: Information about the Ceph storage cluster
        ceph_cluster:
            # storage_hosts: The list of hosts that the Ceph monitors are valid on; if empty (the default),
            # uses the list of coordinators
            storage_hosts:
              - ceph1
              - ceph2
              - ceph3
            # storage_domain: The storage domain name, appended to each storage host name
            # to form monitor access strings
            storage_domain: "s.bonilan.net"
            # ceph_monitor_port: The port that the Ceph monitor on each coordinator listens on
            ceph_monitor_port: 6789
            # ceph_storage_secret_uuid: Libvirt secret UUID for Ceph storage access
            ceph_storage_secret_uuid: "c416032b-2ce9-457f-a5c2-18704a3485f4"
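
For illustration, not part of this commit: the queue values combine into a standard Redis broker URI for Celery, and the database values into a PostgreSQL connection string. A sketch, assuming PyYAML is available and the config lives at the /etc/pvc/pvc-api.yaml path used by the worker unit below:

# Sketch: deriving the Celery broker URI and a PostgreSQL DSN from the
# provisioner configuration above. Assumes PyYAML is installed.
import yaml

with open('/etc/pvc/pvc-api.yaml') as fh:
    config = yaml.safe_load(fh)

prov = config['pvc']['provisioner']

queue = prov['queue']
broker_url = 'redis://{}:{}{}'.format(queue['host'], queue['port'], queue['path'])
# e.g. redis://localhost:6379/0

db = prov['database']
dsn = 'host={} port={} dbname={} user={} password={}'.format(
    db['host'], db['port'], db['name'], db['user'], db['pass']
)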

@@ -0,0 +1,16 @@
# Parallel Virtual Cluster Provisioner client worker unit file

[Unit]
Description = Parallel Virtual Cluster Provisioner worker
After = network-online.target

[Service]
Type = simple
WorkingDirectory = /usr/share/pvc
Environment = PYTHONUNBUFFERED=true
Environment = PVC_CONFIG_FILE=/etc/pvc/pvc-api.yaml
ExecStart = /usr/bin/celery worker -A pvc-api.celery --concurrency 1 --loglevel INFO
Restart = on-failure

[Install]
WantedBy = multi-user.target

client-api/schema.sql

@@ -0,0 +1,15 @@
create database pvcprov with owner = pvcprov connection limit = -1;
\c pvcprov
create table system_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, vcpu_count INT NOT NULL, vram_mb INT NOT NULL, serial BOOL NOT NULL, vnc BOOL NOT NULL, vnc_bind TEXT, node_limit TEXT, node_selector TEXT, start_with_node BOOL NOT NULL);
create table network_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, mac_template TEXT);
create table network (id SERIAL PRIMARY KEY, network_template INT REFERENCES network_template(id), vni INT NOT NULL);
create table storage_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE);
create table storage (id SERIAL PRIMARY KEY, storage_template INT REFERENCES storage_template(id), pool TEXT NOT NULL, disk_id TEXT NOT NULL, disk_size_gb INT NOT NULL, mountpoint TEXT, filesystem TEXT, filesystem_args TEXT);
create table userdata_template (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, userdata TEXT NOT NULL);
create table script (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, script TEXT NOT NULL);
create table profile (id SERIAL PRIMARY KEY, name TEXT NOT NULL UNIQUE, system_template INT REFERENCES system_template(id), network_template INT REFERENCES network_template(id), storage_template INT REFERENCES storage_template(id), userdata_template INT REFERENCES userdata_template(id), script INT REFERENCES script(id), arguments text);
grant all privileges on database pvcprov to pvcprov;
grant all privileges on all tables in schema public to pvcprov;
grant all privileges on all sequences in schema public to pvcprov;
insert into userdata_template(name, userdata) values ('empty', '');
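
To show how this schema might be used, not part of this commit: a provisioner script such as the debootstrap example above could be registered in the script table with psycopg2, assuming that driver is installed, the pvcprov credentials from this file, and a hypothetical local copy of the script:

# Sketch: registering a provisioner script in the schema above.
# Assumes psycopg2 and the pvcprov credentials; the file path is hypothetical.
import psycopg2

conn = psycopg2.connect(host='localhost', dbname='pvcprov',
                        user='pvcprov', password='pvcprov')
with conn, conn.cursor() as cur:
    with open('debootstrap_script.py') as fh:
        script_body = fh.read()
    # The profile table later references this row by its SERIAL id
    cur.execute(
        "INSERT INTO script (name, script) VALUES (%s, %s)",
        ('debootstrap', script_body)
    )
conn.close()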

debian/control

@@ -33,17 +33,8 @@ Description: Parallel Virtual Cluster CLI client (Python 3)
 
 Package: pvc-client-api
 Architecture: all
-Depends: pvc-client-common, python3-yaml, python3-flask, python3-gevent
+Depends: pvc-client-common, python3-yaml, python3-flask, python3-gevent, python3-celery, python-celery-common
 Description: Parallel Virtual Cluster API client (Python 3)
  A KVM/Zookeeper/Ceph-based VM and private cloud manager
  .
  This package installs the PVC API client daemon
-
-Package: pvc-client-provisioner
-Architecture: all
-Depends: pvc-client-common, python3-yaml, python3-flask, python3-celery, python-celery-common
-Description: Parallel Virtual Cluster Provisioner client (Python 3)
- A KVM/Zookeeper/Ceph-based VM and private cloud manager
- .
- This package installs the PVC provisioner daemon

@@ -2,3 +2,6 @@ client-api/pvc-api.py usr/share/pvc
 client-api/pvc-api.sample.yaml etc/pvc
 client-api/api_lib usr/share/pvc
 client-api/pvc-api.service lib/systemd/system
+client-api/pvc-provisioner-worker.service lib/systemd/system
+client-api/schema.sql usr/share/pvc
+client-api/provisioner/examples usr/share/pvc/provisioner

@@ -6,9 +6,15 @@ ln -s /usr/share/pvc/api.py /usr/bin/pvc-api
 # Reload systemd's view of the units
 systemctl daemon-reload
 
-# Restart the daemon (or warn on first install)
+# Restart the main daemon (or warn on first install)
 if systemctl is-active --quiet pvc-api.service; then
     systemctl restart pvc-api.service
 else
     echo "NOTE: The PVC client API daemon (pvc-api.service) has not been started; create a config file at /etc/pvc/pvc-api.yaml then start it."
 fi
+
+# Restart the worker daemon (or warn on first install)
+if systemctl is-active --quiet pvc-provisioner-worker.service; then
+    systemctl restart pvc-provisioner-worker.service
+else
+    echo "NOTE: The PVC provisioner worker daemon (pvc-provisioner-worker.service) has not been started; create a config file at /etc/pvc/pvc-api.yaml then start it."
+fi

@@ -1,6 +0,0 @@
client-provisioner/pvc-provisioner.py usr/share/pvc
client-provisioner/pvc-provisioner.sample.yaml etc/pvc
client-provisioner/provisioner_lib usr/share/pvc
client-provisioner/pvc-provisioner.service lib/systemd/system
client-provisioner/pvc-provisioner-worker.service lib/systemd/system
client-provisioner/examples usr/share/pvc/provisioner

@@ -1,15 +0,0 @@
#!/bin/sh

# Install client binary to /usr/bin via symlink
ln -s /usr/share/pvc/provisioner.py /usr/bin/pvc-provisioner

# Reload systemd's view of the units
systemctl daemon-reload

# Restart the daemon (or warn on first install)
if systemctl is-active --quiet pvc-provisioner.service; then
    systemctl restart pvc-provisioner.service
    systemctl restart pvc-provisioner-worker.service
else
    echo "NOTE: The PVC provisioner API daemon (pvc-provisioner.service) has not been started; create a config file at /etc/pvc/pvc-provisioner.yaml then start it."
fi

@@ -1,4 +0,0 @@
#!/bin/sh

# Remove client binary symlink
rm -f /usr/bin/pvc-provisioner