Compare commits
17 Commits
c84ee0f4f1
...
b58fa06f67
Author | SHA1 | Date | |
---|---|---|---|
b58fa06f67 | |||
3b3d2e7f7e | |||
72a5de800c | |||
f450d1d313 | |||
2db58488a2 | |||
1bbf8f6bf6 | |||
191f8780c9 | |||
80c1f78864 | |||
c8c0987fe7 | |||
67560c6457 | |||
79c9eba28c | |||
36e924d339 | |||
aeb1443410 | |||
eccd2a98b2 | |||
6e2c1fb45e | |||
b14ba9172c | |||
e9235a627c |
@ -0,0 +1,38 @@
|
||||
"""PVC version 0.9.55
|
||||
|
||||
Revision ID: 88fa0d88a9f8
|
||||
Revises: 5c2109dbbeae
|
||||
Create Date: 2022-10-06 10:33:38.784497
|
||||
|
||||
"""
|
||||
from alembic import op
|
||||
import sqlalchemy as sa
|
||||
|
||||
|
||||
# revision identifiers, used by Alembic.
|
||||
revision = '88fa0d88a9f8'
|
||||
down_revision = '5c2109dbbeae'
|
||||
branch_labels = None
|
||||
depends_on = None
|
||||
|
||||
|
||||
def upgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.alter_column('profile', 'script',
|
||||
existing_type=sa.INTEGER(),
|
||||
nullable=False)
|
||||
op.alter_column('profile', 'system_template',
|
||||
existing_type=sa.INTEGER(),
|
||||
nullable=False)
|
||||
# ### end Alembic commands ###
|
||||
|
||||
|
||||
def downgrade():
|
||||
# ### commands auto generated by Alembic - please adjust! ###
|
||||
op.alter_column('profile', 'system_template',
|
||||
existing_type=sa.INTEGER(),
|
||||
nullable=True)
|
||||
op.alter_column('profile', 'script',
|
||||
existing_type=sa.INTEGER(),
|
||||
nullable=True)
|
||||
# ### end Alembic commands ###
|
@ -1,248 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# debootstrap_script.py - PVC Provisioner example script for Debootstrap
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2021 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
# This script provides an example of a PVC provisioner script. It will install
|
||||
# a Debian system, of the release specified in the keyword argument `deb_release`
|
||||
# and from the mirror specified in the keyword argument `deb_mirror`, and
|
||||
# including the packages specified in the keyword argument `deb_packages` (a list
|
||||
# of strings, which is then joined together as a CSV and passed to debootstrap),
|
||||
# to the configured disks, configure fstab, and install GRUB. Any later config
|
||||
# should be done within the VM, for instance via cloud-init.
|
||||
|
||||
# This script can thus be used as an example or reference implementation of a
|
||||
# PVC provisioner script and expanded upon as required.
|
||||
|
||||
# This script will run under root privileges as the provisioner does. Be careful
|
||||
# with that.
|
||||
|
||||
import os
|
||||
from contextlib import contextmanager
|
||||
|
||||
|
||||
# Create a chroot context manager
|
||||
# This can be used later in the script to chroot to the destination directory
|
||||
# for instance to run commands within the target.
|
||||
@contextmanager
|
||||
def chroot_target(destination):
|
||||
try:
|
||||
real_root = os.open("/", os.O_RDONLY)
|
||||
os.chroot(destination)
|
||||
fake_root = os.open("/", os.O_RDONLY)
|
||||
os.fchdir(fake_root)
|
||||
yield
|
||||
finally:
|
||||
os.fchdir(real_root)
|
||||
os.chroot(".")
|
||||
os.fchdir(real_root)
|
||||
os.close(fake_root)
|
||||
os.close(real_root)
|
||||
del fake_root
|
||||
del real_root
|
||||
|
||||
|
||||
# Installation function - performs a debootstrap install of a Debian system
|
||||
# Note that the only arguments are keyword arguments.
|
||||
def install(**kwargs):
|
||||
# The provisioner has already mounted the disks on kwargs['temporary_directory'].
|
||||
# by this point, so we can get right to running the debootstrap after setting
|
||||
# some nicer variable names; you don't necessarily have to do this.
|
||||
vm_name = kwargs["vm_name"]
|
||||
temporary_directory = kwargs["temporary_directory"]
|
||||
disks = kwargs["disks"]
|
||||
networks = kwargs["networks"]
|
||||
# Our own required arguments. We should, though are not required to, handle
|
||||
# failures of these gracefully, should administrators forget to specify them.
|
||||
try:
|
||||
deb_release = kwargs["deb_release"]
|
||||
except Exception:
|
||||
deb_release = "stable"
|
||||
try:
|
||||
deb_mirror = kwargs["deb_mirror"]
|
||||
except Exception:
|
||||
deb_mirror = "http://ftp.debian.org/debian"
|
||||
try:
|
||||
deb_packages = kwargs["deb_packages"].split(",")
|
||||
except Exception:
|
||||
deb_packages = [
|
||||
"linux-image-amd64",
|
||||
"grub-pc",
|
||||
"cloud-init",
|
||||
"python3-cffi-backend",
|
||||
"wget",
|
||||
]
|
||||
|
||||
# We need to know our root disk
|
||||
root_disk = None
|
||||
for disk in disks:
|
||||
if disk["mountpoint"] == "/":
|
||||
root_disk = disk
|
||||
if not root_disk:
|
||||
return
|
||||
|
||||
# Ensure we have debootstrap intalled on the provisioner system; this is a
|
||||
# good idea to include if you plan to use anything that is not part of the
|
||||
# base Debian host system, just in case the provisioner host is not properly
|
||||
# configured already.
|
||||
os.system("apt-get install -y debootstrap")
|
||||
|
||||
# Perform a deboostrap installation
|
||||
os.system(
|
||||
"debootstrap --include={pkgs} {suite} {target} {mirror}".format(
|
||||
suite=deb_release,
|
||||
target=temporary_directory,
|
||||
mirror=deb_mirror,
|
||||
pkgs=",".join(deb_packages),
|
||||
)
|
||||
)
|
||||
|
||||
# Bind mount the devfs
|
||||
os.system("mount --bind /dev {}/dev".format(temporary_directory))
|
||||
|
||||
# Create an fstab entry for each disk
|
||||
fstab_file = "{}/etc/fstab".format(temporary_directory)
|
||||
# The disk ID starts at zero and increments by one for each disk in the fixed-order
|
||||
# disk list. This lets us work around the insanity of Libvirt IDs not matching guest IDs,
|
||||
# while still letting us have some semblance of control here without enforcing things
|
||||
# like labels. It increments in the for loop below at the end of each iteration, and is
|
||||
# used to craft a /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-X device ID
|
||||
# which will always match the correct order from Libvirt (unlike sdX/vdX names).
|
||||
disk_id = 0
|
||||
for disk in disks:
|
||||
# We assume SSD-based/-like storage, and dislike atimes
|
||||
options = "defaults,discard,noatime,nodiratime"
|
||||
|
||||
# The root, var, and log volumes have specific values
|
||||
if disk["mountpoint"] == "/":
|
||||
root_disk["scsi_id"] = disk_id
|
||||
dump = 0
|
||||
cpass = 1
|
||||
elif disk["mountpoint"] == "/var" or disk["mountpoint"] == "/var/log":
|
||||
dump = 0
|
||||
cpass = 2
|
||||
else:
|
||||
dump = 0
|
||||
cpass = 0
|
||||
|
||||
# Append the fstab line
|
||||
with open(fstab_file, "a") as fh:
|
||||
data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
|
||||
disk=disk_id,
|
||||
mountpoint=disk["mountpoint"],
|
||||
filesystem=disk["filesystem"],
|
||||
options=options,
|
||||
dump=dump,
|
||||
cpass=cpass,
|
||||
)
|
||||
fh.write(data)
|
||||
|
||||
# Increment the disk_id
|
||||
disk_id += 1
|
||||
|
||||
# Write the hostname
|
||||
hostname_file = "{}/etc/hostname".format(temporary_directory)
|
||||
with open(hostname_file, "w") as fh:
|
||||
fh.write("{}".format(vm_name))
|
||||
|
||||
# Fix the cloud-init.target since it's broken
|
||||
cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
|
||||
temporary_directory
|
||||
)
|
||||
with open(cloudinit_target_file, "w") as fh:
|
||||
data = """[Install]
|
||||
WantedBy=multi-user.target
|
||||
[Unit]
|
||||
Description=Cloud-init target
|
||||
After=multi-user.target
|
||||
"""
|
||||
fh.write(data)
|
||||
|
||||
# NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
|
||||
# will always be on PCI bus ID 2, hence the name "ens2".
|
||||
# Write a DHCP stanza for ens2
|
||||
ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(temporary_directory)
|
||||
with open(ens2_network_file, "w") as fh:
|
||||
data = """auto ens2
|
||||
iface ens2 inet dhcp
|
||||
"""
|
||||
fh.write(data)
|
||||
|
||||
# Write the DHCP config for ens2
|
||||
dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
|
||||
with open(dhclient_file, "w") as fh:
|
||||
data = (
|
||||
"""# DHCP client configuration
|
||||
# Written by the PVC provisioner
|
||||
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
|
||||
interface "ens2" {
|
||||
"""
|
||||
+ """ send fqdn.fqdn = "{hostname}";
|
||||
send host-name = "{hostname}";
|
||||
""".format(
|
||||
hostname=vm_name
|
||||
)
|
||||
+ """ request subnet-mask, broadcast-address, time-offset, routers,
|
||||
domain-name, domain-name-servers, domain-search, host-name,
|
||||
dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
|
||||
netbios-name-servers, netbios-scope, interface-mtu,
|
||||
rfc3442-classless-static-routes, ntp-servers;
|
||||
}
|
||||
"""
|
||||
)
|
||||
fh.write(data)
|
||||
|
||||
# Write the GRUB configuration
|
||||
grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
|
||||
with open(grubcfg_file, "w") as fh:
|
||||
data = """# Written by the PVC provisioner
|
||||
GRUB_DEFAULT=0
|
||||
GRUB_TIMEOUT=1
|
||||
GRUB_DISTRIBUTOR="PVC Virtual Machine"
|
||||
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{root_disk} console=tty0 console=ttyS0,115200n8"
|
||||
GRUB_CMDLINE_LINUX=""
|
||||
GRUB_TERMINAL=console
|
||||
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
|
||||
GRUB_DISABLE_LINUX_UUID=false
|
||||
""".format(
|
||||
root_disk=root_disk["scsi_id"]
|
||||
)
|
||||
fh.write(data)
|
||||
|
||||
# Chroot, do some in-root tasks, then exit the chroot
|
||||
with chroot_target(temporary_directory):
|
||||
# Install and update GRUB
|
||||
os.system(
|
||||
"grub-install --force /dev/rbd/{}/{}_{}".format(
|
||||
root_disk["pool"], vm_name, root_disk["disk_id"]
|
||||
)
|
||||
)
|
||||
os.system("update-grub")
|
||||
# Set a really dumb root password [TEMPORARY]
|
||||
os.system("echo root:test123 | chpasswd")
|
||||
# Enable cloud-init target on (first) boot
|
||||
# NOTE: Your user-data should handle this and disable it once done, or things get messy.
|
||||
# That cloud-init won't run without this hack seems like a bug... but even the official
|
||||
# Debian cloud images are affected, so who knows.
|
||||
os.system("systemctl enable cloud-init.target")
|
||||
|
||||
# Unmount the bound devfs
|
||||
os.system("umount {}/dev".format(temporary_directory))
|
||||
|
||||
# Everything else is done via cloud-init user-data
|
@ -1,43 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# dummy_script.py - PVC Provisioner example script for noop
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2021 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
# This script provides an example of a PVC provisioner script. It will do
|
||||
# nothing and return back to the provisioner without taking any action, and
|
||||
# expecting no special arguments.
|
||||
|
||||
# This script can thus be used as an example or reference implementation of a
|
||||
# PVC provisioner script and expanded upon as required.
|
||||
|
||||
# This script will run under root privileges as the provisioner does. Be careful
|
||||
# with that.
|
||||
|
||||
# Installation function - performs no actions then returns
|
||||
# Note that the only arguments are keyword arguments.
|
||||
def install(**kwargs):
|
||||
# The provisioner has already mounted the disks on kwargs['temporary_directory'].
|
||||
# by this point, so we can get right to running the debootstrap after setting
|
||||
# some nicer variable names; you don't necessarily have to do this.
|
||||
vm_name = kwargs["vm_name"]
|
||||
temporary_directory = kwargs["temporary_directory"]
|
||||
disks = kwargs["disks"]
|
||||
networks = kwargs["networks"]
|
||||
# No operation - this script just returns
|
||||
pass
|
295
api-daemon/provisioner/examples/script/1-noop.py
Normal file
295
api-daemon/provisioner/examples/script/1-noop.py
Normal file
@ -0,0 +1,295 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# 1-noop.py - PVC Provisioner example script for noop install
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
# This script provides an example of a PVC provisioner script. It will create a
|
||||
# standard VM config but do no actual setup/prepare/install/cleanup (noop).
|
||||
|
||||
# This script can thus be used as an example or reference implementation of a
|
||||
# PVC provisioner script and expanded upon as required.
|
||||
# *** READ THIS SCRIPT THOROUGHLY BEFORE USING TO UNDERSTAND HOW IT WORKS. ***
|
||||
|
||||
# A script must implement the class "VMBuilderScript" which extends "VMBuilder",
|
||||
# providing the 5 functions indicated. Detailed explanation of the role of each
|
||||
# function is provided in context of the example; see the other examples for
|
||||
# more potential uses.
|
||||
|
||||
# Within the VMBuilderScript class, several common variables are exposed through
|
||||
# the parent VMBuilder class:
|
||||
# self.vm_name: The name of the VM from PVC's perspective
|
||||
# self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
|
||||
# self.vm_uuid: An automatically-generated UUID for the VM
|
||||
# self.vm_profile: The PVC provisioner profile name used for the VM
|
||||
# self.vm_data: A dictionary of VM data collected by the provisioner; as an example:
|
||||
# {
|
||||
# "ceph_monitor_list": [
|
||||
# "hv1.pvcstorage.tld",
|
||||
# "hv2.pvcstorage.tld",
|
||||
# "hv3.pvcstorage.tld"
|
||||
# ],
|
||||
# "ceph_monitor_port": "6789",
|
||||
# "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
|
||||
# "mac_template": null,
|
||||
# "networks": [
|
||||
# {
|
||||
# "eth_bridge": "vmbr1001",
|
||||
# "id": 72,
|
||||
# "network_template": 69,
|
||||
# "vni": "1001"
|
||||
# },
|
||||
# {
|
||||
# "eth_bridge": "vmbr101",
|
||||
# "id": 73,
|
||||
# "network_template": 69,
|
||||
# "vni": "101"
|
||||
# }
|
||||
# ],
|
||||
# "script": [contents of this file]
|
||||
# "script_arguments": {
|
||||
# "deb_mirror": "http://ftp.debian.org/debian",
|
||||
# "deb_release": "bullseye"
|
||||
# },
|
||||
# "system_architecture": "x86_64",
|
||||
# "system_details": {
|
||||
# "id": 78,
|
||||
# "migration_method": "live",
|
||||
# "name": "small",
|
||||
# "node_autostart": false,
|
||||
# "node_limit": null,
|
||||
# "node_selector": null,
|
||||
# "ova": null,
|
||||
# "serial": true,
|
||||
# "vcpu_count": 2,
|
||||
# "vnc": false,
|
||||
# "vnc_bind": null,
|
||||
# "vram_mb": 2048
|
||||
# },
|
||||
# "volumes": [
|
||||
# {
|
||||
# "disk_id": "sda",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=root",
|
||||
# "id": 9,
|
||||
# "mountpoint": "/",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# },
|
||||
# {
|
||||
# "disk_id": "sdb",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=var",
|
||||
# "id": 10,
|
||||
# "mountpoint": "/var",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# },
|
||||
# {
|
||||
# "disk_id": "sdc",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=log",
|
||||
# "id": 11,
|
||||
# "mountpoint": "/var/log",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# }
|
||||
# ]
|
||||
# }
|
||||
#
|
||||
# Any other information you may require must be obtained manually.
|
||||
|
||||
# WARNING:
|
||||
#
|
||||
# For safety reasons, the script runs in a modified chroot. It will have full access to
|
||||
# the entire / (root partition) of the hypervisor, but read-only. In addition it has
|
||||
# access to /dev, /sys, /run, and a fresh /tmp to write to; use /tmp/target (as
|
||||
# convention) as the destination for any mounting of volumes and installation.
|
||||
# Of course, in addition to this safety, it is VERY IMPORTANT to be aware that this
|
||||
# script runs AS ROOT ON THE HYPERVISOR SYSTEM. You should never allow arbitrary,
|
||||
# untrusted users the ability to add provisioning scripts even with this safeguard,
|
||||
# since they could still do destructive things to /dev and the like!
|
||||
|
||||
|
||||
# This import is always required here, as VMBuilder is used by the VMBuilderScript class
|
||||
# and ProvisioningError is the primary exception that should be raised within the class.
|
||||
from pvcapid.vmbuilder import VMBuilder, ProvisioningError
|
||||
|
||||
|
||||
# The VMBuilderScript class must be named as such, and extend VMBuilder.
|
||||
class VMBuilderScript(VMBuilder):
|
||||
def setup(self):
|
||||
"""
|
||||
setup(): Perform special setup steps or validation before proceeding
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
def create(self):
|
||||
"""
|
||||
create(): Create the VM libvirt schema definition
|
||||
|
||||
This step *must* return a fully-formed Libvirt XML document as a string or the
|
||||
provisioning task will fail.
|
||||
|
||||
This example leverages the built-in libvirt_schema objects provided by PVC; these
|
||||
can be used as-is, or replaced with your own schema(s) on a per-script basis.
|
||||
|
||||
Even though we noop the rest of the script, we still create a fully-formed libvirt
|
||||
XML document here as a demonstration.
|
||||
"""
|
||||
|
||||
# Run any imports first
|
||||
import pvcapid.libvirt_schema as libvirt_schema
|
||||
import datetime
|
||||
import random
|
||||
|
||||
# Create the empty schema document that we will append to and return at the end
|
||||
schema = ""
|
||||
|
||||
# Prepare a description based on the VM profile
|
||||
description = (
|
||||
f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}'"
|
||||
)
|
||||
|
||||
# Format the header
|
||||
schema += libvirt_schema.libvirt_header.format(
|
||||
vm_name=self.vm_name,
|
||||
vm_uuid=self.vm_uuid,
|
||||
vm_description=description,
|
||||
vm_memory=self.vm_data["system_details"]["vram_mb"],
|
||||
vm_vcpus=self.vm_data["system_details"]["vcpu_count"],
|
||||
vm_architecture=self.vm_data["system_architecture"],
|
||||
)
|
||||
|
||||
# Add the disk devices
|
||||
monitor_list = self.vm_data["ceph_monitor_list"]
|
||||
monitor_port = self.vm_data["ceph_monitor_port"]
|
||||
monitor_secret = self.vm_data["ceph_monitor_secret"]
|
||||
|
||||
for volume in self.vm_data["volumes"]:
|
||||
schema += libvirt_schema.devices_disk_header.format(
|
||||
ceph_storage_secret=monitor_secret,
|
||||
disk_pool=volume["pool"],
|
||||
vm_name=self.vm_name,
|
||||
disk_id=volume["disk_id"],
|
||||
)
|
||||
for monitor in monitor_list:
|
||||
schema += libvirt_schema.devices_disk_coordinator.format(
|
||||
coordinator_name=monitor,
|
||||
coordinator_ceph_mon_port=monitor_port,
|
||||
)
|
||||
schema += libvirt_schema.devices_disk_footer
|
||||
|
||||
# Add the special vhostmd device for hypervisor information inside the VM
|
||||
schema += libvirt_schema.devices_vhostmd
|
||||
|
||||
# Add the network devices
|
||||
network_id = 0
|
||||
for network in self.vm_data["networks"]:
|
||||
vm_id_hex = "{:x}".format(int(self.vm_id % 16))
|
||||
net_id_hex = "{:x}".format(int(network_id % 16))
|
||||
|
||||
if self.vm_data.get("mac_template") is not None:
|
||||
mac_prefix = "52:54:01"
|
||||
macgen_template = self.vm_data["mac_template"]
|
||||
eth_macaddr = macgen_template.format(
|
||||
prefix=mac_prefix, vmid=vm_id_hex, netid=net_id_hex
|
||||
)
|
||||
else:
|
||||
mac_prefix = "52:54:00"
|
||||
random_octet_A = "{:x}".format(random.randint(16, 238))
|
||||
random_octet_B = "{:x}".format(random.randint(16, 238))
|
||||
random_octet_C = "{:x}".format(random.randint(16, 238))
|
||||
|
||||
macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
|
||||
eth_macaddr = macgen_template.format(
|
||||
prefix=mac_prefix,
|
||||
octetA=random_octet_A,
|
||||
octetB=random_octet_B,
|
||||
octetC=random_octet_C,
|
||||
)
|
||||
|
||||
schema += libvirt_schema.devices_net_interface.format(
|
||||
eth_macaddr=eth_macaddr,
|
||||
eth_bridge=network["eth_bridge"],
|
||||
)
|
||||
|
||||
network_id += 1
|
||||
|
||||
# Add default devices
|
||||
schema += libvirt_schema.devices_default
|
||||
|
||||
# Add serial device
|
||||
if self.vm_data["system_details"]["serial"]:
|
||||
schema += libvirt_schema.devices_serial.format(vm_name=self.vm_name)
|
||||
|
||||
# Add VNC device
|
||||
if self.vm_data["system_details"]["vnc"]:
|
||||
if self.vm_data["system_details"]["vnc_bind"]:
|
||||
vm_vnc_bind = self.vm_data["system_details"]["vnc_bind"]
|
||||
else:
|
||||
vm_vnc_bind = "127.0.0.1"
|
||||
|
||||
vm_vncport = 5900
|
||||
vm_vnc_autoport = "yes"
|
||||
|
||||
schema += libvirt_schema.devices_vnc.format(
|
||||
vm_vncport=vm_vncport,
|
||||
vm_vnc_autoport=vm_vnc_autoport,
|
||||
vm_vnc_bind=vm_vnc_bind,
|
||||
)
|
||||
|
||||
# Add SCSI controller
|
||||
schema += libvirt_schema.devices_scsi_controller
|
||||
|
||||
# Add footer
|
||||
schema += libvirt_schema.libvirt_footer
|
||||
|
||||
return schema
|
||||
|
||||
def prepare(self):
|
||||
"""
|
||||
prepare(): Prepare any disks/volumes for the install() step
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
def install(self):
|
||||
"""
|
||||
install(): Perform the installation
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
cleanup(): Perform any cleanup required due to prepare()/install()
|
||||
|
||||
This function is also called if there is ANY exception raised in the prepare()
|
||||
or install() steps. While this doesn't mean you shouldn't or can't raise exceptions
|
||||
here, be warned that doing so might cause loops. Do this only if you really need to.
|
||||
"""
|
||||
|
||||
pass
|
394
api-daemon/provisioner/examples/script/2-ova.py
Normal file
394
api-daemon/provisioner/examples/script/2-ova.py
Normal file
@ -0,0 +1,394 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# 1-ova.py - PVC Provisioner example script for OVA profile install
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
# This script provides an example of a PVC provisioner script. It will create a
|
||||
# standard VM config suitable for the OVA profile, and prepare the disks, but do
|
||||
# no additional install tasks (noop).
|
||||
|
||||
# This script can thus be used as an example or reference implementation of a
|
||||
# PVC provisioner script and expanded upon as required.
|
||||
# *** READ THIS SCRIPT THOROUGHLY BEFORE USING TO UNDERSTAND HOW IT WORKS. ***
|
||||
|
||||
# A script must implement the class "VMBuilderScript" which extends "VMBuilder",
|
||||
# providing the 5 functions indicated. Detailed explanation of the role of each
|
||||
# function is provided in context of the example; see the other examples for
|
||||
# more potential uses.
|
||||
|
||||
# Within the VMBuilderScript class, several common variables are exposed through
|
||||
# the parent VMBuilder class:
|
||||
# self.vm_name: The name of the VM from PVC's perspective
|
||||
# self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
|
||||
# self.vm_uuid: An automatically-generated UUID for the VM
|
||||
# self.vm_profile: The PVC provisioner profile name used for the VM
|
||||
# self.vm_data: A dictionary of VM data collected by the provisioner; as an example:
|
||||
# {
|
||||
# "ceph_monitor_list": [
|
||||
# "hv1.pvcstorage.tld",
|
||||
# "hv2.pvcstorage.tld",
|
||||
# "hv3.pvcstorage.tld"
|
||||
# ],
|
||||
# "ceph_monitor_port": "6789",
|
||||
# "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
|
||||
# "mac_template": null,
|
||||
# "networks": [
|
||||
# {
|
||||
# "eth_bridge": "vmbr1001",
|
||||
# "id": 72,
|
||||
# "network_template": 69,
|
||||
# "vni": "1001"
|
||||
# },
|
||||
# {
|
||||
# "eth_bridge": "vmbr101",
|
||||
# "id": 73,
|
||||
# "network_template": 69,
|
||||
# "vni": "101"
|
||||
# }
|
||||
# ],
|
||||
# "script": [contents of this file]
|
||||
# "script_arguments": {
|
||||
# "deb_mirror": "http://ftp.debian.org/debian",
|
||||
# "deb_release": "bullseye"
|
||||
# },
|
||||
# "system_architecture": "x86_64",
|
||||
# "system_details": {
|
||||
# "id": 78,
|
||||
# "migration_method": "live",
|
||||
# "name": "small",
|
||||
# "node_autostart": false,
|
||||
# "node_limit": null,
|
||||
# "node_selector": null,
|
||||
# "ova": null,
|
||||
# "serial": true,
|
||||
# "vcpu_count": 2,
|
||||
# "vnc": false,
|
||||
# "vnc_bind": null,
|
||||
# "vram_mb": 2048
|
||||
# },
|
||||
# "volumes": [
|
||||
# {
|
||||
# "disk_id": "sda",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=root",
|
||||
# "id": 9,
|
||||
# "mountpoint": "/",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# },
|
||||
# {
|
||||
# "disk_id": "sdb",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=var",
|
||||
# "id": 10,
|
||||
# "mountpoint": "/var",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# },
|
||||
# {
|
||||
# "disk_id": "sdc",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=log",
|
||||
# "id": 11,
|
||||
# "mountpoint": "/var/log",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# }
|
||||
# ]
|
||||
# }
|
||||
#
|
||||
# Any other information you may require must be obtained manually.
|
||||
|
||||
# WARNING:
|
||||
#
|
||||
# For safety reasons, the script runs in a modified chroot. It will have full access to
|
||||
# the entire / (root partition) of the hypervisor, but read-only. In addition it has
|
||||
# access to /dev, /sys, /run, and a fresh /tmp to write to; use /tmp/target (as
|
||||
# convention) as the destination for any mounting of volumes and installation.
|
||||
# Of course, in addition to this safety, it is VERY IMPORTANT to be aware that this
|
||||
# script runs AS ROOT ON THE HYPERVISOR SYSTEM. You should never allow arbitrary,
|
||||
# untrusted users the ability to add provisioning scripts even with this safeguard,
|
||||
# since they could still do destructive things to /dev and the like!
|
||||
|
||||
|
||||
# This import is always required here, as VMBuilder is used by the VMBuilderScript class
|
||||
# and ProvisioningError is the primary exception that should be raised within the class.
|
||||
from pvcapid.vmbuilder import VMBuilder, ProvisioningError
|
||||
|
||||
|
||||
# The VMBuilderScript class must be named as such, and extend VMBuilder.
|
||||
class VMBuilderScript(VMBuilder):
|
||||
def setup(self):
|
||||
"""
|
||||
setup(): Perform special setup steps or validation before proceeding
|
||||
|
||||
Validate that we're actually an OVA profile.
|
||||
"""
|
||||
|
||||
if self.vm_data.get("ova_details") is None:
|
||||
raise ProvisioningError(
|
||||
"Attempting to provision non-OVA profile with OVA script."
|
||||
)
|
||||
|
||||
def create(self):
|
||||
"""
|
||||
create(): Create the VM libvirt schema definition
|
||||
|
||||
This step *must* return a fully-formed Libvirt XML document as a string or the
|
||||
provisioning task will fail.
|
||||
|
||||
This example leverages the built-in libvirt_schema objects provided by PVC; these
|
||||
can be used as-is, or replaced with your own schema(s) on a per-script basis.
|
||||
"""
|
||||
|
||||
# Run any imports first
|
||||
import pvcapid.libvirt_schema as libvirt_schema
|
||||
import datetime
|
||||
import random
|
||||
|
||||
# Create the empty schema document that we will append to and return at the end
|
||||
schema = ""
|
||||
|
||||
# Prepare a description based on the VM profile
|
||||
description = f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}', OVA '{self.vm_data['ova_details']['name']}'"
|
||||
|
||||
# Format the header
|
||||
schema += libvirt_schema.libvirt_header.format(
|
||||
vm_name=self.vm_name,
|
||||
vm_uuid=self.vm_uuid,
|
||||
vm_description=description,
|
||||
vm_memory=self.vm_data["system_details"]["vram_mb"],
|
||||
vm_vcpus=self.vm_data["system_details"]["vcpu_count"],
|
||||
vm_architecture=self.vm_data["system_architecture"],
|
||||
)
|
||||
|
||||
# Add the disk devices
|
||||
monitor_list = self.vm_data["ceph_monitor_list"]
|
||||
monitor_port = self.vm_data["ceph_monitor_port"]
|
||||
monitor_secret = self.vm_data["ceph_monitor_secret"]
|
||||
|
||||
for volume in self.vm_data["volumes"]:
|
||||
schema += libvirt_schema.devices_disk_header.format(
|
||||
ceph_storage_secret=monitor_secret,
|
||||
disk_pool=volume["pool"],
|
||||
vm_name=self.vm_name,
|
||||
disk_id=volume["disk_id"],
|
||||
)
|
||||
for monitor in monitor_list:
|
||||
schema += libvirt_schema.devices_disk_coordinator.format(
|
||||
coordinator_name=monitor,
|
||||
coordinator_ceph_mon_port=monitor_port,
|
||||
)
|
||||
schema += libvirt_schema.devices_disk_footer
|
||||
|
||||
# Add the special vhostmd device for hypervisor information inside the VM
|
||||
schema += libvirt_schema.devices_vhostmd
|
||||
|
||||
# Add the network devices
|
||||
network_id = 0
|
||||
for network in self.vm_data["networks"]:
|
||||
vm_id_hex = "{:x}".format(int(self.vm_id % 16))
|
||||
net_id_hex = "{:x}".format(int(network_id % 16))
|
||||
|
||||
if self.vm_data.get("mac_template") is not None:
|
||||
mac_prefix = "52:54:01"
|
||||
macgen_template = self.vm_data["mac_template"]
|
||||
eth_macaddr = macgen_template.format(
|
||||
prefix=mac_prefix, vmid=vm_id_hex, netid=net_id_hex
|
||||
)
|
||||
else:
|
||||
mac_prefix = "52:54:00"
|
||||
random_octet_A = "{:x}".format(random.randint(16, 238))
|
||||
random_octet_B = "{:x}".format(random.randint(16, 238))
|
||||
random_octet_C = "{:x}".format(random.randint(16, 238))
|
||||
|
||||
macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
|
||||
eth_macaddr = macgen_template.format(
|
||||
prefix=mac_prefix,
|
||||
octetA=random_octet_A,
|
||||
octetB=random_octet_B,
|
||||
octetC=random_octet_C,
|
||||
)
|
||||
|
||||
schema += libvirt_schema.devices_net_interface.format(
|
||||
eth_macaddr=eth_macaddr,
|
||||
eth_bridge=network["eth_bridge"],
|
||||
)
|
||||
|
||||
network_id += 1
|
||||
|
||||
# Add default devices
|
||||
schema += libvirt_schema.devices_default
|
||||
|
||||
# Add serial device
|
||||
if self.vm_data["system_details"]["serial"]:
|
||||
schema += libvirt_schema.devices_serial.format(vm_name=self.vm_name)
|
||||
|
||||
# Add VNC device
|
||||
if self.vm_data["system_details"]["vnc"]:
|
||||
if self.vm_data["system_details"]["vnc_bind"]:
|
||||
vm_vnc_bind = self.vm_data["system_details"]["vnc_bind"]
|
||||
else:
|
||||
vm_vnc_bind = "127.0.0.1"
|
||||
|
||||
vm_vncport = 5900
|
||||
vm_vnc_autoport = "yes"
|
||||
|
||||
schema += libvirt_schema.devices_vnc.format(
|
||||
vm_vncport=vm_vncport,
|
||||
vm_vnc_autoport=vm_vnc_autoport,
|
||||
vm_vnc_bind=vm_vnc_bind,
|
||||
)
|
||||
|
||||
# Add SCSI controller
|
||||
schema += libvirt_schema.devices_scsi_controller
|
||||
|
||||
# Add footer
|
||||
schema += libvirt_schema.libvirt_footer
|
||||
|
||||
return schema
|
||||
|
||||
def prepare(self):
|
||||
"""
|
||||
prepare(): Prepare any disks/volumes for the install() step
|
||||
"""
|
||||
|
||||
# Run any imports first
|
||||
from pvcapid.vmbuilder import open_zk
|
||||
from pvcapid.Daemon import config
|
||||
import daemon_lib.common as pvc_common
|
||||
import daemon_lib.ceph as pvc_ceph
|
||||
|
||||
# First loop: Create the destination disks
|
||||
for volume in self.vm_data["volumes"]:
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.add_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
f"{self.vm_name}_{volume['disk_id']}",
|
||||
f"{volume['disk_size_gb']}G",
|
||||
)
|
||||
print(message)
|
||||
if not success:
|
||||
raise ProvisioningError(
|
||||
f"Failed to create volume '{volume['disk_id']}'."
|
||||
)
|
||||
|
||||
# Second loop: Map the destination disks
|
||||
for volume in self.vm_data["volumes"]:
|
||||
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
|
||||
dst_volume = f"{volume['pool']}/{dst_volume_name}"
|
||||
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.map_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
dst_volume_name,
|
||||
)
|
||||
print(message)
|
||||
if not success:
|
||||
raise ProvisioningError(f"Failed to map volume '{dst_volume}'.")
|
||||
|
||||
# Third loop: Map the source disks
|
||||
for volume in self.vm_data["volumes"]:
|
||||
src_volume_name = volume["volume_name"]
|
||||
src_volume = f"{volume['pool']}/{src_volume_name}"
|
||||
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.map_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
src_volume_name,
|
||||
)
|
||||
print(message)
|
||||
if not success:
|
||||
raise ProvisioningError(f"Failed to map volume '{src_volume}'.")
|
||||
|
||||
# Fourth loop: Convert the source (usually VMDK) volume to the raw destination volume
|
||||
for volume in self.vm_data["volumes"]:
|
||||
src_volume_name = volume["volume_name"]
|
||||
src_volume = f"{volume['pool']}/{src_volume_name}"
|
||||
src_devpath = f"/dev/rbd/{src_volume}"
|
||||
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
|
||||
dst_volume = f"{volume['pool']}/{dst_volume_name}"
|
||||
dst_devpath = f"/dev/rbd/{dst_volume}"
|
||||
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
f"qemu-img convert -C -f {volume['volume_format']} -O raw {src_devpath} {dst_devpath}"
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
f"Failed to convert {volume['volume_format']} volume '{src_volume}' to raw volume '{dst_volume}' with qemu-img: {stderr}"
|
||||
)
|
||||
|
||||
def install(self):
|
||||
"""
|
||||
install(): Perform the installation
|
||||
|
||||
Noop for OVA deploys as no further tasks are performed.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
cleanup(): Perform any cleanup required due to prepare()/install()
|
||||
|
||||
This function is also called if there is ANY exception raised in the prepare()
|
||||
or install() steps. While this doesn't mean you shouldn't or can't raise exceptions
|
||||
here, be warned that doing so might cause loops. Do this only if you really need to.
|
||||
"""
|
||||
|
||||
for volume in list(reversed(self.vm_data["volumes"])):
|
||||
src_volume_name = volume["volume_name"]
|
||||
src_volume = f"{volume['pool']}/{src_volume_name}"
|
||||
src_devpath = f"/dev/rbd/{src_volume}"
|
||||
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.unmap_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
src_volume_name,
|
||||
)
|
||||
if not success:
|
||||
raise ProvisioningError(
|
||||
f"Failed to unmap source volume '{src_volume_name}': {message}"
|
||||
)
|
||||
|
||||
for volume in list(reversed(self.vm_data["volumes"])):
|
||||
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
|
||||
dst_volume = f"{volume['pool']}/{dst_volume_name}"
|
||||
dst_devpath = f"/dev/rbd/{dst_volume}"
|
||||
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.unmap_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
dst_volume_name,
|
||||
)
|
||||
if not success:
|
||||
raise ProvisioningError(
|
||||
f"Failed to unmap destination volume '{dst_volume_name}': {message}"
|
||||
)
|
685
api-daemon/provisioner/examples/script/3-debootstrap.py
Normal file
685
api-daemon/provisioner/examples/script/3-debootstrap.py
Normal file
@ -0,0 +1,685 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# 2-debootstrap.py - PVC Provisioner example script for debootstrap install
|
||||
# Part of the Parallel Virtual Cluster (PVC) system
|
||||
#
|
||||
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, version 3.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
# This script provides an example of a PVC provisioner script. It will create a
|
||||
# standard VM config and install a Debian-like OS using debootstrap.
|
||||
|
||||
# This script can thus be used as an example or reference implementation of a
|
||||
# PVC provisioner script and expanded upon as required.
|
||||
# *** READ THIS SCRIPT THOROUGHLY BEFORE USING TO UNDERSTAND HOW IT WORKS. ***
|
||||
|
||||
# A script must implement the class "VMBuilderScript" which extends "VMBuilder",
|
||||
# providing the 5 functions indicated. Detailed explanation of the role of each
|
||||
# function is provided in context of the example; see the other examples for
|
||||
# more potential uses.
|
||||
|
||||
# Within the VMBuilderScript class, several common variables are exposed through
|
||||
# the parent VMBuilder class:
|
||||
# self.vm_name: The name of the VM from PVC's perspective
|
||||
# self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
|
||||
# self.vm_uuid: An automatically-generated UUID for the VM
|
||||
# self.vm_profile: The PVC provisioner profile name used for the VM
|
||||
# self.vm_data: A dictionary of VM data collected by the provisioner; as an example:
|
||||
# {
|
||||
# "ceph_monitor_list": [
|
||||
# "hv1.pvcstorage.tld",
|
||||
# "hv2.pvcstorage.tld",
|
||||
# "hv3.pvcstorage.tld"
|
||||
# ],
|
||||
# "ceph_monitor_port": "6789",
|
||||
# "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
|
||||
# "mac_template": null,
|
||||
# "networks": [
|
||||
# {
|
||||
# "eth_bridge": "vmbr1001",
|
||||
# "id": 72,
|
||||
# "network_template": 69,
|
||||
# "vni": "1001"
|
||||
# },
|
||||
# {
|
||||
# "eth_bridge": "vmbr101",
|
||||
# "id": 73,
|
||||
# "network_template": 69,
|
||||
# "vni": "101"
|
||||
# }
|
||||
# ],
|
||||
# "script": [contents of this file]
|
||||
# "script_arguments": {
|
||||
# "deb_mirror": "http://ftp.debian.org/debian",
|
||||
# "deb_release": "bullseye"
|
||||
# },
|
||||
# "system_architecture": "x86_64",
|
||||
# "system_details": {
|
||||
# "id": 78,
|
||||
# "migration_method": "live",
|
||||
# "name": "small",
|
||||
# "node_autostart": false,
|
||||
# "node_limit": null,
|
||||
# "node_selector": null,
|
||||
# "ova": null,
|
||||
# "serial": true,
|
||||
# "vcpu_count": 2,
|
||||
# "vnc": false,
|
||||
# "vnc_bind": null,
|
||||
# "vram_mb": 2048
|
||||
# },
|
||||
# "volumes": [
|
||||
# {
|
||||
# "disk_id": "sda",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=root",
|
||||
# "id": 9,
|
||||
# "mountpoint": "/",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# },
|
||||
# {
|
||||
# "disk_id": "sdb",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=var",
|
||||
# "id": 10,
|
||||
# "mountpoint": "/var",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# },
|
||||
# {
|
||||
# "disk_id": "sdc",
|
||||
# "disk_size_gb": 4,
|
||||
# "filesystem": "ext4",
|
||||
# "filesystem_args": "-L=log",
|
||||
# "id": 11,
|
||||
# "mountpoint": "/var/log",
|
||||
# "pool": "vms",
|
||||
# "source_volume": null,
|
||||
# "storage_template": 67
|
||||
# }
|
||||
# ]
|
||||
# }
|
||||
#
|
||||
# Any other information you may require must be obtained manually.
|
||||
|
||||
# WARNING:
|
||||
#
|
||||
# For safety reasons, the script runs in a modified chroot. It will have full access to
|
||||
# the entire / (root partition) of the hypervisor, but read-only. In addition it has
|
||||
# access to /dev, /sys, /run, and a fresh /tmp to write to; use /tmp/target (as
|
||||
# convention) as the destination for any mounting of volumes and installation.
|
||||
# Of course, in addition to this safety, it is VERY IMPORTANT to be aware that this
|
||||
# script runs AS ROOT ON THE HYPERVISOR SYSTEM. You should never allow arbitrary,
|
||||
# untrusted users the ability to add provisioning scripts even with this safeguard,
|
||||
# since they could still do destructive things to /dev and the like!
|
||||
|
||||
|
||||
# This import is always required here, as VMBuilder is used by the VMBuilderScript class
|
||||
# and ProvisioningError is the primary exception that should be raised within the class.
|
||||
from pvcapid.vmbuilder import VMBuilder, ProvisioningError
|
||||
|
||||
|
||||
# The VMBuilderScript class must be named as such, and extend VMBuilder.
|
||||
class VMBuilderScript(VMBuilder):
|
||||
def setup(self):
|
||||
"""
|
||||
setup(): Perform special setup steps or validation before proceeding
|
||||
|
||||
This example uses the PVC built-in command runner to verify that debootstrap is
|
||||
installed and throws and error if not.
|
||||
|
||||
Note that, due to the aforementioned chroot, you *cannot* install or otherwise
|
||||
modify the hypervisor system here: any tooling, etc. must be pre-installed.
|
||||
"""
|
||||
|
||||
# Run any imports first; as shown here, you can import anything from the PVC
|
||||
# namespace, as well as (of course) the main Python namespaces
|
||||
import daemon_lib.common as pvc_common
|
||||
|
||||
# Ensure we have debootstrap intalled on the provisioner system
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(f"which debootstrap")
|
||||
if retcode:
|
||||
# Raise a ProvisioningError for any exception; the provisioner will handle
|
||||
# this gracefully and properly, avoiding dangling mounts, RBD maps, etc.
|
||||
raise ProvisioningError("Failed to find critical dependency: debootstrap")
|
||||
|
||||
def create(self):
|
||||
"""
|
||||
create(): Create the VM libvirt schema definition
|
||||
|
||||
This step *must* return a fully-formed Libvirt XML document as a string or the
|
||||
provisioning task will fail.
|
||||
|
||||
This example leverages the built-in libvirt_schema objects provided by PVC; these
|
||||
can be used as-is, or replaced with your own schema(s) on a per-script basis.
|
||||
"""
|
||||
|
||||
# Run any imports first
|
||||
import pvcapid.libvirt_schema as libvirt_schema
|
||||
import datetime
|
||||
import random
|
||||
|
||||
# Create the empty schema document that we will append to and return at the end
|
||||
schema = ""
|
||||
|
||||
# Prepare a description based on the VM profile
|
||||
description = (
|
||||
f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}'"
|
||||
)
|
||||
|
||||
# Format the header
|
||||
schema += libvirt_schema.libvirt_header.format(
|
||||
vm_name=self.vm_name,
|
||||
vm_uuid=self.vm_uuid,
|
||||
vm_description=description,
|
||||
vm_memory=self.vm_data["system_details"]["vram_mb"],
|
||||
vm_vcpus=self.vm_data["system_details"]["vcpu_count"],
|
||||
vm_architecture=self.vm_data["system_architecture"],
|
||||
)
|
||||
|
||||
# Add the disk devices
|
||||
monitor_list = self.vm_data["ceph_monitor_list"]
|
||||
monitor_port = self.vm_data["ceph_monitor_port"]
|
||||
monitor_secret = self.vm_data["ceph_monitor_secret"]
|
||||
|
||||
for volume in self.vm_data["volumes"]:
|
||||
schema += libvirt_schema.devices_disk_header.format(
|
||||
ceph_storage_secret=monitor_secret,
|
||||
disk_pool=volume["pool"],
|
||||
vm_name=self.vm_name,
|
||||
disk_id=volume["disk_id"],
|
||||
)
|
||||
for monitor in monitor_list:
|
||||
schema += libvirt_schema.devices_disk_coordinator.format(
|
||||
coordinator_name=monitor,
|
||||
coordinator_ceph_mon_port=monitor_port,
|
||||
)
|
||||
schema += libvirt_schema.devices_disk_footer
|
||||
|
||||
# Add the special vhostmd device for hypervisor information inside the VM
|
||||
schema += libvirt_schema.devices_vhostmd
|
||||
|
||||
# Add the network devices
|
||||
network_id = 0
|
||||
for network in self.vm_data["networks"]:
|
||||
vm_id_hex = "{:x}".format(int(self.vm_id % 16))
|
||||
net_id_hex = "{:x}".format(int(network_id % 16))
|
||||
|
||||
if self.vm_data.get("mac_template") is not None:
|
||||
mac_prefix = "52:54:01"
|
||||
macgen_template = self.vm_data["mac_template"]
|
||||
eth_macaddr = macgen_template.format(
|
||||
prefix=mac_prefix, vmid=vm_id_hex, netid=net_id_hex
|
||||
)
|
||||
else:
|
||||
mac_prefix = "52:54:00"
|
||||
random_octet_A = "{:x}".format(random.randint(16, 238))
|
||||
random_octet_B = "{:x}".format(random.randint(16, 238))
|
||||
random_octet_C = "{:x}".format(random.randint(16, 238))
|
||||
|
||||
macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
|
||||
eth_macaddr = macgen_template.format(
|
||||
prefix=mac_prefix,
|
||||
octetA=random_octet_A,
|
||||
octetB=random_octet_B,
|
||||
octetC=random_octet_C,
|
||||
)
|
||||
|
||||
schema += libvirt_schema.devices_net_interface.format(
|
||||
eth_macaddr=eth_macaddr,
|
||||
eth_bridge=network["eth_bridge"],
|
||||
)
|
||||
|
||||
network_id += 1
|
||||
|
||||
# Add default devices
|
||||
schema += libvirt_schema.devices_default
|
||||
|
||||
# Add serial device
|
||||
if self.vm_data["system_details"]["serial"]:
|
||||
schema += libvirt_schema.devices_serial.format(vm_name=self.vm_name)
|
||||
|
||||
# Add VNC device
|
||||
if self.vm_data["system_details"]["vnc"]:
|
||||
if self.vm_data["system_details"]["vnc_bind"]:
|
||||
vm_vnc_bind = self.vm_data["system_details"]["vnc_bind"]
|
||||
else:
|
||||
vm_vnc_bind = "127.0.0.1"
|
||||
|
||||
vm_vncport = 5900
|
||||
vm_vnc_autoport = "yes"
|
||||
|
||||
schema += libvirt_schema.devices_vnc.format(
|
||||
vm_vncport=vm_vncport,
|
||||
vm_vnc_autoport=vm_vnc_autoport,
|
||||
vm_vnc_bind=vm_vnc_bind,
|
||||
)
|
||||
|
||||
# Add SCSI controller
|
||||
schema += libvirt_schema.devices_scsi_controller
|
||||
|
||||
# Add footer
|
||||
schema += libvirt_schema.libvirt_footer
|
||||
|
||||
return schema
|
||||
|
||||
def prepare(self):
|
||||
"""
|
||||
prepare(): Prepare any disks/volumes for the install() step
|
||||
|
||||
This function should use the various exposed PVC commands as indicated to create
|
||||
RBD block devices and map them to the host as required.
|
||||
|
||||
open_zk is exposed from pvcapid.vmbuilder to provide a context manager for opening
|
||||
connections to the PVC Zookeeper cluster; ensure you also import (and pass it)
|
||||
the config object from pvcapid.Daemon as well. This context manager then allows
|
||||
the use of various common daemon library functions, without going through the API.
|
||||
"""
|
||||
|
||||
# Run any imports first
|
||||
import os
|
||||
from pvcapid.vmbuilder import open_zk
|
||||
from pvcapid.Daemon import config
|
||||
import daemon_lib.common as pvc_common
|
||||
import daemon_lib.ceph as pvc_ceph
|
||||
|
||||
# First loop: Create the disks, either by cloning (pvc_ceph.clone_volume), or by
|
||||
# new creation (pvc_ceph.add_volume), depending on the source_volume entry
|
||||
for volume in self.vm_data["volumes"]:
|
||||
if volume.get("source_volume") is not None:
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.clone_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
volume["source_volume"],
|
||||
f"{self.vm_name}_{volume['disk_id']}",
|
||||
)
|
||||
print(message)
|
||||
if not success:
|
||||
raise ProvisioningError(
|
||||
f"Failed to clone volume '{volume['source_volume']}' to '{volume['disk_id']}'."
|
||||
)
|
||||
else:
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.add_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
f"{self.vm_name}_{volume['disk_id']}",
|
||||
f"{volume['disk_size_gb']}G",
|
||||
)
|
||||
print(message)
|
||||
if not success:
|
||||
raise ProvisioningError(
|
||||
f"Failed to create volume '{volume['disk_id']}'."
|
||||
)
|
||||
|
||||
# Second loop: Map the disks to the local system
|
||||
for volume in self.vm_data["volumes"]:
|
||||
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
|
||||
dst_volume = f"{volume['pool']}/{dst_volume_name}"
|
||||
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.map_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
dst_volume_name,
|
||||
)
|
||||
print(message)
|
||||
if not success:
|
||||
raise ProvisioningError(f"Failed to map volume '{dst_volume}'.")
|
||||
|
||||
# Third loop: Create filesystems on the volumes
|
||||
for volume in self.vm_data["volumes"]:
|
||||
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
|
||||
dst_volume = f"{volume['pool']}/{dst_volume_name}"
|
||||
|
||||
if volume.get("source_volume") is not None:
|
||||
continue
|
||||
|
||||
if volume.get("filesystem") is None:
|
||||
continue
|
||||
|
||||
filesystem_args_list = list()
|
||||
for arg in volume["filesystem_args"].split():
|
||||
arg_entry, *arg_data = arg.split("=")
|
||||
arg_data = "=".join(arg_data)
|
||||
filesystem_args_list.append(arg_entry)
|
||||
filesystem_args_list.append(arg_data)
|
||||
filesystem_args = " ".join(filesystem_args_list)
|
||||
|
||||
if volume["filesystem"] == "swap":
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
f"mkswap -f /dev/rbd/{dst_volume}"
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
f"Failed to create swap on '{dst_volume}': {stderr}"
|
||||
)
|
||||
else:
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
f"mkfs.{volume['filesystem']} {filesystem_args} /dev/rbd/{dst_volume}"
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
f"Faield to create {volume['filesystem']} file on '{dst_volume}': {stderr}"
|
||||
)
|
||||
|
||||
print(stdout)
|
||||
|
||||
# Create a temporary directory to use during install
|
||||
temp_dir = "/tmp/target"
|
||||
|
||||
if not os.path.isdir(temp_dir):
|
||||
os.mkdir(temp_dir)
|
||||
|
||||
# Fourth loop: Mount the volumes to a set of temporary directories
|
||||
for volume in self.vm_data["volumes"]:
|
||||
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
|
||||
dst_volume = f"{volume['pool']}/{dst_volume_name}"
|
||||
|
||||
if volume.get("source_volume") is not None:
|
||||
continue
|
||||
|
||||
if volume.get("filesystem") is None:
|
||||
continue
|
||||
|
||||
mapped_dst_volume = f"/dev/rbd/{dst_volume}"
|
||||
|
||||
mount_path = f"{temp_dir}/{volume['mountpoint']}"
|
||||
|
||||
if not os.path.isdir(mount_path):
|
||||
os.mkdir(mount_path)
|
||||
|
||||
# Mount filesystem
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
f"mount {mapped_dst_volume} {mount_path}"
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
f"Failed to mount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
|
||||
)
|
||||
|
||||
def install(self):
|
||||
"""
|
||||
install(): Perform the installation
|
||||
|
||||
This example, unlike noop, performs a full debootstrap install and base config
|
||||
of a Debian-like system, including installing GRUB for fully-virtualized boot
|
||||
(required by PVC) and cloud-init for later configuration with the PVC userdata
|
||||
functionality, leveraging a PVC managed network on the first NIC for DHCP.
|
||||
|
||||
Several arguments are also supported; these can be set either in the provisioner
|
||||
profile itself, or on the command line at runtime.
|
||||
|
||||
To show the options, this function does not use the previous PVC-exposed
|
||||
run_os_command function, but instead just uses os.system. The downside here is
|
||||
a lack of response and error handling, but the upside is simpler-to-read code.
|
||||
Use whichever you feel is appropriate for your situation.
|
||||
"""
|
||||
|
||||
# Run any imports first
|
||||
import os
|
||||
from pvcapid.vmbuilder import chroot
|
||||
|
||||
# The directory we mounted things on earlier during prepare(); this could very well
|
||||
# be exposed as a module-level variable if you so choose
|
||||
temporary_directory = "/tmp/target"
|
||||
|
||||
# Use these convenient aliases for later (avoiding lots of "self.vm_data" everywhere)
|
||||
vm_name = self.vm_name
|
||||
volumes = self.vm_data["volumes"]
|
||||
networks = self.vm_data["networks"]
|
||||
|
||||
# Parse these arguments out of self.vm_data["script_arguments"]
|
||||
if self.vm_data["script_arguments"].get("deb_release") is not None:
|
||||
deb_release = self.vm_data["script_arguments"].get("deb_release")
|
||||
else:
|
||||
deb_release = "stable"
|
||||
|
||||
if self.vm_data["script_arguments"].get("deb_mirror") is not None:
|
||||
deb_mirror = self.vm_data["script_arguments"].get("deb_mirror")
|
||||
else:
|
||||
deb_mirror = "http://ftp.debian.org/debian"
|
||||
|
||||
if self.vm_data["script_arguments"].get("deb_packages") is not None:
|
||||
deb_packages = (
|
||||
self.vm_data["script_arguments"].get("deb_packages").split(",")
|
||||
)
|
||||
else:
|
||||
deb_packages = [
|
||||
"linux-image-amd64",
|
||||
"grub-pc",
|
||||
"cloud-init",
|
||||
"python3-cffi-backend",
|
||||
"wget",
|
||||
]
|
||||
|
||||
# We need to know our root disk for later GRUB-ing
|
||||
root_volume = None
|
||||
for volume in volumes:
|
||||
if volume["mountpoint"] == "/":
|
||||
root_volume = volume
|
||||
if not root_volume:
|
||||
raise ProvisioningError("Failed to find root volume in volumes list")
|
||||
|
||||
# Perform a debootstrap installation
|
||||
os.system(
|
||||
"debootstrap --include={pkgs} {suite} {target} {mirror}".format(
|
||||
suite=deb_release,
|
||||
target=temporary_directory,
|
||||
mirror=deb_mirror,
|
||||
pkgs=",".join(deb_packages),
|
||||
)
|
||||
)
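# With the defaults above, this expands to roughly (illustrative):
#   debootstrap --include=linux-image-amd64,grub-pc,cloud-init,python3-cffi-backend,wget \
#       stable /tmp/target http://ftp.debian.org/debian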
|
||||
|
||||
# Bind mount the devfs so we can grub-install later
|
||||
os.system("mount --bind /dev {}/dev".format(temporary_directory))
|
||||
|
||||
# Create an fstab entry for each volume
|
||||
fstab_file = "{}/etc/fstab".format(temporary_directory)
|
||||
# The volume ID starts at zero and increments by one for each volume in the fixed-order
|
||||
# volume list. This lets us work around the insanity of Libvirt IDs not matching guest IDs,
|
||||
# while still letting us have some semblance of control here without enforcing things
|
||||
# like labels. It increments in the for loop below at the end of each iteration, and is
|
||||
# used to craft a /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-X device ID
|
||||
# which will always match the correct order from Libvirt (unlike sdX/vdX names).
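# For instance, the second volume in the list (volume_id 1) will always appear
# as /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-1 inside the VM,
# regardless of which sdX/vdX name the guest kernel assigns it.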
|
||||
volume_id = 0
|
||||
for volume in volumes:
|
||||
# We assume SSD-based/-like storage (because Ceph behaves this way), and dislike atimes
|
||||
options = "defaults,discard,noatime,nodiratime"
|
||||
|
||||
# The root, var, and log volumes have specific values
|
||||
if volume["mountpoint"] == "/":
|
||||
# This will be used later by GRUB's cmdline
|
||||
root_volume["scsi_id"] = volume_id
|
||||
dump = 0
|
||||
cpass = 1
|
||||
elif volume["mountpoint"] == "/var" or volume["mountpoint"] == "/var/log":
|
||||
dump = 0
|
||||
cpass = 2
|
||||
else:
|
||||
dump = 0
|
||||
cpass = 0
|
||||
|
||||
# Append the fstab line
|
||||
with open(fstab_file, "a") as fh:
|
||||
# Using these /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK entries guarantees
|
||||
# proper ordering; /dev/sdX (or similar) names are NOT guaranteed to be
|
||||
# in any order nor are they guaranteed to match the volume's sdX/vdX name
|
||||
# when inside the VM due to Linux's quirks.
|
||||
data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{volume} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
|
||||
volume=volume_id,
|
||||
mountpoint=volume["mountpoint"],
|
||||
filesystem=volume["filesystem"],
|
||||
options=options,
|
||||
dump=dump,
|
||||
cpass=cpass,
|
||||
)
|
||||
fh.write(data)
|
||||
|
||||
# Increment the volume_id
|
||||
volume_id += 1
|
||||
|
||||
# Write the hostname; you could also take an FQDN argument for this as an example
|
||||
hostname_file = "{}/etc/hostname".format(temporary_directory)
|
||||
with open(hostname_file, "w") as fh:
|
||||
fh.write("{}".format(vm_name))
|
||||
|
||||
# Fix the cloud-init.target since it's broken by default in Debian 11
|
||||
cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
|
||||
temporary_directory
|
||||
)
|
||||
with open(cloudinit_target_file, "w") as fh:
|
||||
# We lose our indent on these raw blocks to preserve the appearance of the files
|
||||
# inside the VM itself
|
||||
data = """[Install]
|
||||
WantedBy=multi-user.target
|
||||
[Unit]
|
||||
Description=Cloud-init target
|
||||
After=multi-user.target
|
||||
"""
|
||||
fh.write(data)
|
||||
|
||||
# Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
|
||||
# will always be on PCI bus ID 2, hence the name "ens2".
|
||||
# Write a DHCP stanza for ens2
|
||||
ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(
|
||||
temporary_directory
|
||||
)
|
||||
with open(ens2_network_file, "w") as fh:
|
||||
data = """auto ens2
|
||||
iface ens2 inet dhcp
|
||||
"""
|
||||
fh.write(data)
|
||||
|
||||
# Write the DHCP config for ens2
|
||||
dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
|
||||
with open(dhclient_file, "w") as fh:
|
||||
# We can use fstrings too, since PVC will always have Python 3.6+, though
|
||||
# using format() might be preferable for clarity in some situations
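# Note that literal braces in the dhclient block below must be doubled as
# "{{" and "}}" inside an f-string so they are not treated as substitution
# fields; only the {vm_name} references are substituted.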
|
||||
data = f"""# DHCP client configuration
|
||||
# Written by the PVC provisioner
|
||||
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
|
||||
interface "ens2" {{
|
||||
send fqdn.fqdn = "{vm_name}";
|
||||
send host-name = "{vm_name}";
|
||||
request subnet-mask, broadcast-address, time-offset, routers,
|
||||
domain-name, domain-name-servers, domain-search, host-name,
|
||||
dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
|
||||
netbios-name-servers, netbios-scope, interface-mtu,
|
||||
rfc3442-classless-static-routes, ntp-servers;
|
||||
}}
|
||||
"""
|
||||
fh.write(data)
|
||||
|
||||
# Write the GRUB configuration
|
||||
grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
|
||||
with open(grubcfg_file, "w") as fh:
|
||||
data = """# Written by the PVC provisioner
|
||||
GRUB_DEFAULT=0
|
||||
GRUB_TIMEOUT=1
|
||||
GRUB_DISTRIBUTOR="PVC Virtual Machine"
|
||||
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{root_volume} console=tty0 console=ttyS0,115200n8"
|
||||
GRUB_CMDLINE_LINUX=""
|
||||
GRUB_TERMINAL=console
|
||||
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
|
||||
GRUB_DISABLE_LINUX_UUID=false
|
||||
""".format(
|
||||
root_volume=root_volume["scsi_id"]
|
||||
)
|
||||
fh.write(data)
|
||||
|
||||
# Do some tasks inside the chroot using the provided context manager
|
||||
with chroot(temporary_directory):
|
||||
# Install and update GRUB
|
||||
os.system(
|
||||
"grub-install --force /dev/rbd/{}/{}_{}".format(
|
||||
root_volume["pool"], vm_name, root_volume["disk_id"]
|
||||
)
|
||||
)
|
||||
os.system("update-grub")
|
||||
|
||||
# Set a really dumb root password so the VM can be debugged
|
||||
# EITHER CHANGE THIS YOURSELF, here or in Userdata, or run something after install
|
||||
# to change the root password: don't leave it like this on an Internet-facing machine!
|
||||
os.system("echo root:test123 | chpasswd")
|
||||
|
||||
# Enable cloud-init target on (first) boot
|
||||
# Your user-data should handle this and disable it once done, or things get messy.
|
||||
# That cloud-init won't run without this hack seems like a bug... but even the official
|
||||
# Debian cloud images are affected, so who knows.
|
||||
os.system("systemctl enable cloud-init.target")
|
||||
|
||||
# Unmount the bound devfs
|
||||
os.system("umount {}/dev".format(temporary_directory))
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
cleanup(): Perform any cleanup required due to prepare()/install()
|
||||
|
||||
It is important to now reverse *all* steps taken in those functions that might
|
||||
need cleanup before teardown of the upper chroot environment.
|
||||
|
||||
This function is also called if there is ANY exception raised in the prepare()
|
||||
or install() steps. While this doesn't mean you shouldn't or can't raise exceptions
|
||||
here, be warned that doing so might cause loops. Do this only if you really need to.
|
||||
"""
|
||||
|
||||
# Run any imports first
|
||||
from pvcapid.vmbuilder import open_zk
|
||||
from pvcapid.Daemon import config
|
||||
import daemon_lib.common as pvc_common
|
||||
import daemon_lib.ceph as pvc_ceph
|
||||
|
||||
# Set the tempdir we used in the prepare() and install() steps
|
||||
temp_dir = "/tmp/target"
|
||||
|
||||
# Iterate over a reversed copy of the volume list; the in-place reverse() would modify self.vm_data
|
||||
for volume in list(reversed(self.vm_data["volumes"])):
|
||||
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
|
||||
dst_volume = f"{volume['pool']}/{dst_volume_name}"
|
||||
mapped_dst_volume = f"/dev/rbd/{dst_volume}"
|
||||
mount_path = f"{temp_dir}/{volume['mountpoint']}"
|
||||
|
||||
if (
|
||||
volume.get("source_volume") is None
|
||||
and volume.get("filesystem") is not None
|
||||
):
|
||||
# Unmount filesystem
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
f"umount {mount_path}"
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
f"Failed to unmount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
|
||||
)
|
||||
|
||||
# Unmap volume
|
||||
with open_zk(config) as zkhandler:
|
||||
success, message = pvc_ceph.unmap_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
dst_volume_name,
|
||||
)
|
||||
if not success:
|
||||
raise ProvisioningError(
|
||||
f"Failed to unmap '{mapped_dst_volume}': {message}"
|
||||
)
|
@ -29,6 +29,7 @@ from pvcapid.Daemon import config, strtobool, API_VERSION
|
||||
|
||||
import pvcapid.helper as api_helper
|
||||
import pvcapid.provisioner as api_provisioner
|
||||
import pvcapid.vmbuilder as api_vmbuilder
|
||||
import pvcapid.benchmark as api_benchmark
|
||||
import pvcapid.ova as api_ova
|
||||
|
||||
@ -144,7 +145,7 @@ def Authenticator(function):
|
||||
def create_vm(
|
||||
self, vm_name, profile_name, define_vm=True, start_vm=True, script_run_args=[]
|
||||
):
|
||||
return api_provisioner.create_vm(
|
||||
return api_vmbuilder.create_vm(
|
||||
self,
|
||||
vm_name,
|
||||
profile_name,
|
||||
@ -7351,11 +7352,19 @@ class API_Provisioner_Profile_Root(Resource):
|
||||
"required": True,
|
||||
"helptext": "A profile type must be specified.",
|
||||
},
|
||||
{"name": "system_template"},
|
||||
{
|
||||
"name": "system_template",
|
||||
"required": True,
|
||||
"helptext": "A system_template must be specified.",
|
||||
},
|
||||
{"name": "network_template"},
|
||||
{"name": "storage_template"},
|
||||
{"name": "userdata"},
|
||||
{"name": "script"},
|
||||
{
|
||||
"name": "script",
|
||||
"required": True,
|
||||
"helptext": "A script must be specified.",
|
||||
},
|
||||
{"name": "ova"},
|
||||
{"name": "arg", "action": "append"},
|
||||
]
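# (Illustrative, parameter values assumed): with these changes a profile
# creation request must supply both "script" and "system_template" alongside
# "profile_type", e.g.
#   ?profile_type=provisioner&system_template=small&script=debootstrap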
|
||||
@ -7384,12 +7393,12 @@ class API_Provisioner_Profile_Root(Resource):
|
||||
- in: query
|
||||
name: script
|
||||
type: string
|
||||
required: false
|
||||
required: true
|
||||
description: Script name
|
||||
- in: query
|
||||
name: system_template
|
||||
type: string
|
||||
required: false
|
||||
required: true
|
||||
description: System template name
|
||||
- in: query
|
||||
name: network_template
|
||||
@ -7472,11 +7481,19 @@ class API_Provisioner_Profile_Element(Resource):
|
||||
"required": True,
|
||||
"helptext": "A profile type must be specified.",
|
||||
},
|
||||
{"name": "system_template"},
|
||||
{
|
||||
"name": "system_template",
|
||||
"required": True,
|
||||
"helptext": "A system_template must be specified.",
|
||||
},
|
||||
{"name": "network_template"},
|
||||
{"name": "storage_template"},
|
||||
{"name": "userdata"},
|
||||
{"name": "script"},
|
||||
{
|
||||
"name": "script",
|
||||
"required": True,
|
||||
"helptext": "A script must be specified.",
|
||||
},
|
||||
{"name": "ova"},
|
||||
{"name": "arg", "action": "append"},
|
||||
]
|
||||
@ -7510,17 +7527,17 @@ class API_Provisioner_Profile_Element(Resource):
|
||||
- in: query
|
||||
name: network_template
|
||||
type: string
|
||||
required: true
|
||||
required: false
|
||||
description: Network template name
|
||||
- in: query
|
||||
name: storage_template
|
||||
type: string
|
||||
required: true
|
||||
required: false
|
||||
description: Storage template name
|
||||
- in: query
|
||||
name: userdata
|
||||
type: string
|
||||
required: true
|
||||
required: false
|
||||
description: Userdata template name
|
||||
- in: query
|
||||
name: ova
|
||||
|
@ -230,11 +230,13 @@ class DBProfile(db.Model):
|
||||
id = db.Column(db.Integer, primary_key=True)
|
||||
name = db.Column(db.Text, nullable=False, unique=True)
|
||||
profile_type = db.Column(db.Text, nullable=False)
|
||||
system_template = db.Column(db.Integer, db.ForeignKey("system_template.id"))
|
||||
system_template = db.Column(
|
||||
db.Integer, db.ForeignKey("system_template.id"), nullable=False
|
||||
)
|
||||
network_template = db.Column(db.Integer, db.ForeignKey("network_template.id"))
|
||||
storage_template = db.Column(db.Integer, db.ForeignKey("storage_template.id"))
|
||||
userdata = db.Column(db.Integer, db.ForeignKey("userdata.id"))
|
||||
script = db.Column(db.Integer, db.ForeignKey("script.id"))
|
||||
script = db.Column(db.Integer, db.ForeignKey("script.id"), nullable=False)
|
||||
ova = db.Column(db.Integer, db.ForeignKey("ova.id"))
|
||||
arguments = db.Column(db.Text)
|
||||
|
||||
|
@ -168,6 +168,15 @@ def delete_ova(zkhandler, name):
|
||||
|
||||
@ZKConnection(config)
|
||||
def upload_ova(zkhandler, pool, name, ova_size):
|
||||
# Check that we have a default_ova provisioning script
|
||||
_, retcode = provisioner.list_script("default_ova", is_fuzzy=False)
|
||||
if retcode != 200:
|
||||
output = {
|
||||
"message": "Did not find a 'default_ova' provisioning script. Please add one with that name, either the example from '/usr/share/pvc/provisioner/examples/script/2-ova.py' or a custom one, before uploading OVAs."
|
||||
}
|
||||
retcode = 400
|
||||
return output, retcode
|
||||
|
||||
ova_archive = None
|
||||
|
||||
# Cleanup function
|
||||
@ -402,7 +411,7 @@ def upload_ova(zkhandler, pool, name, ova_size):
|
||||
None,
|
||||
None,
|
||||
userdata=None,
|
||||
script=None,
|
||||
script="default_ova",
|
||||
ova=name,
|
||||
arguments=None,
|
||||
)
|
||||
|
@ -19,23 +19,12 @@
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
import json
|
||||
import psycopg2
|
||||
import psycopg2.extras
|
||||
import re
|
||||
|
||||
from pvcapid.Daemon import config, strtobool
|
||||
|
||||
from daemon_lib.zkhandler import ZKHandler
|
||||
|
||||
import daemon_lib.common as pvc_common
|
||||
import daemon_lib.node as pvc_node
|
||||
import daemon_lib.vm as pvc_vm
|
||||
import daemon_lib.network as pvc_network
|
||||
import daemon_lib.ceph as pvc_ceph
|
||||
|
||||
import pvcapid.libvirt_schema as libvirt_schema
|
||||
|
||||
from pvcapid.ova import list_ova
|
||||
|
||||
|
||||
@ -1229,866 +1218,3 @@ def delete_profile(name):
|
||||
retcode = 400
|
||||
close_database(conn, cur)
|
||||
return retmsg, retcode
|
||||
|
||||
|
||||
#
|
||||
# Main VM provisioning function - executed by the Celery worker
|
||||
#
|
||||
def create_vm(
|
||||
self, vm_name, vm_profile, define_vm=True, start_vm=True, script_run_args=[]
|
||||
):
|
||||
# Runtime imports
|
||||
import time
|
||||
import importlib
|
||||
import uuid
|
||||
import datetime
|
||||
import random
|
||||
|
||||
temp_dir = None
|
||||
|
||||
time.sleep(2)
|
||||
|
||||
print(
|
||||
"Starting provisioning of VM '{}' with profile '{}'".format(vm_name, vm_profile)
|
||||
)
|
||||
|
||||
# Phase 0 - connect to databases
|
||||
try:
|
||||
db_conn, db_cur = open_database(config)
|
||||
except Exception:
|
||||
raise ClusterError("Failed to connect to Postgres")
|
||||
|
||||
try:
|
||||
zkhandler = ZKHandler(config)
|
||||
zkhandler.connect()
|
||||
except Exception:
|
||||
raise ClusterError("Failed to connect to Zookeeper")
|
||||
|
||||
# Phase 1 - setup
|
||||
# * Get the profile elements
|
||||
# * Get the details from these elements
|
||||
# * Assemble a VM configuration dictionary
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={"current": 1, "total": 10, "status": "Collecting configuration"},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
vm_id = re.findall(r"/(\d+)$/", vm_name)
|
||||
if not vm_id:
|
||||
vm_id = 0
|
||||
else:
|
||||
vm_id = vm_id[0]
|
||||
|
||||
vm_data = dict()
|
||||
|
||||
# Get the profile information
|
||||
query = "SELECT * FROM profile WHERE name = %s"
|
||||
args = (vm_profile,)
|
||||
db_cur.execute(query, args)
|
||||
profile_data = db_cur.fetchone()
|
||||
if profile_data.get("arguments"):
|
||||
vm_data["script_arguments"] = profile_data.get("arguments").split("|")
|
||||
else:
|
||||
vm_data["script_arguments"] = []
|
||||
|
||||
if profile_data.get("profile_type") == "ova":
|
||||
is_ova_install = True
|
||||
is_script_install = False # By definition
|
||||
else:
|
||||
is_ova_install = False
|
||||
|
||||
# Get the system details
|
||||
query = "SELECT * FROM system_template WHERE id = %s"
|
||||
args = (profile_data["system_template"],)
|
||||
db_cur.execute(query, args)
|
||||
vm_data["system_details"] = db_cur.fetchone()
|
||||
|
||||
# Get the MAC template
|
||||
query = "SELECT mac_template FROM network_template WHERE id = %s"
|
||||
args = (profile_data["network_template"],)
|
||||
db_cur.execute(query, args)
|
||||
db_row = db_cur.fetchone()
|
||||
if db_row:
|
||||
vm_data["mac_template"] = db_row.get("mac_template")
|
||||
else:
|
||||
vm_data["mac_template"] = None
|
||||
|
||||
# Get the networks
|
||||
query = "SELECT * FROM network WHERE network_template = %s"
|
||||
args = (profile_data["network_template"],)
|
||||
db_cur.execute(query, args)
|
||||
vm_data["networks"] = db_cur.fetchall()
|
||||
|
||||
# Get the storage volumes
|
||||
# ORDER BY ensures disks are always in the sdX/vdX order, regardless of add order
|
||||
query = "SELECT * FROM storage WHERE storage_template = %s ORDER BY disk_id"
|
||||
args = (profile_data["storage_template"],)
|
||||
db_cur.execute(query, args)
|
||||
vm_data["volumes"] = db_cur.fetchall()
|
||||
|
||||
# Get the script
|
||||
query = "SELECT script FROM script WHERE id = %s"
|
||||
args = (profile_data["script"],)
|
||||
db_cur.execute(query, args)
|
||||
db_row = db_cur.fetchone()
|
||||
if db_row:
|
||||
vm_data["script"] = db_row.get("script")
|
||||
else:
|
||||
vm_data["script"] = None
|
||||
|
||||
if vm_data["script"] and not is_ova_install:
|
||||
is_script_install = True
|
||||
else:
|
||||
is_script_install = False
|
||||
|
||||
# Get the OVA details
|
||||
if is_ova_install:
|
||||
query = "SELECT * FROM ova WHERE id = %s"
|
||||
args = (profile_data["ova"],)
|
||||
db_cur.execute(query, args)
|
||||
vm_data["ova_details"] = db_cur.fetchone()
|
||||
|
||||
query = "SELECT * FROM ova_volume WHERE ova = %s"
|
||||
args = (profile_data["ova"],)
|
||||
db_cur.execute(query, args)
|
||||
vm_data["volumes"] = db_cur.fetchall()
|
||||
|
||||
close_database(db_conn, db_cur)
|
||||
|
||||
print(
|
||||
"VM configuration data:\n{}".format(
|
||||
json.dumps(vm_data, sort_keys=True, indent=2)
|
||||
)
|
||||
)
|
||||
|
||||
# Phase 2 - verification
|
||||
# * Ensure that at least one node has enough free RAM to hold the VM (becomes main host)
|
||||
# * Ensure that all networks are valid
|
||||
# * Ensure that there is enough disk space in the Ceph cluster for the disks
|
||||
# This is the "safe fail" step when an invalid configuration will be caught
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={
|
||||
"current": 2,
|
||||
"total": 10,
|
||||
"status": "Verifying configuration against cluster",
|
||||
},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
# Verify that a VM with this name does not already exist
|
||||
if pvc_vm.searchClusterByName(zkhandler, vm_name):
|
||||
raise ClusterError(
|
||||
"A VM with the name '{}' already exists in the cluster.".format(vm_name)
|
||||
)
|
||||
|
||||
# Verify that at least one host has enough free RAM to run the VM
|
||||
_discard, nodes = pvc_node.get_list(zkhandler, None)
|
||||
target_node = None
|
||||
last_free = 0
|
||||
for node in nodes:
|
||||
# Skip the node if it is not ready to run VMs
|
||||
if node["daemon_state"] != "run" or node["domain_state"] != "ready":
|
||||
continue
|
||||
# Skip the node if its free memory is less than the new VM's size, plus a 512MB buffer
|
||||
if node["memory"]["free"] < (vm_data["system_details"]["vram_mb"] + 512):
|
||||
continue
|
||||
# If this node has the most free, use it
|
||||
if node["memory"]["free"] > last_free:
|
||||
last_free = node["memory"]["free"]
|
||||
target_node = node["name"]
|
||||
# Raise if no node was found
|
||||
if not target_node:
|
||||
raise ClusterError(
|
||||
"No ready cluster node contains at least {}+512 MB of free RAM.".format(
|
||||
vm_data["system_details"]["vram_mb"]
|
||||
)
|
||||
)
|
||||
|
||||
print(
|
||||
'Selecting target node "{}" with "{}" MB free RAM'.format(
|
||||
target_node, last_free
|
||||
)
|
||||
)
|
||||
|
||||
# Verify that all configured networks are present on the cluster
|
||||
cluster_networks, _discard = pvc_network.getClusterNetworkList(zkhandler)
|
||||
for network in vm_data["networks"]:
|
||||
vni = str(network["vni"])
|
||||
if vni not in cluster_networks and vni not in [
|
||||
"upstream",
|
||||
"cluster",
|
||||
"storage",
|
||||
]:
|
||||
raise ClusterError(
|
||||
'The network VNI "{}" is not present on the cluster.'.format(vni)
|
||||
)
|
||||
|
||||
print("All configured networks for VM are valid")
|
||||
|
||||
# Verify that there is enough disk space free to provision all VM disks
|
||||
pools = dict()
|
||||
for volume in vm_data["volumes"]:
|
||||
if volume.get("source_volume") is not None:
|
||||
volume_data = pvc_ceph.getVolumeInformation(
|
||||
zkhandler, volume["pool"], volume["source_volume"]
|
||||
)
|
||||
if not volume_data:
|
||||
raise ClusterError(
|
||||
"The source volume {}/{} could not be found.".format(
|
||||
volume["pool"], volume["source_volume"]
|
||||
)
|
||||
)
|
||||
if not volume["pool"] in pools:
|
||||
pools[volume["pool"]] = int(
|
||||
pvc_ceph.format_bytes_fromhuman(volume_data["stats"]["size"])
|
||||
/ 1024
|
||||
/ 1024
|
||||
/ 1024
|
||||
)
|
||||
else:
|
||||
pools[volume["pool"]] += int(
|
||||
pvc_ceph.format_bytes_fromhuman(volume_data["stats"]["size"])
|
||||
/ 1024
|
||||
/ 1024
|
||||
/ 1024
|
||||
)
|
||||
else:
|
||||
if not volume["pool"] in pools:
|
||||
pools[volume["pool"]] = volume["disk_size_gb"]
|
||||
else:
|
||||
pools[volume["pool"]] += volume["disk_size_gb"]
|
||||
|
||||
for pool in pools:
|
||||
try:
|
||||
pool_information = pvc_ceph.getPoolInformation(zkhandler, pool)
|
||||
if not pool_information:
|
||||
raise
|
||||
except Exception:
|
||||
raise ClusterError('Pool "{}" is not present on the cluster.'.format(pool))
|
||||
pool_free_space_gb = int(
|
||||
pool_information["stats"]["free_bytes"] / 1024 / 1024 / 1024
|
||||
)
|
||||
pool_vm_usage_gb = int(pools[pool])
|
||||
|
||||
if pool_vm_usage_gb >= pool_free_space_gb:
|
||||
raise ClusterError(
|
||||
'Pool "{}" has only {} GB free and VM requires {} GB.'.format(
|
||||
pool, pool_free_space_gb, pool_vm_usage_gb
|
||||
)
|
||||
)
|
||||
|
||||
print("There is enough space on cluster to store VM volumes")
|
||||
|
||||
if not is_ova_install:
|
||||
# Verify that every specified filesystem is valid
|
||||
used_filesystems = list()
|
||||
for volume in vm_data["volumes"]:
|
||||
if volume["source_volume"] is not None:
|
||||
continue
|
||||
if volume["filesystem"] and volume["filesystem"] not in used_filesystems:
|
||||
used_filesystems.append(volume["filesystem"])
|
||||
|
||||
for filesystem in used_filesystems:
|
||||
if filesystem == "swap":
|
||||
retcode, stdout, stderr = pvc_common.run_os_command("which mkswap")
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
"Failed to find binary for mkswap: {}".format(stderr)
|
||||
)
|
||||
else:
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"which mkfs.{}".format(filesystem)
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
"Failed to find binary for mkfs.{}: {}".format(
|
||||
filesystem, stderr
|
||||
)
|
||||
)
|
||||
|
||||
print("All selected filesystems are valid")
|
||||
|
||||
# Phase 3 - provisioning script preparation
|
||||
# * Import the provisioning script as a library with importlib
|
||||
# * Ensure the required function(s) are present
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={"current": 3, "total": 10, "status": "Preparing provisioning script"},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
if is_script_install:
|
||||
# Write the script out to a temporary file
|
||||
retcode, stdout, stderr = pvc_common.run_os_command("mktemp")
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
"Failed to create a temporary file: {}".format(stderr)
|
||||
)
|
||||
script_file = stdout.strip()
|
||||
with open(script_file, "w") as fh:
|
||||
fh.write(vm_data["script"])
|
||||
fh.write("\n")
|
||||
|
||||
# Import the script file
|
||||
loader = importlib.machinery.SourceFileLoader("installer_script", script_file)
|
||||
spec = importlib.util.spec_from_loader(loader.name, loader)
|
||||
installer_script = importlib.util.module_from_spec(spec)
|
||||
loader.exec_module(installer_script)
|
||||
|
||||
# Verify that the install() function is valid
|
||||
if "install" not in dir(installer_script):
|
||||
raise ProvisioningError(
|
||||
"Specified script does not contain an install() function."
|
||||
)
|
||||
|
||||
print("Provisioning script imported successfully")
|
||||
|
||||
# Phase 4 - configuration creation
|
||||
# * Create the libvirt XML configuration
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={
|
||||
"current": 4,
|
||||
"total": 10,
|
||||
"status": "Preparing Libvirt XML configuration",
|
||||
},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
print("Creating Libvirt configuration")
|
||||
|
||||
# Get information about VM
|
||||
vm_uuid = uuid.uuid4()
|
||||
vm_description = "PVC provisioner @ {}, profile '{}'".format(
|
||||
datetime.datetime.now(), vm_profile
|
||||
)
|
||||
|
||||
retcode, stdout, stderr = pvc_common.run_os_command("uname -m")
|
||||
system_architecture = stdout.strip()
|
||||
|
||||
# Begin assembling libvirt schema
|
||||
vm_schema = ""
|
||||
|
||||
vm_schema += libvirt_schema.libvirt_header.format(
|
||||
vm_name=vm_name,
|
||||
vm_uuid=vm_uuid,
|
||||
vm_description=vm_description,
|
||||
vm_memory=vm_data["system_details"]["vram_mb"],
|
||||
vm_vcpus=vm_data["system_details"]["vcpu_count"],
|
||||
vm_architecture=system_architecture,
|
||||
)
|
||||
|
||||
# Add disk devices
|
||||
monitor_list = list()
|
||||
coordinator_names = config["storage_hosts"]
|
||||
for coordinator in coordinator_names:
|
||||
monitor_list.append("{}.{}".format(coordinator, config["storage_domain"]))
|
||||
|
||||
ceph_storage_secret = config["ceph_storage_secret_uuid"]
|
||||
|
||||
for volume in vm_data["volumes"]:
|
||||
vm_schema += libvirt_schema.devices_disk_header.format(
|
||||
ceph_storage_secret=ceph_storage_secret,
|
||||
disk_pool=volume["pool"],
|
||||
vm_name=vm_name,
|
||||
disk_id=volume["disk_id"],
|
||||
)
|
||||
for monitor in monitor_list:
|
||||
vm_schema += libvirt_schema.devices_disk_coordinator.format(
|
||||
coordinator_name=monitor,
|
||||
coordinator_ceph_mon_port=config["ceph_monitor_port"],
|
||||
)
|
||||
vm_schema += libvirt_schema.devices_disk_footer
|
||||
|
||||
vm_schema += libvirt_schema.devices_vhostmd
|
||||
|
||||
# Add network devices
|
||||
network_id = 0
|
||||
for network in vm_data["networks"]:
|
||||
vni = network["vni"]
|
||||
if vni in ["upstream", "cluster", "storage"]:
|
||||
eth_bridge = "br{}".format(vni)
|
||||
else:
|
||||
eth_bridge = "vmbr{}".format(vni)
|
||||
|
||||
vm_id_hex = "{:x}".format(int(vm_id % 16))
|
||||
net_id_hex = "{:x}".format(int(network_id % 16))
|
||||
|
||||
if vm_data.get("mac_template") is not None:
|
||||
mac_prefix = "52:54:01"
|
||||
macgen_template = vm_data["mac_template"]
|
||||
eth_macaddr = macgen_template.format(
|
||||
prefix=mac_prefix,
|
||||
vmid=vm_id_hex,
|
||||
netid=net_id_hex,
|
||||
)
|
||||
else:
|
||||
mac_prefix = "52:54:00"
|
||||
random_octet_A = "{:x}".format(random.randint(16, 238))
|
||||
random_octet_B = "{:x}".format(random.randint(16, 238))
|
||||
random_octet_C = "{:x}".format(random.randint(16, 238))
|
||||
|
||||
macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
|
||||
eth_macaddr = macgen_template.format(
|
||||
prefix=mac_prefix,
|
||||
octetA=random_octet_A,
|
||||
octetB=random_octet_B,
|
||||
octetC=random_octet_C,
|
||||
)
|
||||
|
||||
vm_schema += libvirt_schema.devices_net_interface.format(
|
||||
eth_macaddr=eth_macaddr, eth_bridge=eth_bridge
|
||||
)
|
||||
|
||||
network_id += 1
|
||||
|
||||
# Add default devices
|
||||
vm_schema += libvirt_schema.devices_default
|
||||
|
||||
# Add serial device
|
||||
if vm_data["system_details"]["serial"]:
|
||||
vm_schema += libvirt_schema.devices_serial.format(vm_name=vm_name)
|
||||
|
||||
# Add VNC device
|
||||
if vm_data["system_details"]["vnc"]:
|
||||
if vm_data["system_details"]["vnc_bind"]:
|
||||
vm_vnc_bind = vm_data["system_details"]["vnc_bind"]
|
||||
else:
|
||||
vm_vnc_bind = "127.0.0.1"
|
||||
|
||||
vm_vncport = 5900
|
||||
vm_vnc_autoport = "yes"
|
||||
|
||||
vm_schema += libvirt_schema.devices_vnc.format(
|
||||
vm_vncport=vm_vncport,
|
||||
vm_vnc_autoport=vm_vnc_autoport,
|
||||
vm_vnc_bind=vm_vnc_bind,
|
||||
)
|
||||
|
||||
# Add SCSI controller
|
||||
vm_schema += libvirt_schema.devices_scsi_controller
|
||||
|
||||
# Add footer
|
||||
vm_schema += libvirt_schema.libvirt_footer
|
||||
|
||||
print("Final VM schema:\n{}\n".format(vm_schema))
|
||||
|
||||
# All the following steps may require cleanup later on, so catch them here and do cleanup in a Finally block
|
||||
try:
|
||||
# Phase 5 - definition
|
||||
# * Create the VM in the PVC cluster
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={"current": 5, "total": 10, "status": "Defining VM on the cluster"},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
if define_vm:
|
||||
print("Defining VM on cluster")
|
||||
node_limit = vm_data["system_details"]["node_limit"]
|
||||
if node_limit:
|
||||
node_limit = node_limit.split(",")
|
||||
node_selector = vm_data["system_details"]["node_selector"]
|
||||
node_autostart = vm_data["system_details"]["node_autostart"]
|
||||
migration_method = vm_data["system_details"]["migration_method"]
|
||||
retcode, retmsg = pvc_vm.define_vm(
|
||||
zkhandler,
|
||||
vm_schema.strip(),
|
||||
target_node,
|
||||
node_limit,
|
||||
node_selector,
|
||||
node_autostart,
|
||||
migration_method,
|
||||
vm_profile,
|
||||
initial_state="provision",
|
||||
)
|
||||
print(retmsg)
|
||||
else:
|
||||
print("Skipping VM definition")
|
||||
|
||||
# Phase 6 - disk creation
|
||||
# * Create each Ceph storage volume for the disks
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={"current": 6, "total": 10, "status": "Creating storage volumes"},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
for volume in vm_data["volumes"]:
|
||||
if volume.get("source_volume") is not None:
|
||||
success, message = pvc_ceph.clone_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
volume["source_volume"],
|
||||
"{}_{}".format(vm_name, volume["disk_id"]),
|
||||
)
|
||||
print(message)
|
||||
if not success:
|
||||
raise ProvisioningError(
|
||||
'Failed to clone volume "{}" to "{}".'.format(
|
||||
volume["source_volume"], volume["disk_id"]
|
||||
)
|
||||
)
|
||||
else:
|
||||
success, message = pvc_ceph.add_volume(
|
||||
zkhandler,
|
||||
volume["pool"],
|
||||
"{}_{}".format(vm_name, volume["disk_id"]),
|
||||
"{}G".format(volume["disk_size_gb"]),
|
||||
)
|
||||
print(message)
|
||||
if not success:
|
||||
raise ProvisioningError(
|
||||
'Failed to create volume "{}".'.format(volume["disk_id"])
|
||||
)
|
||||
|
||||
# Phase 7 - disk mapping
|
||||
# * Map each volume to the local host in order
|
||||
# * Format each volume with any specified filesystems
|
||||
# * If any mountpoints are specified, create a temporary mount directory
|
||||
# * Mount any volumes to their respective mountpoints
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={
|
||||
"current": 7,
|
||||
"total": 10,
|
||||
"status": "Mapping, formatting, and mounting storage volumes locally",
|
||||
},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
for volume in vm_data["volumes"]:
|
||||
dst_volume_name = "{}_{}".format(vm_name, volume["disk_id"])
|
||||
dst_volume = "{}/{}".format(volume["pool"], dst_volume_name)
|
||||
|
||||
if is_ova_install:
|
||||
src_volume_name = volume["volume_name"]
|
||||
src_volume = "{}/{}".format(volume["pool"], src_volume_name)
|
||||
|
||||
print(
|
||||
"Converting {} source volume {} to raw format on {}".format(
|
||||
volume["volume_format"], src_volume, dst_volume
|
||||
)
|
||||
)
|
||||
|
||||
# Map the target RBD device
|
||||
retcode, retmsg = pvc_ceph.map_volume(
|
||||
zkhandler, volume["pool"], dst_volume_name
|
||||
)
|
||||
if not retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to map destination volume "{}": {}'.format(
|
||||
dst_volume_name, retmsg
|
||||
)
|
||||
)
|
||||
# Map the source RBD device
|
||||
retcode, retmsg = pvc_ceph.map_volume(
|
||||
zkhandler, volume["pool"], src_volume_name
|
||||
)
|
||||
if not retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to map source volume "{}": {}'.format(
|
||||
src_volume_name, retmsg
|
||||
)
|
||||
)
|
||||
# Convert from source to target
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"qemu-img convert -C -f {} -O raw {} {}".format(
|
||||
volume["volume_format"],
|
||||
"/dev/rbd/{}".format(src_volume),
|
||||
"/dev/rbd/{}".format(dst_volume),
|
||||
)
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to convert {} volume "{}" to raw volume "{}": {}'.format(
|
||||
volume["volume_format"], src_volume, dst_volume, stderr
|
||||
)
|
||||
)
|
||||
|
||||
# Unmap the source RBD device (don't bother later)
|
||||
retcode, retmsg = pvc_ceph.unmap_volume(
|
||||
zkhandler, volume["pool"], src_volume_name
|
||||
)
|
||||
if not retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to unmap source volume "{}": {}'.format(
|
||||
src_volume_name, retmsg
|
||||
)
|
||||
)
|
||||
# Unmap the target RBD device (don't bother later)
|
||||
retcode, retmsg = pvc_ceph.unmap_volume(
|
||||
zkhandler, volume["pool"], dst_volume_name
|
||||
)
|
||||
if not retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to unmap destination volume "{}": {}'.format(
|
||||
dst_volume_name, retmsg
|
||||
)
|
||||
)
|
||||
else:
|
||||
if volume.get("source_volume") is not None:
|
||||
continue
|
||||
|
||||
if volume.get("filesystem") is None:
|
||||
continue
|
||||
|
||||
filesystem_args_list = list()
|
||||
for arg in volume["filesystem_args"].split():
|
||||
arg_entry, *arg_data = arg.split("=")
|
||||
arg_data = "=".join(arg_data)
|
||||
filesystem_args_list.append(arg_entry)
|
||||
filesystem_args_list.append(arg_data)
|
||||
filesystem_args = " ".join(filesystem_args_list)
|
||||
|
||||
print(
|
||||
"Creating {} filesystem on {}".format(
|
||||
volume["filesystem"], dst_volume
|
||||
)
|
||||
)
|
||||
print("Args: {}".format(filesystem_args))
|
||||
|
||||
# Map the RBD device
|
||||
retcode, retmsg = pvc_ceph.map_volume(
|
||||
zkhandler, volume["pool"], dst_volume_name
|
||||
)
|
||||
if not retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to map volume "{}": {}'.format(dst_volume, retmsg)
|
||||
)
|
||||
|
||||
# Create the filesystem
|
||||
if volume["filesystem"] == "swap":
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"mkswap -f /dev/rbd/{}".format(dst_volume)
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to create swap on "{}": {}'.format(
|
||||
dst_volume, stderr
|
||||
)
|
||||
)
|
||||
else:
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"mkfs.{} {} /dev/rbd/{}".format(
|
||||
volume["filesystem"], filesystem_args, dst_volume
|
||||
)
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to create {} filesystem on "{}": {}'.format(
|
||||
volume["filesystem"], dst_volume, stderr
|
||||
)
|
||||
)
|
||||
|
||||
print(stdout)
|
||||
|
||||
if is_script_install:
|
||||
# Create temporary directory
|
||||
retcode, stdout, stderr = pvc_common.run_os_command("mktemp -d")
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
"Failed to create a temporary directory: {}".format(stderr)
|
||||
)
|
||||
temp_dir = stdout.strip()
|
||||
|
||||
for volume in vm_data["volumes"]:
|
||||
if volume["source_volume"] is not None:
|
||||
continue
|
||||
|
||||
if not volume["mountpoint"] or volume["mountpoint"] == "swap":
|
||||
continue
|
||||
|
||||
mapped_dst_volume = "/dev/rbd/{}/{}_{}".format(
|
||||
volume["pool"], vm_name, volume["disk_id"]
|
||||
)
|
||||
mount_path = "{}{}".format(temp_dir, volume["mountpoint"])
|
||||
|
||||
# Ensure the mount path exists (within the filesystems)
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"mkdir -p {}".format(mount_path)
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to create mountpoint "{}": {}'.format(
|
||||
mount_path, stderr
|
||||
)
|
||||
)
|
||||
|
||||
# Mount filesystems to temporary directory
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"mount {} {}".format(mapped_dst_volume, mount_path)
|
||||
)
|
||||
if retcode:
|
||||
raise ProvisioningError(
|
||||
'Failed to mount "{}" on "{}": {}'.format(
|
||||
mapped_dst_volume, mount_path, stderr
|
||||
)
|
||||
)
|
||||
|
||||
print(
|
||||
"Successfully mounted {} on {}".format(
|
||||
mapped_dst_volume, mount_path
|
||||
)
|
||||
)
|
||||
|
||||
# Phase 8 - provisioning script execution
|
||||
# * Execute the provisioning script main function ("install") passing any custom arguments
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={"current": 8, "total": 10, "status": "Executing provisioning script"},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
if is_script_install:
|
||||
print("Running installer script")
|
||||
|
||||
# Parse the script arguments
|
||||
script_arguments = dict()
|
||||
for argument in vm_data["script_arguments"]:
|
||||
argument_name, argument_data = argument.split("=")
|
||||
script_arguments[argument_name] = argument_data
|
||||
|
||||
# Parse the runtime arguments
|
||||
if script_run_args is not None:
|
||||
for argument in script_run_args:
|
||||
argument_name, argument_data = argument.split("=")
|
||||
script_arguments[argument_name] = argument_data
|
||||
|
||||
print("Script arguments: {}".format(script_arguments))
|
||||
|
||||
# Run the script
|
||||
try:
|
||||
installer_script.install(
|
||||
vm_name=vm_name,
|
||||
vm_id=vm_id,
|
||||
temporary_directory=temp_dir,
|
||||
disks=vm_data["volumes"],
|
||||
networks=vm_data["networks"],
|
||||
**script_arguments
|
||||
)
|
||||
except Exception as e:
|
||||
raise ProvisioningError("Failed to run install script: {}".format(e))
|
||||
|
||||
except Exception as e:
|
||||
start_vm = False
|
||||
raise e
|
||||
|
||||
# Always perform the cleanup steps
|
||||
finally:
|
||||
# Phase 9 - install cleanup
|
||||
# * Unmount any mounted volumes
|
||||
# * Remove any temporary directories
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={
|
||||
"current": 9,
|
||||
"total": 10,
|
||||
"status": "Cleaning up local mounts and directories",
|
||||
},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
if not is_ova_install:
|
||||
for volume in list(reversed(vm_data["volumes"])):
|
||||
if volume.get("source_volume") is not None:
|
||||
continue
|
||||
|
||||
if is_script_install:
|
||||
# Unmount the volume
|
||||
if (
|
||||
volume.get("mountpoint") is not None
|
||||
and volume.get("mountpoint") != "swap"
|
||||
):
|
||||
print(
|
||||
"Cleaning up mount {}{}".format(
|
||||
temp_dir, volume["mountpoint"]
|
||||
)
|
||||
)
|
||||
|
||||
mount_path = "{}{}".format(temp_dir, volume["mountpoint"])
|
||||
|
||||
# Make sure any bind mounts or submounts are unmounted first
|
||||
if volume["mountpoint"] == "/":
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"umount {}/**/**".format(mount_path)
|
||||
)
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"umount {}/**".format(mount_path)
|
||||
)
|
||||
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"umount {}".format(mount_path)
|
||||
)
|
||||
if retcode:
|
||||
print(
|
||||
'Failed to unmount "{}": {}'.format(mount_path, stderr)
|
||||
)
|
||||
|
||||
# Unmap the RBD device
|
||||
if volume["filesystem"]:
|
||||
print(
|
||||
"Cleaning up RBD mapping /dev/rbd/{}/{}_{}".format(
|
||||
volume["pool"], vm_name, volume["disk_id"]
|
||||
)
|
||||
)
|
||||
|
||||
rbd_volume = "/dev/rbd/{}/{}_{}".format(
|
||||
volume["pool"], vm_name, volume["disk_id"]
|
||||
)
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"rbd unmap {}".format(rbd_volume)
|
||||
)
|
||||
if retcode:
|
||||
print(
|
||||
'Failed to unmap volume "{}": {}'.format(rbd_volume, stderr)
|
||||
)
|
||||
|
||||
print("Cleaning up temporary directories and files")
|
||||
|
||||
if is_script_install:
|
||||
# Remove temporary mount directory (don't fail if not removed)
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"rmdir {}".format(temp_dir)
|
||||
)
|
||||
if retcode:
|
||||
print(
|
||||
'Failed to delete temporary directory "{}": {}'.format(
|
||||
temp_dir, stderr
|
||||
)
|
||||
)
|
||||
|
||||
# Remove the temporary script (don't fail if not removed)
|
||||
retcode, stdout, stderr = pvc_common.run_os_command(
|
||||
"rm -f {}".format(script_file)
|
||||
)
|
||||
if retcode:
|
||||
print(
|
||||
'Failed to delete temporary script file "{}": {}'.format(
|
||||
script_file, stderr
|
||||
)
|
||||
)
|
||||
|
||||
# Phase 10 - startup
|
||||
# * Start the VM in the PVC cluster
|
||||
if start_vm:
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={"current": 10, "total": 10, "status": "Starting VM"},
|
||||
)
|
||||
time.sleep(1)
|
||||
retcode, retmsg = pvc_vm.start_vm(zkhandler, vm_name)
|
||||
print(retmsg)
|
||||
|
||||
zkhandler.disconnect()
|
||||
del zkhandler
|
||||
|
||||
return {
|
||||
"status": 'VM "{}" with profile "{}" has been provisioned successfully'.format(
|
||||
vm_name, vm_profile
|
||||
),
|
||||
"current": 10,
|
||||
"total": 10,
|
||||
}
|
||||
|
771
api-daemon/pvcapid/vmbuilder.py
Executable file
@ -0,0 +1,771 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# vmbuilder.py - PVC API VM builder (provisioner) functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
#
|
||||
###############################################################################
|
||||
|
||||
import json
|
||||
import psycopg2
|
||||
import psycopg2.extras
|
||||
import re
|
||||
import os
|
||||
|
||||
# import sys
|
||||
import time
|
||||
import importlib.util
|
||||
import uuid
|
||||
|
||||
from contextlib import contextmanager
|
||||
|
||||
from pvcapid.Daemon import config
|
||||
|
||||
from daemon_lib.zkhandler import ZKHandler
|
||||
|
||||
import daemon_lib.common as pvc_common
|
||||
import daemon_lib.node as pvc_node
|
||||
import daemon_lib.vm as pvc_vm
|
||||
import daemon_lib.network as pvc_network
|
||||
import daemon_lib.ceph as pvc_ceph
|
||||
|
||||
|
||||
#
|
||||
# Exceptions (used by Celery tasks)
|
||||
#
|
||||
class ValidationError(Exception):
|
||||
"""
|
||||
An exception that results from some value being un- or mis-defined.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class ClusterError(Exception):
|
||||
"""
|
||||
An exception that results from the PVC cluster being out of alignment with the action.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
class ProvisioningError(Exception):
|
||||
"""
|
||||
An exception that results from a failure of a provisioning command.
|
||||
"""
|
||||
|
||||
pass
|
||||
|
||||
|
||||
#
|
||||
# VMBuilder class - subclassed by install scripts
|
||||
#
|
||||
class VMBuilder(object):
|
||||
def __init__(
|
||||
self,
|
||||
vm_name,
|
||||
vm_id,
|
||||
vm_profile,
|
||||
vm_data,
|
||||
):
|
||||
self.vm_name = vm_name
|
||||
self.vm_id = vm_id
|
||||
self.vm_uuid = uuid.uuid4()
|
||||
self.vm_profile = vm_profile
|
||||
self.vm_data = vm_data
|
||||
|
||||
#
|
||||
# Primary class functions; implemented by the individual scripts
|
||||
#
|
||||
def setup(self):
|
||||
"""
|
||||
setup(): Perform special setup steps before proceeding
|
||||
OPTIONAL
|
||||
"""
|
||||
pass
|
||||
|
||||
def create(self):
|
||||
"""
|
||||
create(): Create the VM libvirt schema definition; the resulting XML is used to define the VM afterwards
|
||||
"""
|
||||
pass
|
||||
|
||||
def prepare(self):
|
||||
"""
|
||||
prepare(): Prepare any disks/volumes for the install step
|
||||
"""
|
||||
pass
|
||||
|
||||
def install(self):
|
||||
"""
|
||||
install(): Perform the installation
|
||||
"""
|
||||
pass
|
||||
|
||||
def cleanup(self):
|
||||
"""
|
||||
cleanup(): Perform any cleanup required after the prepare() step or on failure of the install() step
|
||||
"""
|
||||
pass
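# A provisioner script is expected to provide a subclass of this class named
# VMBuilderScript, which create_vm() below imports and instantiates; a minimal
# sketch (method bodies assumed, not part of this commit) would be:
#
#   class VMBuilderScript(VMBuilder):
#       def setup(self):
#           pass
#       def create(self):
#           return ""  # the libvirt XML schema as a string
#       def prepare(self):
#           pass
#       def install(self):
#           pass
#       def cleanup(self):
#           pass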
|
||||
|
||||
|
||||
#
|
||||
# Helper functions (as context managers)
|
||||
#
|
||||
@contextmanager
|
||||
def chroot(destination):
|
||||
"""
|
||||
Change root directory to a given destination
|
||||
"""
|
||||
try:
|
||||
real_root = os.open("/", os.O_RDONLY)
|
||||
os.chroot(destination)
|
||||
fake_root = os.open("/", os.O_RDONLY)
|
||||
os.fchdir(fake_root)
|
||||
yield
|
||||
except Exception:
|
||||
raise
|
||||
finally:
|
||||
os.fchdir(real_root)
|
||||
os.chroot(".")
|
||||
os.fchdir(real_root)
|
||||
os.close(fake_root)
|
||||
os.close(real_root)
|
||||
del fake_root
|
||||
del real_root
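# Example usage (illustrative): run commands inside the target root, e.g.
#   with chroot("/tmp/target"):
#       os.system("update-grub")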
|
||||
|
||||
|
||||
@contextmanager
|
||||
def open_db(config):
|
||||
try:
|
||||
conn = psycopg2.connect(
|
||||
host=config["database_host"],
|
||||
port=config["database_port"],
|
||||
dbname=config["database_name"],
|
||||
user=config["database_user"],
|
||||
password=config["database_password"],
|
||||
)
|
||||
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
|
||||
except Exception:
|
||||
raise ClusterError("Failed to connect to Postgres")
|
||||
|
||||
try:
|
||||
yield cur
|
||||
except Exception:
|
||||
raise
|
||||
finally:
|
||||
conn.commit()
|
||||
cur.close()
|
||||
conn.close()
|
||||
del conn
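# Because of the RealDictCursor factory above, fetchone()/fetchall() return
# rows as dictionaries keyed by column name, which is why callers below can
# use profile_data.get("arguments") rather than positional indexing.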
|
||||
|
||||
|
||||
@contextmanager
|
||||
def open_zk(config):
|
||||
try:
|
||||
zkhandler = ZKHandler(config)
|
||||
zkhandler.connect()
|
||||
except Exception:
|
||||
raise ClusterError("Failed to connect to Zookeeper")
|
||||
|
||||
try:
|
||||
yield zkhandler
|
||||
except Exception:
|
||||
raise
|
||||
finally:
|
||||
zkhandler.disconnect()
|
||||
del zkhandler
|
||||
|
||||
|
||||
#
|
||||
# Main VM provisioning function - executed by the Celery worker
|
||||
#
|
||||
def create_vm(
|
||||
self, vm_name, vm_profile, define_vm=True, start_vm=True, script_run_args=[]
|
||||
):
|
||||
print(f"Starting provisioning of VM '{vm_name}' with profile '{vm_profile}'")
|
||||
|
||||
# Phase 1 - setup
|
||||
# * Get the profile elements
|
||||
# * Get the details from these elements
|
||||
# * Assemble a VM configuration dictionary
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={"current": 1, "total": 10, "status": "Collecting configuration"},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
vm_id = re.findall(r"(\d+)$", vm_name)
|
||||
if not vm_id:
|
||||
vm_id = 0
|
||||
else:
|
||||
vm_id = vm_id[0]
|
||||
|
||||
vm_data = dict()
|
||||
|
||||
with open_db(config) as db_cur:
|
||||
# Get the profile information
|
||||
query = "SELECT * FROM profile WHERE name = %s"
|
||||
args = (vm_profile,)
|
||||
db_cur.execute(query, args)
|
||||
profile_data = db_cur.fetchone()
|
||||
if profile_data.get("arguments"):
|
||||
vm_data["script_arguments"] = profile_data.get("arguments").split("|")
|
||||
else:
|
||||
vm_data["script_arguments"] = []
|
||||
|
||||
# Get the system details
|
||||
query = "SELECT * FROM system_template WHERE id = %s"
|
||||
args = (profile_data["system_template"],)
|
||||
db_cur.execute(query, args)
|
||||
vm_data["system_details"] = db_cur.fetchone()
|
||||
|
||||
# Get the MAC template
|
||||
query = "SELECT mac_template FROM network_template WHERE id = %s"
|
||||
args = (profile_data["network_template"],)
|
||||
db_cur.execute(query, args)
|
||||
db_row = db_cur.fetchone()
|
||||
if db_row:
|
||||
vm_data["mac_template"] = db_row.get("mac_template")
|
||||
else:
|
||||
vm_data["mac_template"] = None
|
||||
|
||||
# Get the networks
|
||||
query = "SELECT * FROM network WHERE network_template = %s"
|
||||
args = (profile_data["network_template"],)
|
||||
db_cur.execute(query, args)
|
||||
_vm_networks = db_cur.fetchall()
|
||||
vm_networks = list()
|
||||
|
||||
# Set the eth_bridge for each network
|
||||
for network in _vm_networks:
|
||||
vni = network["vni"]
|
||||
if vni in ["upstream", "cluster", "storage"]:
|
||||
eth_bridge = "br{}".format(vni)
|
||||
else:
|
||||
eth_bridge = "vmbr{}".format(vni)
|
||||
network["eth_bridge"] = eth_bridge
|
||||
vm_networks.append(network)
|
||||
vm_data["networks"] = vm_networks
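# For example, a managed network with VNI 1000 maps to the bridge "vmbr1000",
# while the special "upstream", "cluster", and "storage" networks map to
# "brupstream", "brcluster", and "brstorage" respectively.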
|
||||
|
||||
# Get the storage volumes
|
||||
# ORDER BY ensures disks are always in the sdX/vdX order, regardless of add order
|
||||
query = "SELECT * FROM storage WHERE storage_template = %s ORDER BY disk_id"
|
||||
args = (profile_data["storage_template"],)
|
||||
db_cur.execute(query, args)
|
||||
vm_data["volumes"] = db_cur.fetchall()
|
||||
|
||||
# Get the script
|
||||
query = "SELECT script FROM script WHERE id = %s"
|
||||
args = (profile_data["script"],)
|
||||
db_cur.execute(query, args)
|
||||
db_row = db_cur.fetchone()
|
||||
if db_row:
|
||||
vm_data["script"] = db_row.get("script")
|
||||
else:
|
||||
vm_data["script"] = None
|
||||
|
||||
if profile_data.get("profile_type") == "ova":
|
||||
query = "SELECT * FROM ova WHERE id = %s"
|
||||
args = (profile_data["ova"],)
|
||||
db_cur.execute(query, args)
|
||||
vm_data["ova_details"] = db_cur.fetchone()
|
||||
|
||||
query = "SELECT * FROM ova_volume WHERE ova = %s"
|
||||
args = (profile_data["ova"],)
|
||||
db_cur.execute(query, args)
|
||||
# Replace the existing volumes list with our OVA volume list
|
||||
vm_data["volumes"] = db_cur.fetchall()
|
||||
|
||||
retcode, stdout, stderr = pvc_common.run_os_command("uname -m")
|
||||
vm_data["system_architecture"] = stdout.strip()
|
||||
|
||||
monitor_list = list()
|
||||
coordinator_names = config["storage_hosts"]
|
||||
for coordinator in coordinator_names:
|
||||
monitor_list.append("{}.{}".format(coordinator, config["storage_domain"]))
|
||||
vm_data["ceph_monitor_list"] = monitor_list
|
||||
vm_data["ceph_monitor_port"] = config["ceph_monitor_port"]
|
||||
vm_data["ceph_monitor_secret"] = config["ceph_storage_secret_uuid"]
|
||||
|
||||
# Parse the script arguments
|
||||
script_arguments = dict()
|
||||
for argument in vm_data["script_arguments"]:
|
||||
argument_name, argument_data = argument.split("=")
|
||||
script_arguments[argument_name] = argument_data
|
||||
|
||||
# Parse the runtime arguments
|
||||
if script_run_args is not None:
|
||||
for argument in script_run_args:
|
||||
argument_name, argument_data = argument.split("=")
|
||||
script_arguments[argument_name] = argument_data
|
||||
|
||||
print("Script arguments: {}".format(script_arguments))
|
||||
vm_data["script_arguments"] = script_arguments
|
||||
|
||||
print(
|
||||
"VM configuration data:\n{}".format(
|
||||
json.dumps(vm_data, sort_keys=True, indent=2)
|
||||
)
|
||||
)
|
||||
|
||||
# Phase 2 - verification
|
||||
# * Ensure that at least one node has enough free RAM to hold the VM (becomes main host)
|
||||
# * Ensure that all networks are valid
|
||||
# * Ensure that there is enough disk space in the Ceph cluster for the disks
|
||||
# This is the "safe fail" step when an invalid configuration will be caught
|
||||
self.update_state(
|
||||
state="RUNNING",
|
||||
meta={
|
||||
"current": 2,
|
||||
"total": 10,
|
||||
"status": "Verifying configuration against cluster",
|
||||
},
|
||||
)
|
||||
time.sleep(1)
|
||||
|
||||
with open_zk(config) as zkhandler:
|
||||
# Verify that a VM with this name does not already exist
|
||||
if pvc_vm.searchClusterByName(zkhandler, vm_name):
|
||||
raise ClusterError(
|
||||
"A VM with the name '{}' already exists in the cluster.".format(vm_name)
|
||||
)
|
||||
|
||||
# Verify that at least one host has enough free RAM to run the VM
|
||||
_discard, nodes = pvc_node.get_list(zkhandler, None)
|
||||
target_node = None
|
||||
last_free = 0
|
||||
for node in nodes:
|
||||
# Skip the node if it is not ready to run VMs
|
||||
if node["daemon_state"] != "run" or node["domain_state"] != "ready":
|
||||
continue
|
||||
# Skip the node if its free memory is less than the new VM's size, plus a 512MB buffer
|
||||
if node["memory"]["free"] < (vm_data["system_details"]["vram_mb"] + 512):
|
||||
continue
|
||||
# If this node has the most free, use it
|
||||
if node["memory"]["free"] > last_free:
|
||||
last_free = node["memory"]["free"]
|
||||
target_node = node["name"]
|
||||
# Raise if no node was found
|
||||
if not target_node:
|
||||
raise ClusterError(
|
||||
"No ready cluster node contains at least {}+512 MB of free RAM.".format(
|
||||
vm_data["system_details"]["vram_mb"]
|
||||
)
|
||||
)
|
||||
|
||||
print(
|
||||
'Selecting target node "{}" with "{}" MB free RAM'.format(
|
||||
target_node, last_free
|
||||
)
|
||||
)
|
||||
|
||||
# Verify that all configured networks are present on the cluster
|
||||
cluster_networks, _discard = pvc_network.getClusterNetworkList(zkhandler)
|
||||
for network in vm_data["networks"]:
|
||||
vni = str(network["vni"])
|
||||
if vni not in cluster_networks and vni not in [
|
||||
"upstream",
|
||||
"cluster",
|
||||
"storage",
|
||||
]:
|
||||
raise ClusterError(
|
||||
'The network VNI "{}" is not present on the cluster.'.format(vni)
|
||||
)
|
||||
|
||||
print("All configured networks for VM are valid")
|
||||
|
||||
# Verify that there is enough disk space free to provision all VM disks
|
||||
pools = dict()
|
||||
for volume in vm_data["volumes"]:
|
||||
if volume.get("source_volume") is not None:
|
||||
volume_data = pvc_ceph.getVolumeInformation(
|
||||
zkhandler, volume["pool"], volume["source_volume"]
|
||||
)
|
||||
if not volume_data:
|
||||
raise ClusterError(
|
||||
"The source volume {}/{} could not be found.".format(
|
||||
volume["pool"], volume["source_volume"]
|
||||
)
|
||||
)
|
||||
if not volume["pool"] in pools:
|
||||
pools[volume["pool"]] = int(
|
||||
pvc_ceph.format_bytes_fromhuman(volume_data["stats"]["size"])
|
||||
/ 1024
|
||||
/ 1024
|
||||
/ 1024
|
||||
)
|
||||
else:
|
||||
pools[volume["pool"]] += int(
|
||||
pvc_ceph.format_bytes_fromhuman(volume_data["stats"]["size"])
|
||||
/ 1024
|
||||
/ 1024
|
||||
/ 1024
|
||||
)
|
||||
else:
|
||||
if not volume["pool"] in pools:
|
||||
pools[volume["pool"]] = volume["disk_size_gb"]
|
||||
else:
|
||||
pools[volume["pool"]] += volume["disk_size_gb"]
|
||||
|
||||
for pool in pools:
|
||||
try:
|
||||
pool_information = pvc_ceph.getPoolInformation(zkhandler, pool)
|
||||
if not pool_information:
|
||||
raise
|
||||
except Exception:
|
||||
raise ClusterError(
|
||||
'Pool "{}" is not present on the cluster.'.format(pool)
|
||||
)
|
||||
pool_free_space_gb = int(
|
||||
pool_information["stats"]["free_bytes"] / 1024 / 1024 / 1024
|
||||
)
|
||||
pool_vm_usage_gb = int(pools[pool])
|
||||
|
||||
if pool_vm_usage_gb >= pool_free_space_gb:
|
||||
raise ClusterError(
|
||||
'Pool "{}" has only {} GB free and VM requires {} GB.'.format(
|
||||
pool, pool_free_space_gb, pool_vm_usage_gb
|
||||
)
|
||||
)
|
||||
|
||||
print("There is enough space on cluster to store VM volumes")
|
||||
|
||||
        # Verify that every specified filesystem is valid
        used_filesystems = list()
        for volume in vm_data["volumes"]:
            if volume["source_volume"] is not None:
                continue
            if volume["filesystem"] and volume["filesystem"] not in used_filesystems:
                used_filesystems.append(volume["filesystem"])

        for filesystem in used_filesystems:
            if filesystem == "swap":
                retcode, stdout, stderr = pvc_common.run_os_command("which mkswap")
                if retcode:
                    raise ProvisioningError(
                        "Failed to find binary for mkswap: {}".format(stderr)
                    )
            else:
                retcode, stdout, stderr = pvc_common.run_os_command(
                    "which mkfs.{}".format(filesystem)
                )
                if retcode:
                    raise ProvisioningError(
                        "Failed to find binary for mkfs.{}: {}".format(filesystem, stderr)
                    )

        print("All selected filesystems are valid")

    # Phase 3 - provisioning script preparation
    # * Import the provisioning script as a library with importlib
    # * Ensure the required function(s) are present
    self.update_state(
        state="RUNNING",
        meta={"current": 3, "total": 10, "status": "Preparing provisioning script"},
    )
    time.sleep(1)

    # Write the script out to a temporary file
    retcode, stdout, stderr = pvc_common.run_os_command("mktemp")
    if retcode:
        raise ProvisioningError("Failed to create a temporary file: {}".format(stderr))
    script_file = stdout.strip()
    with open(script_file, "w") as fh:
        fh.write(vm_data["script"])
        fh.write("\n")

    # Import the script file
    loader = importlib.machinery.SourceFileLoader("installer_script", script_file)
    spec = importlib.util.spec_from_loader(loader.name, loader)
    installer_script = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(installer_script)

    # Set up the VMBuilderScript object
    vm_builder = installer_script.VMBuilderScript(
        vm_name=vm_name,
        vm_id=vm_id,
        vm_profile=vm_profile,
        vm_data=vm_data,
    )

    print("Provisioning script imported successfully")

    # Create temporary directory for external chroot
    retcode, stdout, stderr = pvc_common.run_os_command("mktemp -d")
    if retcode:
        raise ProvisioningError(f"Failed to create a temporary directory: {stderr}")
    temp_dir = stdout.strip()

    # Bind mount / to the chroot location /
    retcode, stdout, stderr = pvc_common.run_os_command(
        f"mount --bind --options ro / {temp_dir}"
    )
    if retcode:
        raise ProvisioningError(
            f"Failed to mount rootfs onto {temp_dir} for chroot: {stderr}"
        )

    # Mount tmpfs to the chroot location /tmp
    retcode, stdout, stderr = pvc_common.run_os_command(
        f"mount --type tmpfs tmpfs {temp_dir}/tmp"
    )
    if retcode:
        raise ProvisioningError(
            f"Failed to mount tmpfs onto {temp_dir}/tmp for chroot: {stderr}"
        )

    # Bind mount /dev to the chroot location /dev
    retcode, stdout, stderr = pvc_common.run_os_command(
        f"mount --bind --options ro /dev {temp_dir}/dev"
    )
    if retcode:
        raise ProvisioningError(
            f"Failed to mount devfs onto {temp_dir}/dev for chroot: {stderr}"
        )

    # Bind mount /run to the chroot location /run
    retcode, stdout, stderr = pvc_common.run_os_command(
        f"mount --bind --options rw /run {temp_dir}/run"
    )
    if retcode:
        raise ProvisioningError(
            f"Failed to mount runfs onto {temp_dir}/run for chroot: {stderr}"
        )

    # Bind mount /sys to the chroot location /sys
    retcode, stdout, stderr = pvc_common.run_os_command(
        f"mount --bind --options rw /sys {temp_dir}/sys"
    )
    if retcode:
        raise ProvisioningError(
            f"Failed to mount sysfs onto {temp_dir}/sys for chroot: {stderr}"
        )

    print("Chroot environment prepared successfully")

    def general_cleanup():
        print("Running upper cleanup steps")

        try:
            # Unmount bind-mounted devfs on the chroot
            retcode, stdout, stderr = pvc_common.run_os_command(
                f"umount {temp_dir}/dev"
            )
            # Unmount bind-mounted runfs on the chroot
            retcode, stdout, stderr = pvc_common.run_os_command(
                f"umount {temp_dir}/run"
            )
            # Unmount bind-mounted sysfs on the chroot
            retcode, stdout, stderr = pvc_common.run_os_command(
                f"umount {temp_dir}/sys"
            )
            # Unmount bind-mounted tmpfs on the chroot
            retcode, stdout, stderr = pvc_common.run_os_command(
                f"umount {temp_dir}/tmp"
            )
            # Unmount bind-mounted rootfs on the chroot
            retcode, stdout, stderr = pvc_common.run_os_command(f"umount {temp_dir}")
        except Exception as e:
            # We don't care about fails during cleanup, log and continue
            print(f"Suberror during general cleanup unmounts: {e}")

        try:
            # Remove the temp_dir
            os.rmdir(temp_dir)
        except Exception as e:
            # We don't care about fails during cleanup, log and continue
            print(f"Suberror during general cleanup directory removal: {e}")

        try:
            # Remove temporary script (don't fail if not removed)
            os.remove(script_file)
        except Exception as e:
            # We don't care about fails during cleanup, log and continue
            print(f"Suberror during general cleanup script removal: {e}")

    # Phase 4 - script: setup()
    # * Run pre-setup steps
    self.update_state(
        state="RUNNING",
        meta={
            "current": 4,
            "total": 10,
            "status": "Running script setup() step",
        },
    )
    time.sleep(1)

    print("Running script setup() step")

    try:
        with chroot(temp_dir):
            vm_builder.setup()
    except Exception as e:
        general_cleanup()
        raise ProvisioningError(f"Error in script setup() step: {e}")

    # Phase 5 - script: create()
    # * Prepare the libvirt XML definition for the VM
    self.update_state(
        state="RUNNING",
        meta={
            "current": 5,
            "total": 10,
            "status": "Running script create() step",
        },
    )
    time.sleep(1)

    if define_vm:
        print("Running script create() step")

        try:
            with chroot(temp_dir):
                vm_schema = vm_builder.create()
        except Exception as e:
            general_cleanup()
            raise ProvisioningError(f"Error in script create() step: {e}")

        print("Generated VM schema:\n{}\n".format(vm_schema))

        print("Defining VM on cluster")
        node_limit = vm_data["system_details"]["node_limit"]
        if node_limit:
            node_limit = node_limit.split(",")
        node_selector = vm_data["system_details"]["node_selector"]
        node_autostart = vm_data["system_details"]["node_autostart"]
        migration_method = vm_data["system_details"]["migration_method"]
        with open_zk(config) as zkhandler:
            retcode, retmsg = pvc_vm.define_vm(
                zkhandler,
                vm_schema.strip(),
                target_node,
                node_limit,
                node_selector,
                node_autostart,
                migration_method,
                vm_profile,
                initial_state="provision",
            )
        print(retmsg)
    else:
        print("Skipping VM definition due to define_vm=False")

    # Phase 6 - script: prepare()
    # * Run preparation steps (e.g. disk creation and mapping, filesystem creation, etc.)
    self.update_state(
        state="RUNNING",
        meta={
            "current": 6,
            "total": 10,
            "status": "Running script prepare() step",
        },
    )
    time.sleep(1)

    print("Running script prepare() step")

    try:
        with chroot(temp_dir):
            vm_builder.prepare()
    except Exception as e:
        with chroot(temp_dir):
            vm_builder.cleanup()
        general_cleanup()
        raise ProvisioningError(f"Error in script prepare() step: {e}")

    # Phase 7 - script: install()
    # * Run installation with arguments
    self.update_state(
        state="RUNNING",
        meta={
            "current": 7,
            "total": 10,
            "status": "Running script install() step",
        },
    )
    time.sleep(1)

    print("Running script install() step")

    try:
        with chroot(temp_dir):
            vm_builder.install()
    except Exception as e:
        with chroot(temp_dir):
            vm_builder.cleanup()
        general_cleanup()
        raise ProvisioningError(f"Error in script install() step: {e}")

    # Phase 8 - script: cleanup()
    # * Run cleanup steps
    self.update_state(
        state="RUNNING",
        meta={
            "current": 8,
            "total": 10,
            "status": "Running script cleanup() step",
        },
    )
    time.sleep(1)

    print("Running script cleanup() step")

    try:
        with chroot(temp_dir):
            vm_builder.cleanup()
    except Exception as e:
        general_cleanup()
        raise ProvisioningError(f"Error in script cleanup() step: {e}")

    # Phase 9 - general cleanup
    # * Clean up the chroot from earlier
    self.update_state(
        state="RUNNING",
        meta={
            "current": 9,
            "total": 10,
            "status": "Running general cleanup steps",
        },
    )
    time.sleep(1)

    general_cleanup()

    # Phase 10 - startup
    # * Start the VM in the PVC cluster
    self.update_state(
        state="RUNNING",
        meta={
            "current": 10,
            "total": 10,
            "status": "Starting VM",
        },
    )
    time.sleep(1)

    if start_vm:
        print("Starting VM")
        with open_zk(config) as zkhandler:
            success, message = pvc_vm.start_vm(zkhandler, vm_name)
        print(message)

        end_message = f'VM "{vm_name}" with profile "{vm_profile}" has been provisioned and started successfully'
    else:
        end_message = f'VM "{vm_name}" with profile "{vm_profile}" has been provisioned successfully'

    return {"status": end_message, "current": 10, "total": 10}

@@ -5265,7 +5265,8 @@ def provisioner_profile_list(limit):
    "-s",
    "--system-template",
    "system_template",
    help="The system template for the profile.",
    required=True,
    help="The system template for the profile (required).",
)
@click.option(
    "-n",
@@ -5280,10 +5281,24 @@ def provisioner_profile_list(limit):
    help="The storage template for the profile.",
)
@click.option(
    "-u", "--userdata", "userdata", help="The userdata document for the profile."
    "-u",
    "--userdata",
    "userdata",
    help="The userdata document for the profile.",
)
@click.option(
    "-x",
    "--script",
    "script",
    required=True,
    help="The script for the profile (required).",
)
@click.option(
    "-o",
    "--ova",
    "ova",
    help="The OVA image for the profile; set automatically with 'provisioner ova upload'.",
)
@click.option("-x", "--script", "script", help="The script for the profile.")
@click.option("-o", "--ova", "ova", help="The OVA image for the profile.")
@click.option(
    "-a",
    "--script-arg",
@@ -14,6 +14,7 @@ Details of the Provisioner API interface can be found in [the API manual](/manua
* [Deploying VMs from provisioner scripts](#deploying-vms-from-provisioner-scripts)
* [Deploying VMs from OVA images](#deploying-vms-from-ova-images)
    + [Uploading an OVA](#uploading-an-ova)
    + [The OVA Provisioning Script](#the-ova-provisioning-script)
    + [OVA limitations](#ova-limitations)

## Overview
@@ -172,64 +173,147 @@ basic-ssh 11 Content-Type: text/cloud-config; charset="us-ascii"

## Provisioning Scripts

The PVC provisioner provides a scripting framework in order to automate VM installation. This is generally the most useful with UNIX-like systems which can be installed over the network via shell scripts. For instance, the script might install a Debian VM using `debootstrap` or a Red Hat VM using `rpmstrap`. The PVC Ansible system will automatically install `debootstrap` on coordinator nodes, to allow out-of-the-box deployment of Debian-based VMs with `debootstrap` and the example script shipped with PVC (see below); any other deployment tool must be installed separately onto all PVC coordinator nodes, or installed by the script itself (e.g. using `os.system('apt-get install ...')`, `requests` to download a script, etc.).
The PVC provisioner provides a scripting framework in order to automate VM installation. This is generally the most useful with UNIX-like systems which can be installed over the network via shell scripts. For instance, the script might install a Debian VM using `debootstrap` or a Red Hat VM using `rpmstrap`. The PVC Ansible system will automatically install `debootstrap` on coordinator nodes, to allow out-of-the-box deployment of Debian-based VMs with `debootstrap` and the example script shipped with PVC (see below); any other deployment tool must be installed separately onto all PVC coordinator nodes, or fetched by the script itself (with caveats as noted below).

Provisioner scripts are written in Python 3 and are called in a standardized way during the provisioning sequence. A single function called `install` is called during the provisioning sequence to perform arbitrary tasks. At execution time, the script is passed several default keyword arguments detailed below, and can also be passed arbitrary arguments defined either in the provisioner profile, or on the `provisioner create` CLI.
Several example scripts are provided in the `/usr/share/pvc/provisioner/examples/scripts` directory of all PVC hypervisors, and these are imported by the provisioner system by default on install to help get you started. You are of course free to modify or extend these as you wish, or write your own based on them to suit your needs.

A full example script to perform a `debootstrap` Debian installation can be found under `/usr/share/pvc/provisioner/examples` on any PVC coordinator node.
Provisioner scripts are written in Python 3 and are implemented as a class, `VMBuilderScript`, which extends the built-in `VMBuilder` class, for example:

The default script "empty" can be used to skip scripted installation for a profile. Additionally, profiles with no disk mountpoints (and specifically, no root `/` mountpoint) will skip scripted installation.
```python
#!/usr/bin/env python3
# I am an example provisioner script

**WARNING**: It is important to remember that these provisioning scripts will run with the same privileges as the provisioner API daemon (usually root) on the system running the daemon. THIS MAY POSE A SECURITY RISK. However, the intent is that administrators of the cluster are the only ones allowed to specify these scripts, and that they check them thoroughly when adding them to the system, as well as limit access to the provisioning API to trusted sources. If neither of these conditions are possible, for instance if arbitrary users must specify custom scripts without administrator oversight, then the PVC provisioner script system may not be ideal.
from pvcapid.vmbuilder import VMBuilder

**NOTE**: It is often required to perform a `chroot` to perform some aspects of the install process. The PVC script fully supports this, though it is relatively complicated. The example script details how to achieve this.

#### The `install` function

The `install` function is the main entrypoint for a provisioning script, and is the only part of the script that is explicitly called. The provisioner calls this function after setting up the temporary install directory and mounting the volumes. Thus, this script can then perform any sort of tasks required in the VM to install it, and then finishes, after which the main provisioner resumes control to unmount the volumes and finish the VM creation.

It is good practice in these scripts to "fail through", since terminating the script abruptly would affect the entire provisioning flow and thus may leave the half-provisioned VM in an undefined state. Care should be taken to `try`/`catch` possible errors, and attempt to finish the script execution (or `return`) even if some aspect fails.

This function is passed a number of keyword arguments that it can then use during installation. These include those specified by the administrator in the profile, on the CLI at deploy time, as well as a number of default arguments:

##### `vm_name`

The `vm_name` keyword argument contains the name of the new VM from PVC's perspective.

##### `vm_id`

The `vm_id` keyword argument contains the VM identifier (the last numeral of the VM name, or `0` for a VM that does not end in a numeral).

##### `temporary_directory`

The `temporary_directory` keyword argument contains the path to the temporary directory on which the new VM's disks are mounted. The function *must* perform any installation steps to/under this directory.

##### `disks`

The `disks` keyword argument contains a Python list of the configured disks, as dictionaries of values as specified in the Disk template. The function may use these values as appropriate, for instance to specify an `/etc/fstab`.

##### `networks`

The `networks` keyword argument contains a Python list of the configured networks, as dictionaries of values as specified in the Network template. The function may use these values as appropriate, for instance to write an `/etc/network/interfaces` file.

#### Examples

```
$ pvc provisioner script list
Using cluster "local" - Host: "10.0.0.1:7370" Scheme: "http" Prefix: "/api/v1"

Name         ID  Script
empty        1
debootstrap  2   #!/usr/bin/env python3

def install(**kwargs):
    vm_name = kwargs['vm_name']
[...]
class VMBuilderScript(VMBuilder):
    def setup(self):
        ...
```

* The first example is the default, always-present `empty` document, which is used if the VM does not specify a valid root mountpoint, or can be configured explicitly for profiles that do not require scripts, instead of leaving that section of the profile as `None`.
Each `VMBuilderScript` class instance should provide the 5 functions defined by the VMBuilder class (or they will be noops). All 5 functions should take no arguments except `self`; data is passed to them from the parent `VMBuilder` class as outlined below. Each function provides a specific part of the installation process to automate each step with maximum flexibility:

* The second, truncated, example is the start of a normal Python install script. The full example is provided in the folder mentioned above on all PVC coordinator nodes.
* `setup()`: Performs any special initial setup (e.g. fetching scripts or configs from the Internet) and validation of the environment (e.g. checking if particular binaries are available) before proceeding with the install.

* `create()`: Creates the VM libvirt XML definition based on the information provided by the VM profile and arguments. This is the only function that returns data (namely, the string representation of the XML config).

* `prepare()`: Creates and prepares any RBD storage volumes, filesystems, and mountpoints for the next step.

* `install()`: Performs any install steps required; note that the lines between `prepare()` and `install()` are fuzzy; the main point is that these are delineated in the sequence as discrete steps.

* `cleanup()`: Performs any "inner" cleanup of things done in the `prepare()` or `install()` steps (e.g. unmounting and unmapping RBD volumes, removing temporary files, etc.); also called on any *failure* of those steps.

Each step is described in more detail in the various examples, and those should be consulted to get a full understanding of how the steps work; a minimal skeleton is also sketched below.

Note that no `__init__` should be provided by a script: doing so could result in failing scripts and should not be required.

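To make the shape of a script concrete, the following is a minimal, do-nothing sketch (not one of the shipped examples) showing all five steps:

```python
#!/usr/bin/env python3
# Minimal illustrative skeleton of a provisioner script; every step is a stub.

from pvcapid.vmbuilder import VMBuilder


class VMBuilderScript(VMBuilder):
    def setup(self):
        pass  # validate the environment and fetch anything the install needs

    def create(self):
        return ""  # must return the VM's libvirt XML definition as a string

    def prepare(self):
        pass  # create/map storage volumes, make filesystems, mount under /tmp/target

    def install(self):
        pass  # install the OS or payload into the mounted volumes

    def cleanup(self):
        pass  # undo prepare()/install() work; also called if those steps fail
```
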
As mentioned above, the `VMBuilderScript` instance includes several instance variables inherited from the parent `VMBuilder` definition. These consist of:

* `self.vm_name`: The name of the VM as provided to `pvc provisioner create`.

* `self.vm_id`: The numeral at the end of the `vm_name` (e.g. 2 for `web2`), or `0` if no numeral is present. Mostly useful when combined with network MAC address templates or preseeding clustered hosts.

* `self.vm_uuid`: An automatically-generated, random universally unique ID (UUID) for the VM to use in its Libvirt XML definition (or elsewhere, if required).

* `self.vm_profile`: The name of the PVC provisioner profile used to create the VM. Mostly useful for VM descriptions.

* `self.vm_data`: A full dictionary representation of the data provided by the PVC provisioner about the VM. Includes many useful details for crafting the VM configuration and setting up disks and networks. An example, in JSON format (a brief sketch of accessing this data follows the example):

```
{
    "ceph_monitor_list": [
        "hv1.pvcstorage.tld",
        "hv2.pvcstorage.tld",
        "hv3.pvcstorage.tld"
    ],
    "ceph_monitor_port": "6789",
    "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
    "mac_template": null,
    "networks": [
        {
            "eth_bridge": "vmbr1001",
            "id": 72,
            "network_template": 69,
            "vni": "1001"
        },
        {
            "eth_bridge": "vmbr101",
            "id": 73,
            "network_template": 69,
            "vni": "101"
        }
    ],
    "script": [contents of this file],
    "script_arguments": {
        "deb_mirror": "http://ftp.debian.org/debian",
        "deb_release": "bullseye"
    },
    "system_architecture": "x86_64",
    "system_details": {
        "id": 78,
        "migration_method": "live",
        "name": "small",
        "node_autostart": false,
        "node_limit": null,
        "node_selector": null,
        "ova": null,
        "serial": true,
        "vcpu_count": 2,
        "vnc": false,
        "vnc_bind": null,
        "vram_mb": 2048
    },
    "volumes": [
        {
            "disk_id": "sda",
            "disk_size_gb": 4,
            "filesystem": "ext4",
            "filesystem_args": "-L=root",
            "id": 9,
            "mountpoint": "/",
            "pool": "vms",
            "source_volume": null,
            "storage_template": 67
        },
        {
            "disk_id": "sdb",
            "disk_size_gb": 4,
            "filesystem": "ext4",
            "filesystem_args": "-L=var",
            "id": 10,
            "mountpoint": "/var",
            "pool": "vms",
            "source_volume": null,
            "storage_template": 67
        },
        {
            "disk_id": "sdc",
            "disk_size_gb": 4,
            "filesystem": "ext4",
            "filesystem_args": "-L=log",
            "id": 11,
            "mountpoint": "/var/log",
            "pool": "vms",
            "source_volume": null,
            "storage_template": 67
        }
    ]
}
```

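As an illustration only (not part of the shipped documentation), a script step might pull values out of `self.vm_data` like this; the variable names below are arbitrary:

```python
# Illustrative fragment from inside a VMBuilderScript step (e.g. install()).
monitor_list = self.vm_data["ceph_monitor_list"]      # e.g. ["hv1.pvcstorage.tld", ...]
vram_mb = self.vm_data["system_details"]["vram_mb"]   # e.g. 2048
deb_release = self.vm_data["script_arguments"].get("deb_release", "bullseye")

# Find the volume that will become the root filesystem, if one is defined
root_volume = next(
    (vol for vol in self.vm_data["volumes"] if vol["mountpoint"] == "/"), None
)
```
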
Since the `VMBuilderScript` runs within its own context but within the PVC Provisioner/API system, it is possible to use many helper libraries from the PVC system itself, including both the built-in daemon libraries (used by the API itself) and several explicit provisioning script helpers. The following are commonly-used (in the examples) imports that can be leveraged (a brief usage sketch follows the list):

* `pvcapid.vmbuilder.VMBuilder`: Required, provides the parent class for the `VMBuilderScript` class.
* `pvcapid.vmbuilder.ProvisioningError`: An exception that should be used within the `VMBuilderScript` to raise exceptions (though you can of course raise any other exception you wish or define your own).
* `pvcapid.vmbuilder.open_zk`: A context manager that can be used to open a Zookeeper connection, providing a `zkhandler` that can be passed to other PVC daemon library functions below.
* `pvcapid.vmbuilder.chroot`: A context manager that can be used to easily `chroot` into a given directory.
* `pvcapid.Daemon.config`: A configuration variable that *must* be passed to `open_zk` if it is used.
* `pvcapid.libvirt_schema`: A library providing a number of helpful Libvirt XML snippets that can be used to aid in building a working VM config for PVC. See the examples for a full use case.
* `daemon_lib.common`: Part of the PVC daemon libraries, provides several common functions, including, most usefully, `run_os_command` which provides a wrapped, convenient method to call arbitrary shell/OS commands while returning a POSIX returncode, stdout, and stderr (a tuple of the 3 in that order).
* `daemon_lib.ceph`: Part of the PVC daemon libraries, provides several commands for managing Ceph RBD volumes, including, but not limited to, `clone_volume`, `add_volume`, `map_volume`, and `unmap_volume`. See the `debootstrap` example for a detailed usage example.

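A hedged sketch of how these imports are typically combined inside a `prepare()` step follows; the `add_volume` usage mirrors the shipped `debootstrap` example, and the volume naming scheme is only an illustrative assumption:

```python
def prepare(self):
    # Imports are done inside each step, as in the shipped example scripts
    import daemon_lib.common as pvc_common
    import daemon_lib.ceph as pvc_ceph
    from pvcapid.vmbuilder import open_zk, ProvisioningError
    from pvcapid.Daemon import config

    # run_os_command returns (returncode, stdout, stderr)
    retcode, stdout, stderr = pvc_common.run_os_command("which mkfs.ext4")
    if retcode:
        raise ProvisioningError(f"mkfs.ext4 is not available on this hypervisor: {stderr}")

    # open_zk must be passed pvcapid.Daemon.config; create one RBD volume per disk
    with open_zk(config) as zkhandler:
        for volume in self.vm_data["volumes"]:
            success, message = pvc_ceph.add_volume(
                zkhandler,
                volume["pool"],
                f"{self.vm_name}_{volume['disk_id']}",
                f"{volume['disk_size_gb']}G",
            )
            if not success:
                raise ProvisioningError(f"Failed to create volume: {message}")
```
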
For safety reasons, the script runs in a modified chroot environment on the hypervisor. It will have full access to the entire / (root partition) of the hypervisor, but read-only. In addition, it has read-write access to /dev, /sys, /run, and a fresh /tmp to write to; use /tmp/target (as convention) as the destination for any mounting of volumes and installation. Thus it is not possible to do things like `apt-get install`ing additional programs within a script; any such requirements must be set up before running the script (e.g. via `pvc-ansible`).

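For example (an illustrative sketch only, assuming `prepare()` has already mounted the new root filesystem at the conventional `/tmp/target` path and that the target contains systemd), an `install()` step can write into the target and use the `chroot` helper to run commands inside the newly-installed system:

```python
def install(self):
    import daemon_lib.common as pvc_common
    from pvcapid.vmbuilder import chroot, ProvisioningError

    target = "/tmp/target"  # conventional mount location prepared by prepare()

    # Write a file into the new system (this path is writable; / itself is not)
    with open(f"{target}/etc/hostname", "w") as fh:
        fh.write(f"{self.vm_name}\n")

    # Run a command inside the new system via the chroot context manager
    with chroot(target):
        retcode, stdout, stderr = pvc_common.run_os_command("systemctl enable ssh")
        if retcode:
            raise ProvisioningError(f"Failed to enable ssh in the target: {stderr}")
```
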
**WARNING**: Of course, despite this "safety" mechanism, it is VERY IMPORTANT to be cognizant that this script runs AS ROOT ON THE HYPERVISOR SYSTEM with FULL ACCESS to the cluster. You should NEVER allow arbitrary, untrusted users the ability to add or modify provisioning scripts. It is trivially easy to write scripts which will do destructive things - for example writing to arbitrary /dev objects, running arbitrary root-level commands, or importing PVC library functions to delete VMs, RBD volumes, or pools. Thus, ensure you vet and understand every script on the system, audit them regularly for both intentional and accidental malicious activity, and of course (to reiterate), do not allow untrusted script creation!

## Profiles

@@ -265,8 +349,8 @@ This will create a worker job on the current primary node, and status can be que
Using cluster "local" - Host: "10.0.0.1:7370" Scheme: "http" Prefix: "/api/v1"

Job state: RUNNING
Stage: 7/10
Status: Mapping, formatting, and mounting storage volumes locally
Stage: 4/10
Status: Running script setup() step
```

A list of all running and queued jobs can be obtained by requesting the provisioner status without an ID.
@@ -332,6 +416,10 @@ Once the OVA is uploaded to the cluster with the `pvc provisioner ova upload` co

* In `pvc profile list`, a new profile will be visible which matches the OVA `NAME` from the upload. This profile will have a "Source" of `OVA <NAME>`, and a system template of the same name. This system template will contain the basic configuration of the VM. You may notice that the other templates and data are set to `N/A`. For full details on this, see the next section.

## The OVA Provisioner Script

OVA installs leverage a special provisioner script to handle the VM creation, identical to any other provisioner profile type. This example script is installed and used by OVA profiles by default, though the script that an individual OVA profile uses can be modified as required.

## OVA limitations

PVC does not implement a *complete* OVA framework. While all basic elements of the OVA are included, the following areas require special attention.
@@ -3025,14 +3025,14 @@
          "description": "Script name",
          "in": "query",
          "name": "script",
          "required": false,
          "required": true,
          "type": "string"
        },
        {
          "description": "System template name",
          "in": "query",
          "name": "system_template",
          "required": false,
          "required": true,
          "type": "string"
        },
        {
@@ -3165,21 +3165,21 @@
          "description": "Network template name",
          "in": "query",
          "name": "network_template",
          "required": true,
          "required": false,
          "type": "string"
        },
        {
          "description": "Storage template name",
          "in": "query",
          "name": "storage_template",
          "required": true,
          "required": false,
          "type": "string"
        },
        {
          "description": "Userdata template name",
          "in": "query",
          "name": "userdata",
          "required": true,
          "required": false,
          "type": "string"
        },
        {