Complete OVA provisioning script

Joshua Boniface 2022-10-06 10:14:04 -04:00
parent 58ce133c8d
commit 6cbaeb5dc8
1 changed file with 117 additions and 327 deletions


@@ -1,6 +1,6 @@
#!/usr/bin/env python3
# 3-ova.py - PVC Provisioner example script for OVA install
# 1-ova.py - PVC Provisioner example script for OVA profile install
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
@@ -20,22 +20,25 @@
###############################################################################
# This script provides an example of a PVC provisioner script. It will create a
# custom VM config based on an OVA profile.
# standard VM config suitable for the OVA profile, and prepare the disks, but do
# no additional install tasks (noop).
# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required; this specific script is
# also hard-coded into a normal PVC provisioner system.
# PVC provisioner script and expanded upon as required.
# *** READ THIS SCRIPT THOROUGHLY BEFORE USING TO UNDERSTAND HOW IT WORKS. ***
# The script must implement the class "VMBuilderScript" which extens "VMBuilder",
# A script must implement the class "VMBuilderScript" which extends "VMBuilder",
# providing the 5 functions indicated. Detailed explanation of the role of each
# function is provided.
# function is provided in context of the example; see the other examples for
# more potential uses.
# Within the VMBuilderScript class, several common variables are exposed:
# Within the VMBuilderScript class, several common variables are exposed through
# the parent VMBuilder class:
# self.vm_name: The name of the VM from PVC's perspective
# self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
# self.vm_uuid: An automatically-generated UUID for the VM
# self.vm_profile: The PVC provisioner profile name used for the VM
# self.vm-data: A dictionary of VM data collected by the provisioner; an example:
# self.vm_data: A dictionary of VM data collected by the provisioner; as an example:
# {
# "ceph_monitor_list": [
# "hv1.pvcstorage.tld",
@@ -115,41 +118,61 @@
# }
# ]
# }
#
# Any other information you may require must be obtained manually.
# WARNING:
#
# For safety reasons, the script runs in a modified chroot. It will have full access to
# the entire / (root partition) of the hypervisor, but read-only. In addition it has
# access to /dev, /sys, /run, and a fresh /tmp to write to; use /tmp/target (as
# convention) as the destination for any mounting of volumes and installation.
# Of course, in addition to this safety, it is VERY IMPORTANT to be aware that this
# script runs AS ROOT ON THE HYPERVISOR SYSTEM. You should never allow arbitrary,
# untrusted users the ability to add provisioning scripts even with this safeguard,
# since they could still do destructive things to /dev and the like!
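# As a minimal illustration of the convention above (a sketch only; this OVA
# script performs no mounting itself, but install-type scripts typically do),
# a prepared filesystem would be mounted under /tmp/target before any in-guest
# work, for example:
#
#   temp_dir = "/tmp/target"
#   os.makedirs(temp_dir, exist_ok=True)
#   retcode, stdout, stderr = pvc_common.run_os_command(
#       f"mount /dev/rbd/{volume_pool}/{volume_name} {temp_dir}"
#   )
#
# (volume_pool and volume_name here are illustrative placeholders.)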
# Run any imports first
import datetime
import random
import pvcapid.libvirt_schema as libvirt_schema
# This import is always required here, as VMBuilder is used by the VMBuilderScript class
# and ProvisioningError is the primary exception that should be raised within the class.
from pvcapid.vmbuilder import VMBuilder, ProvisioningError
# The VMBuilderScript class must be named as such, and extend VMBuilder.
class VMBuilderScript(VMBuilder):
def setup(self):
"""
setup(): Perform special setup steps or validation before proceeding
Validate that we're actually an OVA profile.
"""
# Ensure we have debootstrap installed on the provisioner system
retcode, stdout, stderr = pvc_common.run_os_command(f"which debootstrap")
if retcode:
raise ProvisioningError("Failed to find critical dependency: debootstrap")
if self.vm_data.get("ova_details") is None:
raise ProvisioningError(
"Attempting to provision non-OVA profile with OVA script."
)
def create(self):
"""
create(): Create the VM libvirt schema definition
This step *must* return a fully-formed Libvirt XML document as a string.
This step *must* return a fully-formed Libvirt XML document as a string or the
provisioning task will fail.
This example leverages the built-in libvirt_schema objects provided by PVC; these
can be used as-is, or replaced with your own schema(s) on a per-script basis.
"""
# Run any imports first
import pvcapid.libvirt_schema as libvirt_schema
import datetime
import random
# Create the empty schema document that we will append to and return at the end
schema = ""
# Prepare a description based on the VM profile
description = (
f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}'"
)
description = f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}', OVA '{self.vm_data['ova_details']['name']}'"
# Format the header
schema += libvirt_schema.libvirt_header.format(
@@ -186,7 +209,7 @@ class VMBuilderScript(VMBuilder):
# Add the network devices
network_id = 0
for network in self.vm_data["networks"]:
vm_id_hex = "{:x}".format(int(vm_id % 16))
vm_id_hex = "{:x}".format(int(self.vm_id % 16))
net_id_hex = "{:x}".format(int(network_id % 16))
if self.vm_data.get("mac_template") is not None:
@@ -211,7 +234,7 @@ class VMBuilderScript(VMBuilder):
schema += libvirt_schema.devices_net_interface.format(
eth_macaddr=eth_macaddr,
eth_bridge=eth_bridge,
eth_bridge=network["eth_bridge"],
)
network_id += 1
@@ -250,28 +273,16 @@ class VMBuilderScript(VMBuilder):
def prepare(self):
"""
prepare(): Prepare any disks/volumes for the install() step
This function should use the various exposed PVC commands as indicated to create
block devices and map them to the host.
"""
# First loop: Create the disks, either by cloning (pvc_ceph.clone_volume), or by
# new creation (pvc_ceph.add_volume).
# Run any imports first
from pvcapid.vmbuilder import open_zk
from pvcapid.Daemon import config
import daemon_lib.common as pvc_common
import daemon_lib.ceph as pvc_ceph
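# Note on the pattern used throughout prepare(): open_zk(config) is a context
# manager yielding a Zookeeper handler, which every pvc_ceph call takes as its
# first argument; each call returns a (success, message) tuple that is printed
# and checked, raising ProvisioningError on failure.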
# First loop: Create the destination disks
for volume in self.vm_data["volumes"]:
if volume.get("source_volume") is not None:
with open_zk(config) as zkhandler:
success, message = pvc_ceph.clone_volume(
zkhandler,
volume["pool"],
volume["source_volume"],
f"{self.vm_name}_{volume['disk_id']}",
)
print(message)
if not success:
raise ProvisioningError(
f"Failed to clone volume '{volume['source_volume']}' to '{volume['disk_id']}'."
)
else:
with open_zk(config) as zkhandler:
success, message = pvc_ceph.add_volume(
zkhandler,
@@ -285,7 +296,7 @@ class VMBuilderScript(VMBuilder):
f"Failed to create volume '{volume['disk_id']}'."
)
# Second loop: Map the disks to the local system
# Second loop: Map the destination disks
for volume in self.vm_data["volumes"]:
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
dst_volume = f"{volume['pool']}/{dst_volume_name}"
@@ -297,301 +308,80 @@ class VMBuilderScript(VMBuilder):
dst_volume_name,
)
print(message)
if not retcode:
if not success:
raise ProvisioningError(f"Failed to map volume '{dst_volume}'.")
# Third loop: Create filesystems on the volumes
# Third loop: Map the source disks
for volume in self.vm_data["volumes"]:
src_volume_name = volume["volume_name"]
src_volume = f"{volume['pool']}/{src_volume_name}"
with open_zk(config) as zkhandler:
success, message = pvc_ceph.map_volume(
zkhandler,
volume["pool"],
src_volume_name,
)
print(message)
if not success:
raise ProvisioningError(f"Failed to map volume '{src_volume}'.")
# Fourth loop: Convert the source (usually VMDK) volume to the raw destination volume
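# A brief note on the qemu-img invocation below: "-f {volume_format}" declares
# the source image format (typically vmdk for an OVA), "-O raw" selects raw
# output, and the data is written directly onto the mapped destination RBD
# device, so no intermediate file is needed.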
for volume in self.vm_data["volumes"]:
src_volume_name = volume["volume_name"]
src_volume = f"{volume['pool']}/{src_volume_name}"
src_devpath = f"/dev/rbd/{src_volume}"
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
dst_volume = f"{volume['pool']}/{dst_volume_name}"
dst_devpath = f"/dev/rbd/{dst_volume}"
if volume.get("source_volume") is not None:
continue
if volume.get("filesystem") is None:
continue
filesystem_args_list = list()
for arg in volume["filesystem_args"].split():
arg_entry, *arg_data = arg.split("=")
arg_data = "=".join(arg_data)
filesystem_args_list.append(arg_entry)
filesystem_args_list.append(arg_data)
filesystem_args = " ".join(filesystem_args_list)
if volume["filesystem"] == "swap":
retcode, stdout, stderr = pvc_common.run_os_command(
f"mkswap -f /dev/rbd/{dst_volume}"
f"qemu-img convert -C -f {volume['volume_format']} -O raw {src_devpath} {dst_devpath}"
)
if retcode:
raise ProvisioningError(
f"Failed to create swap on '{dst_volume}': {stderr}"
)
else:
retcode, stdout, stderr = pvc_common.run_os_command(
f"mkfs.{volume['filesystem']} {filesystem_args} /dev/rbd/{dst_volume}"
)
if retcode:
raise ProvisioningError(
f"Faield to create {volume['filesystem']} file on '{dst_volume}': {stderr}"
)
print(stdout)
# Create a temporary directory to use during install
temp_dir = "/tmp/target"
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
# Fourth loop: Mount the volumes to a set of temporary directories
for volume in self.vm_data["volumes"]:
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
dst_volume = f"{volume['pool']}/{dst_volume_name}"
if volume.get("source_volume") is not None:
continue
if volume.get("filesystem") is None:
continue
mapped_dst_volume = f"/dev/rbd/{dst_volume}"
mount_path = f"{temp_dir}/{volume['mountpoint']}"
if not os.path.exists(mount_path):
os.mkdir(mount_path)
# Mount filesystem
retcode, stdout, stderr = pvc_common.run_os_command(
f"mount {mapped_dst_volume} {mount_path}"
)
if retcode:
raise ProvisioningError(
f"Failed to mount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
f"Failed to convert {volume['volume_format']} volume '{src_volume}' to raw volume '{dst_volume}' with qemu-img: {stderr}"
)
def install(self):
"""
install(): Perform the installation
Since this is a noop example, this step does nothing, aside from getting some
arguments for demonstration.
Noop for OVA deploys as no further tasks are performed.
"""
# The directory we mounted things on earlier during prepare()
temporary_directory = "/tmp/target"
# Use these convenient aliases for later (avoiding lots of "self.vm_data" everywhere)
vm_name = self.vm_name
disks = self.vm_data["disks"]
networks = self.vm_data["networks"]
# Parse these arguments out of self.vm_data["script_arguments"]
if self.vm_data["script_arguments"].get("deb_release") is not None:
deb_release = self.vm_data["script_arguments"].get("deb_release")
else:
deb_release = "stable"
if self.vm_data["script_arguments"].get("deb_mirror") is not None:
deb_mirror = self.vm_data["script_arguments"].get("deb_mirror")
else:
deb_mirror = "http://ftp.debian.org/debian"
if self.vm_data["script_arguments"].get("deb_packages") is not None:
deb_packages = (
self.vm_data["script_arguments"].get("deb_packages").split(",")
)
else:
deb_packages = [
"linux-image-amd64",
"grub-pc",
"cloud-init",
"python3-cffi-backend",
"wget",
]
# We need to know our root disk
root_disk = None
for disk in disks:
if disk["mountpoint"] == "/":
root_disk = disk
if not root_disk:
raise ProvisioningError("Failed to find root disk in disks list")
# Perform a debootstrap installation
os.system(
"debootstrap --include={pkgs} {suite} {target} {mirror}".format(
suite=deb_release,
target=temporary_directory,
mirror=deb_mirror,
pkgs=",".join(deb_packages),
)
)
# Bind mount the devfs
os.system("mount --bind /dev {}/dev".format(temporary_directory))
# Create an fstab entry for each disk
fstab_file = "{}/etc/fstab".format(temporary_directory)
# The disk ID starts at zero and increments by one for each disk in the fixed-order
# disk list. This lets us work around the insanity of Libvirt IDs not matching guest IDs,
# while still letting us have some semblance of control here without enforcing things
# like labels. It increments in the for loop below at the end of each iteration, and is
# used to craft a /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-X device ID
# which will always match the correct order from Libvirt (unlike sdX/vdX names).
disk_id = 0
for disk in disks:
# We assume SSD-based/-like storage, and dislike atimes
options = "defaults,discard,noatime,nodiratime"
# The root, var, and log volumes have specific values
if disk["mountpoint"] == "/":
root_disk["scsi_id"] = disk_id
dump = 0
cpass = 1
elif disk["mountpoint"] == "/var" or disk["mountpoint"] == "/var/log":
dump = 0
cpass = 2
else:
dump = 0
cpass = 0
# Append the fstab line
with open(fstab_file, "a") as fh:
data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
disk=disk_id,
mountpoint=disk["mountpoint"],
filesystem=disk["filesystem"],
options=options,
dump=dump,
cpass=cpass,
)
fh.write(data)
# Increment the disk_id
disk_id += 1
# Write the hostname
hostname_file = "{}/etc/hostname".format(temporary_directory)
with open(hostname_file, "w") as fh:
fh.write("{}".format(vm_name))
# Fix the cloud-init.target since it's broken
cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
temporary_directory
)
with open(cloudinit_target_file, "w") as fh:
data = """[Install]
WantedBy=multi-user.target
[Unit]
Description=Cloud-init target
After=multi-user.target
"""
fh.write(data)
# NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
# will always be on PCI bus ID 2, hence the name "ens2".
# Write a DHCP stanza for ens2
ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(
temporary_directory
)
with open(ens2_network_file, "w") as fh:
data = """auto ens2
iface ens2 inet dhcp
"""
fh.write(data)
# Write the DHCP config for ens2
dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
with open(dhclient_file, "w") as fh:
data = (
"""# DHCP client configuration
# Written by the PVC provisioner
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
interface "ens2" {
"""
+ """ send fqdn.fqdn = "{hostname}";
send host-name = "{hostname}";
""".format(
hostname=vm_name
)
+ """ request subnet-mask, broadcast-address, time-offset, routers,
domain-name, domain-name-servers, domain-search, host-name,
dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
netbios-name-servers, netbios-scope, interface-mtu,
rfc3442-classless-static-routes, ntp-servers;
}
"""
)
fh.write(data)
# Write the GRUB configuration
grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
with open(grubcfg_file, "w") as fh:
data = """# Written by the PVC provisioner
GRUB_DEFAULT=0
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="PVC Virtual Machine"
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{root_disk} console=tty0 console=ttyS0,115200n8"
GRUB_CMDLINE_LINUX=""
GRUB_TERMINAL=console
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
GRUB_DISABLE_LINUX_UUID=false
""".format(
root_disk=root_disk["scsi_id"]
)
fh.write(data)
# Chroot, do some in-root tasks, then exit the chroot
with chroot_target(temporary_directory):
# Install and update GRUB
os.system(
"grub-install --force /dev/rbd/{}/{}_{}".format(
root_disk["pool"], vm_name, root_disk["disk_id"]
)
)
os.system("update-grub")
# Set a really dumb root password [TEMPORARY]
os.system("echo root:test123 | chpasswd")
# Enable cloud-init target on (first) boot
# NOTE: Your user-data should handle this and disable it once done, or things get messy.
# That cloud-init won't run without this hack seems like a bug... but even the official
# Debian cloud images are affected, so who knows.
os.system("systemctl enable cloud-init.target")
# Unmount the bound devfs
os.system("umount {}/dev".format(temporary_directory))
pass
def cleanup(self):
"""
cleanup(): Perform any cleanup required due to prepare()/install()
It is important to now reverse *all* steps taken in those functions that might
need cleanup before teardown of the overlay chroot environment.
This function is also called if there is ANY exception raised in the prepare()
or install() steps. While this doesn't mean you shouldn't or can't raise exceptions
here, be warned that doing so might cause loops. Do this only if you really need to.
"""
temp_dir = "/tmp/target"
for volume in list(reversed(self.vm_data["volumes"])):
src_volume_name = volume["volume_name"]
src_volume = f"{volume['pool']}/{src_volume_name}"
src_devpath = f"/dev/rbd/{src_volume}"
with open_zk(config) as zkhandler:
success, message = pvc_ceph.unmap_volume(
zkhandler,
volume["pool"],
src_volume_name,
)
if not success:
raise ProvisioningError(
f"Failed to unmap source volume '{src_volume_name}': {message}"
)
for volume in list(reversed(self.vm_data["volumes"])):
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
dst_volume = f"{volume['pool']}/{dst_volume_name}"
mapped_dst_volume = f"/dev/rbd/{dst_volume}"
mount_path = f"{temp_dir}/{volume['mountpoint']}"
dst_devpath = f"/dev/rbd/{dst_volume}"
if (
volume.get("source_volume") is None
and volume.get("filesystem") is not None
):
# Unmount filesystem
retcode, stdout, stderr = pvc_common.run_os_command(
f"umount {mount_path}"
)
if retcode:
raise ProvisioningError(
f"Failed to unmount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
)
# Unmap volume
with open_zk(config) as zkhandler:
success, message = pvc_ceph.unmap_volume(
zkhandler,
@@ -600,5 +390,5 @@ GRUB_DISABLE_LINUX_UUID=false
)
if not success:
raise ProvisioningError(
f"Failed to unmap '{mapped_dst_volume}': {stderr}"
f"Failed to unmap destination volume '{dst_volume_name}': {message}"
)