#!/usr/bin/env python3
# 3-ova.py - PVC Provisioner example script for OVA install
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################
# This script provides an example of a PVC provisioner script. It will create a
# custom VM config based on an OVA profile.
# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required; this specific script is
# also hard-coded into a normal PVC provisioner system.
# The script must implement the class "VMBuilderScript" which extends "VMBuilder",
# providing the 5 functions indicated. Detailed explanation of the role of each
# function is provided.
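# As a minimal illustration, the overall shape of such a script (each function
# is implemented in full further below in this file) is:
#
#   class VMBuilderScript(VMBuilder):
#       def setup(self): ...      # validate dependencies before proceeding
#       def create(self): ...     # return the Libvirt XML schema string
#       def prepare(self): ...    # create, map, format, and mount volumes
#       def install(self): ...    # install the OS into the mounted volumes
#       def cleanup(self): ...    # reverse everything done in prepare()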
# Within the VMBuilderScript class, several common variables are exposed:
# self.vm_name: The name of the VM from PVC's perspective
# self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
# self.vm_uuid: An automatically-generated UUID for the VM
# self.vm_profile: The PVC provisioner profile name used for the VM
# self.vm_data: A dictionary of VM data collected by the provisioner; an example:
# {
# "ceph_monitor_list": [
# "hv1.pvcstorage.tld",
# "hv2.pvcstorage.tld",
# "hv3.pvcstorage.tld"
# ],
# "ceph_monitor_port": "6789",
# "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
# "mac_template": null,
# "networks": [
# {
# "eth_bridge": "vmbr1001",
# "id": 72,
# "network_template": 69,
# "vni": "1001"
# },
# {
# "eth_bridge": "vmbr101",
# "id": 73,
# "network_template": 69,
# "vni": "101"
# }
# ],
# "script": [contents of this file]
# "script_arguments": {
# "deb_mirror": "http://ftp.debian.org/debian",
# "deb_release": "bullseye"
# },
# "system_architecture": "x86_64",
# "system_details": {
# "id": 78,
# "migration_method": "live",
# "name": "small",
# "node_autostart": false,
# "node_limit": null,
# "node_selector": null,
# "ova": null,
# "serial": true,
# "vcpu_count": 2,
# "vnc": false,
# "vnc_bind": null,
# "vram_mb": 2048
# },
# "volumes": [
# {
# "disk_id": "sda",
# "disk_size_gb": 4,
# "filesystem": "ext4",
# "filesystem_args": "-L=root",
# "id": 9,
# "mountpoint": "/",
# "pool": "vms",
# "source_volume": null,
# "storage_template": 67
# },
# {
# "disk_id": "sdb",
# "disk_size_gb": 4,
# "filesystem": "ext4",
# "filesystem_args": "-L=var",
# "id": 10,
# "mountpoint": "/var",
# "pool": "vms",
# "source_volume": null,
# "storage_template": 67
# },
# {
# "disk_id": "sdc",
# "disk_size_gb": 4,
# "filesystem": "ext4",
# "filesystem_args": "-L=log",
# "id": 11,
# "mountpoint": "/var/log",
# "pool": "vms",
# "source_volume": null,
# "storage_template": 67
# }
# ]
# }
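# Several helper objects are also exposed to the script by the provisioner
# environment and are used below without explicit imports: "pvc_common"
# (providing run_os_command), "pvc_ceph" (volume management), "open_zk" and
# "config" (Zookeeper handler access), plus the "VMBuilder" base class, the
# "ProvisioningError" exception, and the "chroot_target" context manager.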
# Run any imports first
import datetime
import os
import random

import pvcapid.libvirt_schema as libvirt_schema
class VMBuilderScript(VMBuilder):
def setup(self):
"""
setup(): Perform special setup steps or validation before proceeding
"""
        # Ensure we have debootstrap installed on the provisioner system
        retcode, stdout, stderr = pvc_common.run_os_command("which debootstrap")
if retcode:
raise ProvisioningError("Failed to find critical dependency: debootstrap")
def create(self):
"""
create(): Create the VM libvirt schema definition
This step *must* return a fully-formed Libvirt XML document as a string.
This example leverages the built-in libvirt_schema objects provided by PVC; these
can be used as-is, or replaced with your own schema(s) on a per-script basis.
"""
schema = ""
# Prepare a description based on the VM profile
description = (
f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}'"
)
# Format the header
schema += libvirt_schema.libvirt_header.format(
vm_name=self.vm_name,
vm_uuid=self.vm_uuid,
vm_description=description,
vm_memory=self.vm_data["system_details"]["vram_mb"],
vm_vcpus=self.vm_data["system_details"]["vcpu_count"],
vm_architecture=self.vm_data["system_architecture"],
)
# Add the disk devices
monitor_list = self.vm_data["ceph_monitor_list"]
monitor_port = self.vm_data["ceph_monitor_port"]
monitor_secret = self.vm_data["ceph_monitor_secret"]
for volume in self.vm_data["volumes"]:
schema += libvirt_schema.devices_disk_header.format(
ceph_storage_secret=monitor_secret,
disk_pool=volume["pool"],
vm_name=self.vm_name,
disk_id=volume["disk_id"],
)
for monitor in monitor_list:
schema += libvirt_schema.devices_disk_coordinator.format(
coordinator_name=monitor,
coordinator_ceph_mon_port=monitor_port,
)
schema += libvirt_schema.devices_disk_footer
# Add the special vhostmd device for hypervisor information inside the VM
schema += libvirt_schema.devices_vhostmd
# Add the network devices
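        # Each interface gets a deterministic MAC when the profile sets a
        # "mac_template" (formatted from the prefix, VM ID, and network ID), or a
        # random locally-administered MAC otherwise. For example, a hypothetical
        # template of "{prefix}:ff:ff:{vmid}{netid}" would yield
        # "52:54:01:ff:ff:00" for the first interface of VM ID 0.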
network_id = 0
for network in self.vm_data["networks"]:
            vm_id_hex = "{:x}".format(int(self.vm_id % 16))
            net_id_hex = "{:x}".format(int(network_id % 16))
if self.vm_data.get("mac_template") is not None:
mac_prefix = "52:54:01"
macgen_template = self.vm_data["mac_template"]
eth_macaddr = macgen_template.format(
prefix=mac_prefix, vmid=vm_id_hex, netid=net_id_hex
)
else:
mac_prefix = "52:54:00"
random_octet_A = "{:x}".format(random.randint(16, 238))
random_octet_B = "{:x}".format(random.randint(16, 238))
random_octet_C = "{:x}".format(random.randint(16, 238))
macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
eth_macaddr = macgen_template.format(
prefix=mac_prefix,
octetA=random_octet_A,
octetB=random_octet_B,
octetC=random_octet_C,
)
            schema += libvirt_schema.devices_net_interface.format(
                eth_macaddr=eth_macaddr,
                eth_bridge=network["eth_bridge"],
            )
network_id += 1
# Add default devices
schema += libvirt_schema.devices_default
# Add serial device
if self.vm_data["system_details"]["serial"]:
schema += libvirt_schema.devices_serial.format(vm_name=self.vm_name)
# Add VNC device
if self.vm_data["system_details"]["vnc"]:
if self.vm_data["system_details"]["vnc_bind"]:
vm_vnc_bind = self.vm_data["system_details"]["vnc_bind"]
else:
vm_vnc_bind = "127.0.0.1"
vm_vncport = 5900
vm_vnc_autoport = "yes"
schema += libvirt_schema.devices_vnc.format(
vm_vncport=vm_vncport,
vm_vnc_autoport=vm_vnc_autoport,
vm_vnc_bind=vm_vnc_bind,
)
# Add SCSI controller
schema += libvirt_schema.devices_scsi_controller
# Add footer
schema += libvirt_schema.libvirt_footer
return schema
def prepare(self):
"""
prepare(): Prepare any disks/volumes for the install() step
This function should use the various exposed PVC commands as indicated to create
block devices and map them to the host.
"""
# First loop: Create the disks, either by cloning (pvc_ceph.clone_volume), or by
# new creation (pvc_ceph.add_volume).
for volume in self.vm_data["volumes"]:
if volume.get("source_volume") is not None:
with open_zk(config) as zkhandler:
success, message = pvc_ceph.clone_volume(
zkhandler,
volume["pool"],
volume["source_volume"],
f"{self.vm_name}_{volume['disk_id']}",
)
print(message)
if not success:
raise ProvisioningError(
f"Failed to clone volume '{volume['source_volume']}' to '{volume['disk_id']}'."
)
else:
with open_zk(config) as zkhandler:
success, message = pvc_ceph.add_volume(
zkhandler,
volume["pool"],
f"{self.vm_name}_{volume['disk_id']}",
f"{volume['disk_size_gb']}G",
)
print(message)
if not success:
raise ProvisioningError(
f"Failed to create volume '{volume['disk_id']}'."
)
# Second loop: Map the disks to the local system
for volume in self.vm_data["volumes"]:
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
dst_volume = f"{volume['pool']}/{dst_volume_name}"
with open_zk(config) as zkhandler:
success, message = pvc_ceph.map_volume(
zkhandler,
volume["pool"],
dst_volume_name,
)
print(message)
            if not success:
                raise ProvisioningError(f"Failed to map volume '{dst_volume}'.")
# Third loop: Create filesystems on the volumes
for volume in self.vm_data["volumes"]:
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
dst_volume = f"{volume['pool']}/{dst_volume_name}"
if volume.get("source_volume") is not None:
continue
if volume.get("filesystem") is None:
continue
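            # Split the filesystem arguments into a flat list for mkfs; for example,
            # the "-L=root" from the example profile above becomes "-L root"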
filesystem_args_list = list()
for arg in volume["filesystem_args"].split():
arg_entry, *arg_data = arg.split("=")
arg_data = "=".join(arg_data)
filesystem_args_list.append(arg_entry)
filesystem_args_list.append(arg_data)
filesystem_args = " ".join(filesystem_args_list)
if volume["filesystem"] == "swap":
retcode, stdout, stderr = pvc_common.run_os_command(
f"mkswap -f /dev/rbd/{dst_volume}"
)
if retcode:
raise ProvisioningError(
f"Failed to create swap on '{dst_volume}': {stderr}"
)
else:
retcode, stdout, stderr = pvc_common.run_os_command(
f"mkfs.{volume['filesystem']} {filesystem_args} /dev/rbd/{dst_volume}"
)
if retcode:
raise ProvisioningError(
f"Faield to create {volume['filesystem']} file on '{dst_volume}': {stderr}"
)
print(stdout)
# Create a temporary directory to use during install
temp_dir = "/tmp/target"
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir)
# Fourth loop: Mount the volumes to a set of temporary directories
for volume in self.vm_data["volumes"]:
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
dst_volume = f"{volume['pool']}/{dst_volume_name}"
if volume.get("source_volume") is not None:
continue
if volume.get("filesystem") is None:
continue
mapped_dst_volume = f"/dev/rbd/{dst_volume}"
mount_path = f"{temp_dir}/{volume['mountpoint']}"
            if not os.path.exists(mount_path):
                os.makedirs(mount_path)
# Mount filesystem
retcode, stdout, stderr = pvc_common.run_os_command(
f"mount {mapped_dst_volume} {mount_path}"
)
if retcode:
raise ProvisioningError(
f"Failed to mount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
)
def install(self):
"""
install(): Perform the installation
        This example performs a debootstrap installation of Debian into the
        volumes mounted during prepare(), then configures fstab, networking,
        GRUB, and cloud-init inside the new root filesystem.
"""
# The directory we mounted things on earlier during prepare()
temporary_directory = "/tmp/target"
# Use these convenient aliases for later (avoiding lots of "self.vm_data" everywhere)
vm_name = self.vm_name
        disks = self.vm_data["volumes"]
networks = self.vm_data["networks"]
# Parse these arguments out of self.vm_data["script_arguments"]
if self.vm_data["script_arguments"].get("deb_release") is not None:
deb_release = self.vm_data["script_arguments"].get("deb_release")
else:
deb_release = "stable"
if self.vm_data["script_arguments"].get("deb_mirror") is not None:
deb_mirror = self.vm_data["script_arguments"].get("deb_mirror")
else:
deb_mirror = "http://ftp.debian.org/debian"
if self.vm_data["script_arguments"].get("deb_packages") is not None:
deb_packages = (
self.vm_data["script_arguments"].get("deb_packages").split(",")
)
else:
deb_packages = [
"linux-image-amd64",
"grub-pc",
"cloud-init",
"python3-cffi-backend",
"wget",
]
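        # For example, a hypothetical profile passing the script_arguments
        #   {"deb_release": "bullseye", "deb_packages": "linux-image-amd64,grub-pc"}
        # would install Debian bullseye with only those two packages; note that
        # deb_packages is a single comma-separated string, not a list.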
# We need to know our root disk
root_disk = None
for disk in disks:
if disk["mountpoint"] == "/":
root_disk = disk
if not root_disk:
raise ProvisioningError("Failed to find root disk in disks list")
        # Perform a debootstrap installation
os.system(
"debootstrap --include={pkgs} {suite} {target} {mirror}".format(
suite=deb_release,
target=temporary_directory,
mirror=deb_mirror,
pkgs=",".join(deb_packages),
)
)
# Bind mount the devfs
os.system("mount --bind /dev {}/dev".format(temporary_directory))
# Create an fstab entry for each disk
fstab_file = "{}/etc/fstab".format(temporary_directory)
# The disk ID starts at zero and increments by one for each disk in the fixed-order
# disk list. This lets us work around the insanity of Libvirt IDs not matching guest IDs,
# while still letting us have some semblance of control here without enforcing things
# like labels. It increments in the for loop below at the end of each iteration, and is
# used to craft a /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-X device ID
# which will always match the correct order from Libvirt (unlike sdX/vdX names).
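        # For example, the root disk from the example profile above produces a
        # line like:
        #   /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0 / ext4 defaults,discard,noatime,nodiratime 0 1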
disk_id = 0
for disk in disks:
# We assume SSD-based/-like storage, and dislike atimes
options = "defaults,discard,noatime,nodiratime"
# The root, var, and log volumes have specific values
if disk["mountpoint"] == "/":
root_disk["scsi_id"] = disk_id
dump = 0
cpass = 1
elif disk["mountpoint"] == "/var" or disk["mountpoint"] == "/var/log":
dump = 0
cpass = 2
else:
dump = 0
cpass = 0
# Append the fstab line
with open(fstab_file, "a") as fh:
data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
disk=disk_id,
mountpoint=disk["mountpoint"],
filesystem=disk["filesystem"],
options=options,
dump=dump,
cpass=cpass,
)
fh.write(data)
# Increment the disk_id
disk_id += 1
# Write the hostname
hostname_file = "{}/etc/hostname".format(temporary_directory)
with open(hostname_file, "w") as fh:
fh.write("{}".format(vm_name))
# Fix the cloud-init.target since it's broken
cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
temporary_directory
)
with open(cloudinit_target_file, "w") as fh:
data = """[Install]
WantedBy=multi-user.target
[Unit]
Description=Cloud-init target
After=multi-user.target
"""
fh.write(data)
# NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
# will always be on PCI bus ID 2, hence the name "ens2".
# Write a DHCP stanza for ens2
ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(
temporary_directory
)
with open(ens2_network_file, "w") as fh:
data = """auto ens2
iface ens2 inet dhcp
"""
fh.write(data)
# Write the DHCP config for ens2
dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
with open(dhclient_file, "w") as fh:
data = (
"""# DHCP client configuration
# Written by the PVC provisioner
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
interface "ens2" {
"""
+ """ send fqdn.fqdn = "{hostname}";
send host-name = "{hostname}";
""".format(
hostname=vm_name
)
+ """ request subnet-mask, broadcast-address, time-offset, routers,
domain-name, domain-name-servers, domain-search, host-name,
dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
netbios-name-servers, netbios-scope, interface-mtu,
rfc3442-classless-static-routes, ntp-servers;
}
"""
)
fh.write(data)
# Write the GRUB configuration
grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
with open(grubcfg_file, "w") as fh:
data = """# Written by the PVC provisioner
GRUB_DEFAULT=0
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="PVC Virtual Machine"
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{root_disk} console=tty0 console=ttyS0,115200n8"
GRUB_CMDLINE_LINUX=""
GRUB_TERMINAL=console
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
GRUB_DISABLE_LINUX_UUID=false
""".format(
root_disk=root_disk["scsi_id"]
)
fh.write(data)
# Chroot, do some in-root tasks, then exit the chroot
with chroot_target(temporary_directory):
# Install and update GRUB
os.system(
"grub-install --force /dev/rbd/{}/{}_{}".format(
root_disk["pool"], vm_name, root_disk["disk_id"]
)
)
os.system("update-grub")
# Set a really dumb root password [TEMPORARY]
os.system("echo root:test123 | chpasswd")
# Enable cloud-init target on (first) boot
# NOTE: Your user-data should handle this and disable it once done, or things get messy.
# That cloud-init won't run without this hack seems like a bug... but even the official
# Debian cloud images are affected, so who knows.
os.system("systemctl enable cloud-init.target")
# Unmount the bound devfs
os.system("umount {}/dev".format(temporary_directory))
def cleanup(self):
"""
cleanup(): Perform any cleanup required due to prepare()/install()
It is important to now reverse *all* steps taken in those functions that might
need cleanup before teardown of the overlay chroot environment.
"""
temp_dir = "/tmp/target"
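        # Process the volumes in reverse order so that nested mountpoints (e.g.
        # /var/log) are unmounted before their parents (e.g. /var)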
for volume in list(reversed(self.vm_data["volumes"])):
dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
dst_volume = f"{volume['pool']}/{dst_volume_name}"
mapped_dst_volume = f"/dev/rbd/{dst_volume}"
mount_path = f"{temp_dir}/{volume['mountpoint']}"
if (
volume.get("source_volume") is None
and volume.get("filesystem") is not None
):
# Unmount filesystem
retcode, stdout, stderr = pvc_common.run_os_command(
f"umount {mount_path}"
)
if retcode:
raise ProvisioningError(
f"Failed to unmount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
)
# Unmap volume
with open_zk(config) as zkhandler:
success, message = pvc_ceph.unmap_volume(
zkhandler,
volume["pool"],
dst_volume_name,
)
            if not success:
                raise ProvisioningError(
                    f"Failed to unmap '{mapped_dst_volume}': {message}"
                )