#!/usr/bin/env python3
# 2-ova.py - PVC Provisioner example script for OVA profile install
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2024 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################
# This script provides an example of a PVC provisioner script. It will create a
# standard VM config suitable for the OVA profile and prepare the disks, but will
# perform no additional install tasks (noop).
#
# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.
#
# *** READ THIS SCRIPT THOROUGHLY BEFORE USING TO UNDERSTAND HOW IT WORKS. ***
#
# A script must implement the class "VMBuilderScript" which extends "VMBuilder",
# providing the 5 functions indicated below. A detailed explanation of the role of
# each function is provided in the context of the example; see the other examples for
# more potential uses.
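#
# For orientation only, a minimal skeleton of the required class, assuming the class
# and method names used in this file (method bodies elided), would look like:
#
#   class VMBuilderScript(VMBuilder):
#       def setup(self):      # validation before any work begins
#           ...
#       def create(self):     # must return a Libvirt XML document as a string
#           ...
#       def prepare(self):    # create and map the required disks/volumes
#           ...
#       def install(self):    # perform the actual installation work
#           ...
#       def cleanup(self):    # called afterwards, even if prepare()/install() fail
#           ...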
#
# Within the VMBuilderScript class, several helper functions are exposed through
# the parent VMBuilder class:
#   self.log_info(message):
#     Use this function to log an "informational" message instead of "print()"
#   self.log_warn(message):
#     Use this function to log a "warning" message
#   self.log_err(message):
#     Use this function to log an "error" message outside of an exception (see below)
#   self.fail(message, exception=<ExceptionClass>):
#     Use this function to bail out of the script safely instead of raising a
#     normal Python exception. You may pass an optional exception class keyword
#     argument for posterity in the logs if you wish; otherwise, ProvisioningException
#     is used. This function implicitly calls "self.log_err" with the passed message.
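#
# A hypothetical usage snippet inside one of the methods below ("some_precondition"
# is illustrative only, not part of the exposed API):
#
#   self.log_info(f"Provisioning {self.vm_name} from profile {self.vm_profile}")
#   if not some_precondition:
#       self.fail("Precondition not met")  # logs the error, then aborts the task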
#
# Within the VMBuilderScript class, several common variables are exposed through
# the parent VMBuilder class:
#   self.vm_name: The name of the VM from PVC's perspective
#   self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
#   self.vm_uuid: An automatically-generated UUID for the VM
#   self.vm_profile: The PVC provisioner profile name used for the VM
#   self.vm_data: A dictionary of VM data collected by the provisioner; as an example:
#     {
#       "ceph_monitor_list": [
#         "hv1.pvcstorage.tld",
#         "hv2.pvcstorage.tld",
#         "hv3.pvcstorage.tld"
#       ],
#       "ceph_monitor_port": "6789",
#       "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
#       "mac_template": null,
#       "networks": [
#         {
#           "eth_bridge": "vmbr1001",
#           "id": 72,
#           "network_template": 69,
#           "vni": "1001"
#         },
#         {
#           "eth_bridge": "vmbr101",
#           "id": 73,
#           "network_template": 69,
#           "vni": "101"
#         }
#       ],
#       "script": [contents of this file],
#       "script_arguments": {
#         "deb_mirror": "http://ftp.debian.org/debian",
#         "deb_release": "bullseye"
#       },
#       "system_architecture": "x86_64",
#       "system_details": {
#         "id": 78,
#         "migration_method": "live",
#         "name": "small",
#         "node_autostart": false,
#         "node_limit": null,
#         "node_selector": null,
#         "ova": null,
#         "serial": true,
#         "vcpu_count": 2,
#         "vnc": false,
#         "vnc_bind": null,
#         "vram_mb": 2048
#       },
#       "volumes": [
#         {
#           "disk_id": "sda",
#           "disk_size_gb": 4,
#           "filesystem": "ext4",
#           "filesystem_args": "-L=root",
#           "id": 9,
#           "mountpoint": "/",
#           "pool": "vms",
#           "source_volume": null,
#           "storage_template": 67
#         },
#         {
#           "disk_id": "sdb",
#           "disk_size_gb": 4,
#           "filesystem": "ext4",
#           "filesystem_args": "-L=var",
#           "id": 10,
#           "mountpoint": "/var",
#           "pool": "vms",
#           "source_volume": null,
#           "storage_template": 67
#         },
#         {
#           "disk_id": "sdc",
#           "disk_size_gb": 4,
#           "filesystem": "ext4",
#           "filesystem_args": "-L=log",
#           "id": 11,
#           "mountpoint": "/var/log",
#           "pool": "vms",
#           "source_volume": null,
#           "storage_template": 67
#         }
#       ]
#     }
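#
# For example, the destination volume names used throughout this script are derived
# from this data roughly as follows (a hypothetical illustrative snippet):
#
#   first_volume = self.vm_data["volumes"][0]
#   dst_volume_name = f"{self.vm_name}_{first_volume['disk_id']}"
#   dst_volume = f"{first_volume['pool']}/{dst_volume_name}"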
#
# Any other information you may require must be obtained manually.
#
# WARNING:
#
# For safety reasons, the script runs in a modified chroot. It will have full access to
# the entire / (root partition) of the hypervisor, but read-only. In addition it has
# access to /dev, /sys, /run, and a fresh /tmp to write to; use /tmp/target (as
# convention) as the destination for any mounting of volumes and installation.
# Of course, in addition to this safety, it is VERY IMPORTANT to be aware that this
# script runs AS ROOT ON THE HYPERVISOR SYSTEM. You should never allow arbitrary,
# untrusted users the ability to add provisioning scripts even with this safeguard,
# since they could still do destructive things to /dev and the like!
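#
# As an example of the /tmp/target convention, a script that needed to mount a
# prepared volume for an installation step might do so along these lines (a
# hypothetical snippet; "dst_devpath" would be derived as in install() below, and
# the "os" and "pvc_common" imports obtained as in prepare()):
#
#   temp_dir = "/tmp/target"
#   os.makedirs(temp_dir, exist_ok=True)
#   pvc_common.run_os_command(f"mount {dst_devpath} {temp_dir}")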

# This import is always required here, as VMBuilder is used by the VMBuilderScript class.
from daemon_lib.vmbuilder import VMBuilder


# The VMBuilderScript class must be named as such, and extend VMBuilder.
class VMBuilderScript(VMBuilder):
def setup(self):
"""
setup(): Perform special setup steps or validation before proceeding
2022-10-06 10:14:04 -04:00
Validate that we're actually an OVA profile.
2022-10-05 16:03:05 -04:00
"""
2022-10-06 10:14:04 -04:00
if self.vm_data.get("ova_details") is None:
raise ProvisioningError(
"Attempting to provision non-OVA profile with OVA script."
)
2022-10-05 16:03:05 -04:00
def create(self):
"""
create(): Create the VM libvirt schema definition
2022-10-06 10:14:04 -04:00
This step *must* return a fully-formed Libvirt XML document as a string or the
provisioning task will fail.
2022-10-05 16:03:05 -04:00
This example leverages the built-in libvirt_schema objects provided by PVC; these
can be used as-is, or replaced with your own schema(s) on a per-script basis.
"""
2022-10-06 10:14:04 -04:00
# Run any imports first
import daemon_lib.libvirt_schema as libvirt_schema
2022-10-06 10:14:04 -04:00
import datetime
import random
# Create the empty schema document that we will append to and return at the end
2022-10-05 16:03:05 -04:00
schema = ""
# Prepare a description based on the VM profile
        description = f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}', OVA '{self.vm_data['ova_details']['name']}'"

        # Format the header
        schema += libvirt_schema.libvirt_header.format(
            vm_name=self.vm_name,
            vm_uuid=self.vm_uuid,
            vm_description=description,
            vm_memory=self.vm_data["system_details"]["vram_mb"],
            vm_vcpus=self.vm_data["system_details"]["vcpu_count"],
            vm_architecture=self.vm_data["system_architecture"],
        )

        # Add the disk devices
        monitor_list = self.vm_data["ceph_monitor_list"]
        monitor_port = self.vm_data["ceph_monitor_port"]
        monitor_secret = self.vm_data["ceph_monitor_secret"]

        for volume in self.vm_data["volumes"]:
            schema += libvirt_schema.devices_disk_header.format(
                ceph_storage_secret=monitor_secret,
                disk_pool=volume["pool"],
                vm_name=self.vm_name,
                disk_id=volume["disk_id"],
            )
            for monitor in monitor_list:
                schema += libvirt_schema.devices_disk_coordinator.format(
                    coordinator_name=monitor,
                    coordinator_ceph_mon_port=monitor_port,
                )
            schema += libvirt_schema.devices_disk_footer

        # Add the special vhostmd device for hypervisor information inside the VM
        schema += libvirt_schema.devices_vhostmd

        # Add the network devices
        network_id = 0
        for network in self.vm_data["networks"]:
            vm_id_hex = "{:x}".format(int(self.vm_id % 16))
            net_id_hex = "{:x}".format(int(network_id % 16))

            if self.vm_data.get("mac_template") is not None:
                mac_prefix = "52:54:01"
                macgen_template = self.vm_data["mac_template"]
                eth_macaddr = macgen_template.format(
                    prefix=mac_prefix, vmid=vm_id_hex, netid=net_id_hex
                )
            else:
                mac_prefix = "52:54:00"
                random_octet_A = "{:x}".format(random.randint(16, 238))
                random_octet_B = "{:x}".format(random.randint(16, 238))
                random_octet_C = "{:x}".format(random.randint(16, 238))
                macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
                eth_macaddr = macgen_template.format(
                    prefix=mac_prefix,
                    octetA=random_octet_A,
                    octetB=random_octet_B,
                    octetC=random_octet_C,
                )

            schema += libvirt_schema.devices_net_interface.format(
                eth_macaddr=eth_macaddr,
                eth_bridge=network["eth_bridge"],
            )

            network_id += 1

        # Add default devices
        schema += libvirt_schema.devices_default

        # Add serial device
        if self.vm_data["system_details"]["serial"]:
            schema += libvirt_schema.devices_serial.format(vm_name=self.vm_name)

        # Add VNC device
        if self.vm_data["system_details"]["vnc"]:
            if self.vm_data["system_details"]["vnc_bind"]:
                vm_vnc_bind = self.vm_data["system_details"]["vnc_bind"]
            else:
                vm_vnc_bind = "127.0.0.1"
            vm_vncport = 5900
            vm_vnc_autoport = "yes"

            schema += libvirt_schema.devices_vnc.format(
                vm_vncport=vm_vncport,
                vm_vnc_autoport=vm_vnc_autoport,
                vm_vnc_bind=vm_vnc_bind,
            )

        # Add SCSI controller
        schema += libvirt_schema.devices_scsi_controller

        # Add footer
        schema += libvirt_schema.libvirt_footer

        return schema

    def prepare(self):
        """
        prepare(): Prepare any disks/volumes for the install() step
        """

        # Run any imports first
        from daemon_lib.vmbuilder import open_zk
        from pvcworkerd.Daemon import config
        import daemon_lib.common as pvc_common
        import daemon_lib.ceph as pvc_ceph
        import os

        # First loop: Create the destination disks
        self.log_info("Creating destination disk volumes")
        for volume in self.vm_data["volumes"]:
            self.log_info(f"Processing volume {volume['volume_name']}")

            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.add_volume(
                    zkhandler,
                    volume["pool"],
                    f"{self.vm_name}_{volume['disk_id']}",
                    f"{volume['disk_size_gb']}G",
                )
                self.log_info(message)
                if not success:
                    self.fail(f"Failed to create volume '{volume['disk_id']}'.")

        # Second loop: Map the destination disks
        self.log_info("Mapping destination disk volumes")
        for volume in self.vm_data["volumes"]:
            self.log_info(f"Processing volume {volume['volume_name']}")

            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.map_volume(
                    zkhandler,
                    volume["pool"],
                    dst_volume_name,
                )
                self.log_info(message)
                if not success:
                    self.fail(f"Failed to map volume '{dst_volume}'.")

        # Third loop: Map the source disks
        self.log_info("Mapping source disk volumes")
        for volume in self.vm_data["volumes"]:
            self.log_info(f"Processing volume {volume['volume_name']}")

            src_volume_name = volume["volume_name"]
            src_volume = f"{volume['pool']}/{src_volume_name}"

            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.map_volume(
                    zkhandler,
                    volume["pool"],
                    src_volume_name,
                )
                self.log_info(message)
                if not success:
                    self.fail(f"Failed to map volume '{src_volume}'.")

    def install(self):
        """
        install(): Perform the installation

        Convert the mapped source volumes to the mapped destination volumes.
        """

        # Run any imports first
        import daemon_lib.common as pvc_common

        for volume in self.vm_data["volumes"]:
            src_volume_name = volume["volume_name"]
            src_volume = f"{volume['pool']}/{src_volume_name}"
            src_devpath = f"/dev/rbd/{src_volume}"

            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"
            dst_devpath = f"/dev/rbd/{dst_volume}"

            self.log_info(
                f"Converting {volume['volume_format']} {src_volume} at {src_devpath} to {dst_volume} at {dst_devpath}"
            )
            retcode, stdout, stderr = pvc_common.run_os_command(
                f"qemu-img convert -C -f {volume['volume_format']} -O raw {src_devpath} {dst_devpath}"
            )
            if retcode:
                self.fail(
                    f"Failed to convert {volume['volume_format']} volume '{src_volume}' to raw volume '{dst_volume}' with qemu-img: {stderr}"
                )

    def cleanup(self):
        """
        cleanup(): Perform any cleanup required due to prepare()/install()

        This function is also called if there is ANY exception raised in the prepare()
        or install() steps. While this doesn't mean you shouldn't or can't raise exceptions
        here, be warned that doing so might cause loops. Do this only if you really need to!
        """

        # Run any imports first
        from daemon_lib.vmbuilder import open_zk
        from pvcworkerd.Daemon import config
        import daemon_lib.ceph as pvc_ceph

        # First loop: Unmap the source volumes
        for volume in list(reversed(self.vm_data["volumes"])):
            src_volume_name = volume["volume_name"]
            src_volume = f"{volume['pool']}/{src_volume_name}"
            src_devpath = f"/dev/rbd/{src_volume}"

            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.unmap_volume(
                    zkhandler,
                    volume["pool"],
                    src_volume_name,
                )
                if not success:
                    self.log_err(
                        f"Failed to unmap source volume '{src_volume_name}': {message}"
                    )

        # Second loop: Unmap the destination volumes
        for volume in list(reversed(self.vm_data["volumes"])):
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"
            dst_devpath = f"/dev/rbd/{dst_volume}"

            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.unmap_volume(
                    zkhandler,
                    volume["pool"],
                    dst_volume_name,
                )
                if not success:
                    self.log_err(
                        f"Failed to unmap destination volume '{dst_volume_name}': {message}"
                    )