Reorganize and add more comments to examples

Joshua Boniface 2022-10-05 23:18:52 -04:00
parent 1e0b502250
commit 3a5d8c61da
5 changed files with 136 additions and 223 deletions

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env python3
-# 1-noinstall.py - PVC Provisioner example script for noop install
+# 1-noop.py - PVC Provisioner example script for noop install
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
@@ -20,21 +20,24 @@
###############################################################################

# This script provides an example of a PVC provisioner script. It will create a
-# standard VM config but do no preparation/installation/cleanup (noop).
+# standard VM config but do no actual setup/prepare/install/cleanup (noop).
# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.
+# *** READ THIS SCRIPT THOROUGHLY BEFORE USING TO UNDERSTAND HOW IT WORKS. ***
-# The script must implement the class "VMBuilderScript" which extens "VMBuilder",
+# A script must implement the class "VMBuilderScript" which extends "VMBuilder",
# providing the 5 functions indicated. Detailed explanation of the role of each
-# function is provided.
+# function is provided in context of the example; see the other examples for
+# more potential uses.
-# Within the VMBuilderScript class, several common variables are exposed:
+# Within the VMBuilderScript class, several common variables are exposed through
+# the parent VMBuilder class:
# self.vm_name: The name of the VM from PVC's perspective
# self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
# self.vm_uuid: An automatically-generated UUID for the VM
# self.vm_profile: The PVC provisioner profile name used for the VM
-# self.vm-data: A dictionary of VM data collected by the provisioner; an example:
+# self.vm_data: A dictionary of VM data collected by the provisioner; as an example:
# {
#   "ceph_monitor_list": [
#     "hv1.pvcstorage.tld",
@@ -114,17 +117,31 @@
#     }
#   ]
# }
+#
+# Any other information you may require must be obtained manually.
+# WARNING:
+#
+# For safety reasons, the script runs in a modified chroot. It will have full access to
+# the entire / (root partition) of the hypervisor, but read-only. In addition it has
+# access to /dev, /sys, /run, and a fresh /tmp to write to; use /tmp/target (as
+# convention) as the destination for any mounting of volumes and installation.
+# Of course, in addition to this safety, it is VERY IMPORTANT to be aware that this
+# script runs AS ROOT ON THE HYPERVISOR SYSTEM. You should never allow arbitrary,
+# untrusted users the ability to add provisioning scripts even with this safeguard,
+# since they could still do destructive things to /dev and the like!

-from pvcapi.vmbuilder import VMBuilder, ProvisioningError
+# This import is always required here, as VMBuilder is used by the VMBuilderScript class
+# and ProvisioningError is the primary exception that should be raised within the class.
+from pvcapid.vmbuilder import VMBuilder, ProvisioningError

+# The VMBuilderScript class must be named as such, and extend VMBuilder.
class VMBuilderScript(VMBuilder):
    def setup(self):
        """
        setup(): Perform special setup steps or validation before proceeding

+        Since we do no install in this example, it does nothing.
        """

        pass
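The exposed variables listed in the header are available on self from any of the five functions. A minimal hypothetical illustration, using only the attributes documented above:

    def setup(self):
        # All of these are provided by the parent VMBuilder class
        print(f"Provisioning {self.vm_name} (ID {self.vm_id}, UUID {self.vm_uuid}) via profile {self.vm_profile}")
        # self.vm_data follows the dictionary layout shown in the header
        if not self.vm_data.get("volumes"):
            raise ProvisioningError("No volumes defined for this VM")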
@@ -133,17 +150,22 @@ class VMBuilderScript(VMBuilder):
        """
        create(): Create the VM libvirt schema definition

-        This step *must* return a fully-formed Libvirt XML document as a string.
+        This step *must* return a fully-formed Libvirt XML document as a string or the
+        provisioning task will fail.

        This example leverages the built-in libvirt_schema objects provided by PVC; these
        can be used as-is, or replaced with your own schema(s) on a per-script basis.
+
+        Even though we noop the rest of the script, we still create a fully-formed libvirt
+        XML document here as a demonstration.
        """

        # Run any imports first
-        import pvcapid.libvirt_schema as libvirt_schema
        import datetime
        import random
+        import pvcapid.libvirt_schema as libvirt_schema

+        # Create the empty schema document that we will append to and return at the end
        schema = ""

        # Prepare a description based on the VM profile
@@ -250,192 +272,24 @@ class VMBuilderScript(VMBuilder):
    def prepare(self):
        """
        prepare(): Prepare any disks/volumes for the install() step
-
-        This function should use the various exposed PVC commands as indicated to create
-        block devices and map them to the host.
        """

-        # Run any imports first
-        import os
-        from pvcapid.vmbuilder import open_zk
-        from pvcapid.Daemon import config
-        import daemon_lib.common as pvc_common
-        import daemon_lib.ceph as pvc_ceph
-
-        # First loop: Create the disks, either by cloning (pvc_ceph.clone_volume), or by
-        # new creation (pvc_ceph.add_volume).
-        for volume in self.vm_data["volumes"]:
-            if volume.get("source_volume") is not None:
-                with open_zk(config) as zkhandler:
-                    success, message = pvc_ceph.clone_volume(
-                        zkhandler,
-                        volume["pool"],
-                        volume["source_volume"],
-                        f"{self.vm_name}_{volume['disk_id']}",
-                    )
-                    print(message)
-                    if not success:
-                        raise ProvisioningError(
-                            f"Failed to clone volume '{volume['source_volume']}' to '{volume['disk_id']}'."
-                        )
-            else:
-                with open_zk(config) as zkhandler:
-                    success, message = pvc_ceph.add_volume(
-                        zkhandler,
-                        volume["pool"],
-                        f"{self.vm_name}_{volume['disk_id']}",
-                        f"{volume['disk_size_gb']}G",
-                    )
-                    print(message)
-                    if not success:
-                        raise ProvisioningError(
-                            f"Failed to create volume '{volume['disk_id']}'."
-                        )
-
-        # Second loop: Map the disks to the local system
-        for volume in self.vm_data["volumes"]:
-            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
-            dst_volume = f"{volume['pool']}/{dst_volume_name}"
-
-            with open_zk(config) as zkhandler:
-                success, message = pvc_ceph.map_volume(
-                    zkhandler,
-                    volume["pool"],
-                    dst_volume_name,
-                )
-                print(message)
-                if not success:
-                    raise ProvisioningError(f"Failed to map volume '{dst_volume}'.")
-
-        # Third loop: Create filesystems on the volumes
-        for volume in self.vm_data["volumes"]:
-            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
-            dst_volume = f"{volume['pool']}/{dst_volume_name}"
-
-            if volume.get("source_volume") is not None:
-                continue
-
-            if volume.get("filesystem") is None:
-                continue
-
-            filesystem_args_list = list()
-            for arg in volume["filesystem_args"].split():
-                arg_entry, *arg_data = arg.split("=")
-                arg_data = "=".join(arg_data)
-                filesystem_args_list.append(arg_entry)
-                filesystem_args_list.append(arg_data)
-            filesystem_args = " ".join(filesystem_args_list)
-
-            if volume["filesystem"] == "swap":
-                retcode, stdout, stderr = pvc_common.run_os_command(
-                    f"mkswap -f /dev/rbd/{dst_volume}"
-                )
-                if retcode:
-                    raise ProvisioningError(
-                        f"Failed to create swap on '{dst_volume}': {stderr}"
-                    )
-            else:
-                retcode, stdout, stderr = pvc_common.run_os_command(
-                    f"mkfs.{volume['filesystem']} {filesystem_args} /dev/rbd/{dst_volume}"
-                )
-                if retcode:
-                    raise ProvisioningError(
-                        f"Faield to create {volume['filesystem']} file on '{dst_volume}': {stderr}"
-                    )
-
-            print(stdout)
-
-        # Create a temporary directory to use during install
-        temp_dir = "/tmp/target"
-        if not os.path.isdir(temp_dir):
-            os.mkdir(temp_dir)
-
-        # Fourth loop: Mount the volumes to a set of temporary directories
-        for volume in self.vm_data["volumes"]:
-            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
-            dst_volume = f"{volume['pool']}/{dst_volume_name}"
-
-            if volume.get("source_volume") is not None:
-                continue
-
-            if volume.get("filesystem") is None:
-                continue
-
-            mapped_dst_volume = f"/dev/rbd/{dst_volume}"
-            mount_path = f"{temp_dir}/{volume['mountpoint']}"
-
-            if not os.path.isdir(mount_path):
-                os.mkdir(mount_path)
-
-            # Mount filesystem
-            retcode, stdout, stderr = pvc_common.run_os_command(
-                f"mount {mapped_dst_volume} {mount_path}"
-            )
-            if retcode:
-                raise ProvisioningError(
-                    f"Failed to mount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
-                )
+        pass

    def install(self):
        """
        install(): Perform the installation
-
-        Since this is a noop example, this step does nothing, aside from getting some
-        arguments for demonstration.
        """

-        arguments = self.vm_data["script_arguments"]
-        if arguments.get("vm_fqdn"):
-            vm_fqdn = arguments.get("vm_fqdn")
-        else:
-            vm_fqdn = self.vm_name
-
        pass

    def cleanup(self):
        """
        cleanup(): Perform any cleanup required due to prepare()/install()

-        It is important to now reverse *all* steps taken in those functions that might
-        need cleanup before teardown of the overlay chroot environment.
+        This function is also called if there is ANY exception raised in the prepare()
+        or install() steps. While this doesn't mean you shouldn't or can't raise exceptions
+        here, be warned that doing so might cause loops. Do this only if you really need to.
        """

-        # Run any imports first
-        from pvcapid.vmbuilder import open_zk
-        from pvcapid.Daemon import config
-        import daemon_lib.common as pvc_common
-        import daemon_lib.ceph as pvc_ceph
-
-        temp_dir = "/tmp/target"
-
-        for volume in list(reversed(self.vm_data["volumes"])):
-            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
-            dst_volume = f"{volume['pool']}/{dst_volume_name}"
-            mapped_dst_volume = f"/dev/rbd/{dst_volume}"
-            mount_path = f"{temp_dir}/{volume['mountpoint']}"
-
-            if (
-                volume.get("source_volume") is None
-                and volume.get("filesystem") is not None
-            ):
-                # Unmount filesystem
-                retcode, stdout, stderr = pvc_common.run_os_command(
-                    f"umount {mount_path}"
-                )
-                if retcode:
-                    raise ProvisioningError(
-                        f"Failed to unmount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
-                    )
-
-            # Unmap volume
-            with open_zk(config) as zkhandler:
-                success, message = pvc_ceph.unmap_volume(
-                    zkhandler,
-                    volume["pool"],
-                    dst_volume_name,
-                )
-                if not success:
-                    raise ProvisioningError(
-                        f"Failed to unmap '{mapped_dst_volume}': {stderr}"
-                    )
+        pass
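Taken together, a provisioner script is just the import plus the five methods; the noop example above reduces to the skeleton below (a condensed sketch for orientation only, with create() returning an empty string instead of the full libvirt XML the real example builds):

from pvcapid.vmbuilder import VMBuilder, ProvisioningError


class VMBuilderScript(VMBuilder):
    def setup(self):
        # Validate prerequisites; raise ProvisioningError to abort the task
        pass

    def create(self):
        # Must return a fully-formed Libvirt XML document as a string
        return ""

    def prepare(self):
        # Create and map any disks/volumes needed by install()
        pass

    def install(self):
        # Perform the actual installation
        pass

    def cleanup(self):
        # Reverse prepare()/install(); also called on any exception in those steps
        pass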

View File

@@ -24,17 +24,20 @@
# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.
+# *** READ THIS SCRIPT THOROUGHLY BEFORE USING TO UNDERSTAND HOW IT WORKS. ***
-# The script must implement the class "VMBuilderScript" which extens "VMBuilder",
+# A script must implement the class "VMBuilderScript" which extends "VMBuilder",
# providing the 5 functions indicated. Detailed explanation of the role of each
-# function is provided.
+# function is provided in context of the example; see the other examples for
+# more potential uses.
-# Within the VMBuilderScript class, several common variables are exposed:
+# Within the VMBuilderScript class, several common variables are exposed through
+# the parent VMBuilder class:
# self.vm_name: The name of the VM from PVC's perspective
# self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
# self.vm_uuid: An automatically-generated UUID for the VM
# self.vm_profile: The PVC provisioner profile name used for the VM
-# self.vm-data: A dictionary of VM data collected by the provisioner; an example:
+# self.vm_data: A dictionary of VM data collected by the provisioner; as an example:
# {
#   "ceph_monitor_list": [
#     "hv1.pvcstorage.tld",
@@ -114,40 +117,67 @@
#     }
#   ]
# }
+#
+# Any other information you may require must be obtained manually.
+# WARNING:
+#
+# For safety reasons, the script runs in a modified chroot. It will have full access to
+# the entire / (root partition) of the hypervisor, but read-only. In addition it has
+# access to /dev, /sys, /run, and a fresh /tmp to write to; use /tmp/target (as
+# convention) as the destination for any mounting of volumes and installation.
+# Of course, in addition to this safety, it is VERY IMPORTANT to be aware that this
+# script runs AS ROOT ON THE HYPERVISOR SYSTEM. You should never allow arbitrary,
+# untrusted users the ability to add provisioning scripts even with this safeguard,
+# since they could still do destructive things to /dev and the like!

+# This import is always required here, as VMBuilder is used by the VMBuilderScript class
+# and ProvisioningError is the primary exception that should be raised within the class.
from pvcapid.vmbuilder import VMBuilder, ProvisioningError

+# The VMBuilderScript class must be named as such, and extend VMBuilder.
class VMBuilderScript(VMBuilder):
    def setup(self):
        """
        setup(): Perform special setup steps or validation before proceeding

+        This example uses the PVC built-in command runner to verify that debootstrap is
+        installed and throws an error if not.
+
+        Note that, due to the aforementioned chroot, you *cannot* install or otherwise
+        modify the hypervisor system here: any tooling, etc. must be pre-installed.
        """

-        # Run any imports first
+        # Run any imports first; as shown here, you can import anything from the PVC
+        # namespace, as well as (of course) the main Python namespaces
        import daemon_lib.common as pvc_common

        # Ensure we have debootstrap installed on the provisioner system
        retcode, stdout, stderr = pvc_common.run_os_command(f"which debootstrap")
        if retcode:
+            # Raise a ProvisioningError for any exception; the provisioner will handle
+            # this gracefully and properly, avoiding dangling mounts, RBD maps, etc.
            raise ProvisioningError("Failed to find critical dependency: debootstrap")

    def create(self):
        """
        create(): Create the VM libvirt schema definition

-        This step *must* return a fully-formed Libvirt XML document as a string.
+        This step *must* return a fully-formed Libvirt XML document as a string or the
+        provisioning task will fail.

        This example leverages the built-in libvirt_schema objects provided by PVC; these
        can be used as-is, or replaced with your own schema(s) on a per-script basis.
        """

        # Run any imports first
-        import pvcapid.libvirt_schema as libvirt_schema
        import datetime
        import random
+        import pvcapid.libvirt_schema as libvirt_schema

+        # Create the empty schema document that we will append to and return at the end
        schema = ""

        # Prepare a description based on the VM profile
@@ -256,7 +286,12 @@ class VMBuilderScript(VMBuilder):
        prepare(): Prepare any disks/volumes for the install() step

        This function should use the various exposed PVC commands as indicated to create
-        block devices and map them to the host.
+        RBD block devices and map them to the host as required.
+
+        open_zk is exposed from pvcapid.vmbuilder to provide a context manager for opening
+        connections to the PVC Zookeeper cluster; ensure you also import (and pass it)
+        the config object from pvcapid.Daemon as well. This context manager then allows
+        the use of various common daemon library functions, without going through the API.
        """

        # Run any imports first
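The pattern the docstring describes looks like this in isolation; the imports and function names all appear in this file, while the pool, volume name, and size values here are hypothetical:

    from pvcapid.vmbuilder import open_zk, ProvisioningError
    from pvcapid.Daemon import config
    import daemon_lib.ceph as pvc_ceph

    pool, name, size = "vms", "myvm_sda", "20G"  # hypothetical values

    # The context manager opens (and reliably closes) a connection to the PVC
    # Zookeeper cluster, yielding a handler the daemon_lib functions accept
    with open_zk(config) as zkhandler:
        success, message = pvc_ceph.add_volume(zkhandler, pool, name, size)
        if not success:
            raise ProvisioningError(f"Failed to create volume '{name}': {message}")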
@@ -267,7 +302,7 @@ class VMBuilderScript(VMBuilder):
        import daemon_lib.ceph as pvc_ceph

        # First loop: Create the disks, either by cloning (pvc_ceph.clone_volume), or by
-        # new creation (pvc_ceph.add_volume).
+        # new creation (pvc_ceph.add_volume), depending on the source_volume entry
        for volume in self.vm_data["volumes"]:
            if volume.get("source_volume") is not None:
                with open_zk(config) as zkhandler:
@@ -386,15 +421,26 @@ class VMBuilderScript(VMBuilder):
        """
        install(): Perform the installation

-        Since this is a noop example, this step does nothing, aside from getting some
-        arguments for demonstration.
+        This example, unlike noop, performs a full debootstrap install and base config
+        of a Debian-like system, including installing GRUB for fully-virtualized boot
+        (required by PVC) and cloud-init for later configuration with the PVC userdata
+        functionality, leveraging a PVC managed network on the first NIC for DHCP.
+
+        Several arguments are also supported; these can be set either in the provisioner
+        profile itself, or on the command line at runtime.
+
+        To show the options, this function does not use the previous PVC-exposed
+        run_os_command function, but instead just uses os.system. The downside here is
+        a lack of response and error handling, but the upside is simpler-to-read code.
+        Use whichever you feel is appropriate for your situation.
        """

        # Run any imports first
        import os
        from pvcapid.vmbuilder import chroot

-        # The directory we mounted things on earlier during prepare()
+        # The directory we mounted things on earlier during prepare(); this could very well
+        # be exposed as a module-level variable if you so choose
        temporary_directory = "/tmp/target"

        # Use these convenient aliases for later (avoiding lots of "self.vm_data" everywhere)
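To make that trade-off concrete, here is the same check written both ways; the command itself is a hypothetical stand-in:

    import os
    import daemon_lib.common as pvc_common

    # run_os_command returns the exit code plus captured output, enabling real error handling
    retcode, stdout, stderr = pvc_common.run_os_command("grub-install --version")
    if retcode:
        raise ProvisioningError(f"grub-install unavailable: {stderr}")

    # os.system is shorter to read, but only the raw exit status comes back
    os.system("grub-install --version")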
@@ -426,7 +472,7 @@ class VMBuilderScript(VMBuilder):
            "wget",
        ]

-        # We need to know our root disk
+        # We need to know our root disk for later GRUB-ing
        root_disk = None
        for volume in volumes:
            if volume["mountpoint"] == "/":
@@ -444,7 +490,7 @@ class VMBuilderScript(VMBuilder):
            )
        )

-        # Bind mount the devfs
+        # Bind mount the devfs so we can grub-install later
        os.system("mount --bind /dev {}/dev".format(temporary_directory))

        # Create an fstab entry for each volume
@@ -457,11 +503,12 @@ class VMBuilderScript(VMBuilder):
        # which will always match the correct order from Libvirt (unlike sdX/vdX names).
        volume_id = 0
        for volume in volumes:
-            # We assume SSD-based/-like storage, and dislike atimes
+            # We assume SSD-based/-like storage (because Ceph behaves this way), and dislike atimes
            options = "defaults,discard,noatime,nodiratime"

            # The root, var, and log volumes have specific values
            if volume["mountpoint"] == "/":
+                # This will be used later by GRUB's cmdline
                root_volume["scsi_id"] = volume_id
                dump = 0
                cpass = 1
@@ -474,6 +521,10 @@ class VMBuilderScript(VMBuilder):
            # Append the fstab line
            with open(fstab_file, "a") as fh:
+                # Using these /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK entries guarantees
+                # proper ordering; /dev/sdX (or similar) names are NOT guaranteed to be
+                # in any order nor are they guaranteed to match the volume's sdX/vdX name
+                # when inside the VM due to Linux's quirks.
                data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{volume} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
                    volume=volume_id,
                    mountpoint=volume["mountpoint"],
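For concreteness, with the values a root volume would receive here (volume_id 0, an ext4 filesystem, the options above, dump 0, cpass 1), the format string produces an fstab line like:

    /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0 / ext4 defaults,discard,noatime,nodiratime 0 1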
@@ -487,16 +538,18 @@ class VMBuilderScript(VMBuilder):
            # Increment the volume_id
            volume_id += 1

-        # Write the hostname
+        # Write the hostname; you could also take an FQDN argument for this as an example
        hostname_file = "{}/etc/hostname".format(temporary_directory)
        with open(hostname_file, "w") as fh:
            fh.write("{}".format(vm_name))

-        # Fix the cloud-init.target since it's broken
+        # Fix the cloud-init.target since it's broken by default in Debian 11
        cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
            temporary_directory
        )
        with open(cloudinit_target_file, "w") as fh:
+            # We lose our indent on these raw blocks to preserve the appearance of the files
+            # inside the VM itself
            data = """[Install]
WantedBy=multi-user.target

[Unit]
@@ -505,8 +558,8 @@ After=multi-user.target
"""
            fh.write(data)

-        # NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
+        # Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
        # will always be on PCI bus ID 2, hence the name "ens2".
        # Write a DHCP stanza for ens2
        ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(
            temporary_directory
@@ -520,25 +573,21 @@ iface ens2 inet dhcp
        # Write the DHCP config for ens2
        dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
        with open(dhclient_file, "w") as fh:
-            data = (
-                """# DHCP client configuration
+            # We can use fstrings too, since PVC will always have Python 3.6+, though
+            # using format() might be preferable for clarity in some situations
+            data = f"""# DHCP client configuration
# Written by the PVC provisioner
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
interface "ens2" {
-"""
-                + """    send fqdn.fqdn = "{hostname}";
-    send host-name = "{hostname}";
-""".format(
-                    hostname=vm_name
-                )
-                + """    request subnet-mask, broadcast-address, time-offset, routers,
+    send fqdn.fqdn = "{vm_name}";
+    send host-name = "{vm_name}";
+    request subnet-mask, broadcast-address, time-offset, routers,
    domain-name, domain-name-servers, domain-search, host-name,
    dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
    netbios-name-servers, netbios-scope, interface-mtu,
    rfc3442-classless-static-routes, ntp-servers;
}
"""
-            )
            fh.write(data)
@@ -558,7 +607,7 @@ GRUB_DISABLE_LINUX_UUID=false
            )
            fh.write(data)

-        # Chroot, do some in-root tasks, then exit the chroot
+        # Do some tasks inside the chroot using the provided context manager
        with chroot(temporary_directory):
            # Install and update GRUB
            os.system(
@@ -567,12 +616,16 @@ GRUB_DISABLE_LINUX_UUID=false
                )
            )
            os.system("update-grub")

-            # Set a really dumb root password [TEMPORARY]
+            # Set a really dumb root password so the VM can be debugged
+            # EITHER CHANGE THIS YOURSELF, here or in Userdata, or run something after install
+            # to change the root password: don't leave it like this on an Internet-facing machine!
            os.system("echo root:test123 | chpasswd")

            # Enable cloud-init target on (first) boot
-            # NOTE: Your user-data should handle this and disable it once done, or things get messy.
+            # Your user-data should handle this and disable it once done, or things get messy.
            # That cloud-init won't run without this hack seems like a bug... but even the official
            # Debian cloud images are affected, so who knows.
            os.system("systemctl enable cloud-init.target")

        # Unmount the bound devfs
@@ -583,7 +636,11 @@ GRUB_DISABLE_LINUX_UUID=false
        cleanup(): Perform any cleanup required due to prepare()/install()

        It is important to now reverse *all* steps taken in those functions that might
-        need cleanup before teardown of the overlay chroot environment.
+        need cleanup before teardown of the upper chroot environment.
+
+        This function is also called if there is ANY exception raised in the prepare()
+        or install() steps. While this doesn't mean you shouldn't or can't raise exceptions
+        here, be warned that doing so might cause loops. Do this only if you really need to.
        """

        # Run any imports first
@@ -592,8 +649,10 @@ GRUB_DISABLE_LINUX_UUID=false
        import daemon_lib.common as pvc_common
        import daemon_lib.ceph as pvc_ceph

+        # Set the tempdir we used in the prepare() and install() steps
        temp_dir = "/tmp/target"

+        # Use this construct for reversing the list, as the normal reverse() messes with the list
        for volume in list(reversed(self.vm_data["volumes"])):
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"
@@ -620,7 +679,7 @@ GRUB_DISABLE_LINUX_UUID=false
                    volume["pool"],
                    dst_volume_name,
                )
                if not success:
                    raise ProvisioningError(
                        f"Failed to unmap '{mapped_dst_volume}': {stderr}"
                    )
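Two details of this cleanup() are worth noting. list(reversed(...)) iterates the volumes in reverse order without mutating self.vm_data (list.reverse() would flip it in place), and because cleanup() also runs after a failed prepare() or install(), raising on every error risks the loops the docstring warns about. A hypothetical, more defensive shape for one step:

    for volume in list(reversed(self.vm_data["volumes"])):
        try:
            ...  # umount/unmap steps as above
        except Exception as error:
            # Print and continue rather than raise, unless the failure truly must abort;
            # this avoids exception loops when cleanup() itself was triggered by an error
            print(f"Cleanup step failed, continuing: {error}")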