Implement new provisioner setup
parent f1df1cfe93
commit 4df70cf086

@@ -0,0 +1,425 @@
#!/usr/bin/env python3

# 1-noinstall.py - PVC Provisioner example script for noop install
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# This script provides an example of a PVC provisioner script. It will create a
# standard VM config but do no preparation/installation/cleanup (noop).

# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.

# The script must implement the class "VMBuilderScript", which extends "VMBuilder",
# providing the 5 functions indicated. A detailed explanation of the role of each
# function is provided.
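
# A minimal skeleton of the required class (for illustration only; the full
# example follows below):
#
#   class VMBuilderScript(VMBuilder):
#       def setup(self): ...    # validate prerequisites before proceeding
#       def create(self): ...   # must return a Libvirt XML string
#       def prepare(self): ...  # create/map/format the volumes
#       def install(self): ...  # perform the installation
#       def cleanup(self): ...  # reverse prepare()/install() steps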

# Within the VMBuilderScript class, several common variables are exposed:
#   self.vm_name: The name of the VM from PVC's perspective
#   self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
#   self.vm_uuid: An automatically-generated UUID for the VM
#   self.vm_profile: The PVC provisioner profile name used for the VM
#   self.vm_data: A dictionary of VM data collected by the provisioner; an example:
#   {
#     "ceph_monitor_list": [
#       "hv1.pvcstorage.tld",
#       "hv2.pvcstorage.tld",
#       "hv3.pvcstorage.tld"
#     ],
#     "ceph_monitor_port": "6789",
#     "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
#     "mac_template": null,
#     "networks": [
#       {
#         "eth_bridge": "vmbr1001",
#         "id": 72,
#         "network_template": 69,
#         "vni": "1001"
#       },
#       {
#         "eth_bridge": "vmbr101",
#         "id": 73,
#         "network_template": 69,
#         "vni": "101"
#       }
#     ],
#     "script": [contents of this file],
#     "script_arguments": {
#       "deb_mirror": "http://ftp.debian.org/debian",
#       "deb_release": "bullseye"
#     },
#     "system_architecture": "x86_64",
#     "system_details": {
#       "id": 78,
#       "migration_method": "live",
#       "name": "small",
#       "node_autostart": false,
#       "node_limit": null,
#       "node_selector": null,
#       "ova": null,
#       "serial": true,
#       "vcpu_count": 2,
#       "vnc": false,
#       "vnc_bind": null,
#       "vram_mb": 2048
#     },
#     "volumes": [
#       {
#         "disk_id": "sda",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=root",
#         "id": 9,
#         "mountpoint": "/",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       },
#       {
#         "disk_id": "sdb",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=var",
#         "id": 10,
#         "mountpoint": "/var",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       },
#       {
#         "disk_id": "sdc",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=log",
#         "id": 11,
#         "mountpoint": "/var/log",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       }
#     ]
#   }
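
# As an illustration, fields of the example structure above are read as plain
# nested dictionaries/lists:
#   self.vm_data["system_details"]["vram_mb"]  # -> 2048
#   self.vm_data["volumes"][0]["mountpoint"]   # -> "/"
#   self.vm_data["networks"][0]["eth_bridge"]  # -> "vmbr1001"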


class VMBuilderScript(VMBuilder):
    def setup(self):
        """
        setup(): Perform special setup steps or validation before proceeding

        Since we do no install in this example, it does nothing.
        """

        pass

    def create(self):
        """
        create(): Create the VM libvirt schema definition

        This step *must* return a fully-formed Libvirt XML document as a string.

        This example leverages the built-in libvirt_schema objects provided by PVC; these
        can be used as-is, or replaced with your own schema(s) on a per-script basis.
        """

        # Run any imports first
        import datetime
        import random
        import pvcapid.libvirt_schema as libvirt_schema

        schema = ""

        # Prepare a description based on the VM profile
        description = (
            f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}'"
        )

        # Format the header
        schema += libvirt_schema.libvirt_header.format(
            vm_name=self.vm_name,
            vm_uuid=self.vm_uuid,
            vm_description=description,
            vm_memory=self.vm_data["system_details"]["vram_mb"],
            vm_vcpus=self.vm_data["system_details"]["vcpu_count"],
            vm_architecture=self.vm_data["system_architecture"],
        )

        # Add the disk devices
        monitor_list = self.vm_data["ceph_monitor_list"]
        monitor_port = self.vm_data["ceph_monitor_port"]
        monitor_secret = self.vm_data["ceph_monitor_secret"]

        for volume in self.vm_data["volumes"]:
            schema += libvirt_schema.devices_disk_header.format(
                ceph_storage_secret=monitor_secret,
                disk_pool=volume["pool"],
                vm_name=self.vm_name,
                disk_id=volume["disk_id"],
            )
            for monitor in monitor_list:
                schema += libvirt_schema.devices_disk_coordinator.format(
                    coordinator_name=monitor,
                    coordinator_ceph_mon_port=monitor_port,
                )
            schema += libvirt_schema.devices_disk_footer

        # Add the special vhostmd device for hypervisor information inside the VM
        schema += libvirt_schema.devices_vhostmd

        # Add the network devices
        network_id = 0
        for network in self.vm_data["networks"]:
            vm_id_hex = "{:x}".format(int(self.vm_id % 16))
            net_id_hex = "{:x}".format(int(network_id % 16))

            if self.vm_data.get("mac_template") is not None:
                mac_prefix = "52:54:01"
                macgen_template = self.vm_data["mac_template"]
                eth_macaddr = macgen_template.format(
                    prefix=mac_prefix, vmid=vm_id_hex, netid=net_id_hex
                )
            else:
                mac_prefix = "52:54:00"
                random_octet_A = "{:x}".format(random.randint(16, 238))
                random_octet_B = "{:x}".format(random.randint(16, 238))
                random_octet_C = "{:x}".format(random.randint(16, 238))

                macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
                eth_macaddr = macgen_template.format(
                    prefix=mac_prefix,
                    octetA=random_octet_A,
                    octetB=random_octet_B,
                    octetC=random_octet_C,
                )
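
            # A mac_template, if set, is a Python format string; a hypothetical
            # template "{prefix}:ff:ff:{vmid}{netid}" would yield
            # "52:54:01:ff:ff:10" for VM ID 1 on its first network (both taken modulo 16).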

            schema += libvirt_schema.devices_net_interface.format(
                eth_macaddr=eth_macaddr,
                eth_bridge=network["eth_bridge"],
            )

            network_id += 1

        # Add default devices
        schema += libvirt_schema.devices_default

        # Add serial device
        if self.vm_data["system_details"]["serial"]:
            schema += libvirt_schema.devices_serial.format(vm_name=self.vm_name)

        # Add VNC device
        if self.vm_data["system_details"]["vnc"]:
            if self.vm_data["system_details"]["vnc_bind"]:
                vm_vnc_bind = self.vm_data["system_details"]["vnc_bind"]
            else:
                vm_vnc_bind = "127.0.0.1"

            vm_vncport = 5900
            vm_vnc_autoport = "yes"

            schema += libvirt_schema.devices_vnc.format(
                vm_vncport=vm_vncport,
                vm_vnc_autoport=vm_vnc_autoport,
                vm_vnc_bind=vm_vnc_bind,
            )

        # Add SCSI controller
        schema += libvirt_schema.devices_scsi_controller

        # Add footer
        schema += libvirt_schema.libvirt_footer

        return schema

    def prepare(self):
        """
        prepare(): Prepare any disks/volumes for the install() step

        This function should use the various exposed PVC commands as indicated to create
        block devices and map them to the host.
        """

        # Run any imports first
        import os

        # First loop: Create the disks, either by cloning (pvc_ceph.clone_volume), or by
        # new creation (pvc_ceph.add_volume).
        for volume in self.vm_data["volumes"]:
            if volume.get("source_volume") is not None:
                with open_zk(config) as zkhandler:
                    success, message = pvc_ceph.clone_volume(
                        zkhandler,
                        volume["pool"],
                        volume["source_volume"],
                        f"{self.vm_name}_{volume['disk_id']}",
                    )
                    print(message)
                    if not success:
                        raise ProvisioningError(
                            f"Failed to clone volume '{volume['source_volume']}' to '{volume['disk_id']}'."
                        )
            else:
                with open_zk(config) as zkhandler:
                    success, message = pvc_ceph.add_volume(
                        zkhandler,
                        volume["pool"],
                        f"{self.vm_name}_{volume['disk_id']}",
                        f"{volume['disk_size_gb']}G",
                    )
                    print(message)
                    if not success:
                        raise ProvisioningError(
                            f"Failed to create volume '{volume['disk_id']}'."
                        )

        # Second loop: Map the disks to the local system
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.map_volume(
                    zkhandler,
                    volume["pool"],
                    dst_volume_name,
                )
                print(message)
                if not success:
                    raise ProvisioningError(f"Failed to map volume '{dst_volume}'.")

        # Third loop: Create filesystems on the volumes
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            if volume.get("source_volume") is not None:
                continue

            if volume.get("filesystem") is None:
                continue

            # Rewrite "key=value" filesystem arguments into "key value" pairs,
            # e.g. "-L=root" becomes "-L root" on the mkfs command line
            filesystem_args_list = list()
            for arg in volume["filesystem_args"].split():
                arg_entry, *arg_data = arg.split("=")
                arg_data = "=".join(arg_data)
                filesystem_args_list.append(arg_entry)
                filesystem_args_list.append(arg_data)
            filesystem_args = " ".join(filesystem_args_list)

            if volume["filesystem"] == "swap":
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"mkswap -f /dev/rbd/{dst_volume}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to create swap on '{dst_volume}': {stderr}"
                    )
            else:
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"mkfs.{volume['filesystem']} {filesystem_args} /dev/rbd/{dst_volume}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to create {volume['filesystem']} filesystem on '{dst_volume}': {stderr}"
                    )

            print(stdout)

        # Create a temporary directory to use during install
        temp_dir = "/tmp/target"
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir)

        # Fourth loop: Mount the volumes to a set of temporary directories
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            if volume.get("source_volume") is not None:
                continue

            if volume.get("filesystem") is None:
                continue

            mapped_dst_volume = f"/dev/rbd/{dst_volume}"

            mount_path = f"{temp_dir}/{volume['mountpoint']}"

            if not os.path.exists(mount_path):
                os.mkdir(mount_path)

            # Mount filesystem
            retcode, stdout, stderr = pvc_common.run_os_command(
                f"mount {mapped_dst_volume} {mount_path}"
            )
            if retcode:
                raise ProvisioningError(
                    f"Failed to mount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
                )

    def install(self):
        """
        install(): Perform the installation

        Since this is a noop example, this step does nothing, aside from getting some
        arguments for demonstration.
        """

        arguments = self.vm_data["script_arguments"]
        if arguments.get("vm_fqdn"):
            vm_fqdn = arguments.get("vm_fqdn")
        else:
            vm_fqdn = self.vm_name

        pass

    def cleanup(self):
        """
        cleanup(): Perform any cleanup required due to prepare()/install()

        It is important to now reverse *all* steps taken in those functions that might
        need cleanup before teardown of the overlay chroot environment.
        """

        temp_dir = "/tmp/target"

        for volume in list(reversed(self.vm_data["volumes"])):
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"
            mapped_dst_volume = f"/dev/rbd/{dst_volume}"
            mount_path = f"{temp_dir}/{volume['mountpoint']}"

            if (
                volume.get("source_volume") is None
                and volume.get("filesystem") is not None
            ):
                # Unmount filesystem
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"umount {mount_path}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to unmount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
                    )

            # Unmap volume
            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.unmap_volume(
                    zkhandler,
                    volume["pool"],
                    dst_volume_name,
                )
                if not success:
                    raise ProvisioningError(
                        f"Failed to unmap '{mapped_dst_volume}': {message}"
                    )

@@ -0,0 +1,602 @@
#!/usr/bin/env python3

# 2-debootstrap.py - PVC Provisioner example script for debootstrap install
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# This script provides an example of a PVC provisioner script. It will create a
# standard VM config and install a Debian-like OS using debootstrap.

# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.

# The script must implement the class "VMBuilderScript", which extends "VMBuilder",
# providing the 5 functions indicated. A detailed explanation of the role of each
# function is provided.

# Within the VMBuilderScript class, several common variables are exposed:
#   self.vm_name: The name of the VM from PVC's perspective
#   self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
#   self.vm_uuid: An automatically-generated UUID for the VM
#   self.vm_profile: The PVC provisioner profile name used for the VM
#   self.vm_data: A dictionary of VM data collected by the provisioner; an example:
#   {
#     "ceph_monitor_list": [
#       "hv1.pvcstorage.tld",
#       "hv2.pvcstorage.tld",
#       "hv3.pvcstorage.tld"
#     ],
#     "ceph_monitor_port": "6789",
#     "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
#     "mac_template": null,
#     "networks": [
#       {
#         "eth_bridge": "vmbr1001",
#         "id": 72,
#         "network_template": 69,
#         "vni": "1001"
#       },
#       {
#         "eth_bridge": "vmbr101",
#         "id": 73,
#         "network_template": 69,
#         "vni": "101"
#       }
#     ],
#     "script": [contents of this file],
#     "script_arguments": {
#       "deb_mirror": "http://ftp.debian.org/debian",
#       "deb_release": "bullseye"
#     },
#     "system_architecture": "x86_64",
#     "system_details": {
#       "id": 78,
#       "migration_method": "live",
#       "name": "small",
#       "node_autostart": false,
#       "node_limit": null,
#       "node_selector": null,
#       "ova": null,
#       "serial": true,
#       "vcpu_count": 2,
#       "vnc": false,
#       "vnc_bind": null,
#       "vram_mb": 2048
#     },
#     "volumes": [
#       {
#         "disk_id": "sda",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=root",
#         "id": 9,
#         "mountpoint": "/",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       },
#       {
#         "disk_id": "sdb",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=var",
#         "id": 10,
#         "mountpoint": "/var",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       },
#       {
#         "disk_id": "sdc",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=log",
#         "id": 11,
#         "mountpoint": "/var/log",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       }
#     ]
#   }


class VMBuilderScript(VMBuilder):
    def setup(self):
        """
        setup(): Perform special setup steps or validation before proceeding
        """

        # Ensure we have debootstrap installed on the provisioner system
        retcode, stdout, stderr = pvc_common.run_os_command("which debootstrap")
        if retcode:
            raise ProvisioningError("Failed to find critical dependency: debootstrap")

    def create(self):
        """
        create(): Create the VM libvirt schema definition

        This step *must* return a fully-formed Libvirt XML document as a string.

        This example leverages the built-in libvirt_schema objects provided by PVC; these
        can be used as-is, or replaced with your own schema(s) on a per-script basis.
        """

        # Run any imports first
        import datetime
        import random
        import pvcapid.libvirt_schema as libvirt_schema

        schema = ""

        # Prepare a description based on the VM profile
        description = (
            f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}'"
        )

        # Format the header
        schema += libvirt_schema.libvirt_header.format(
            vm_name=self.vm_name,
            vm_uuid=self.vm_uuid,
            vm_description=description,
            vm_memory=self.vm_data["system_details"]["vram_mb"],
            vm_vcpus=self.vm_data["system_details"]["vcpu_count"],
            vm_architecture=self.vm_data["system_architecture"],
        )

        # Add the disk devices
        monitor_list = self.vm_data["ceph_monitor_list"]
        monitor_port = self.vm_data["ceph_monitor_port"]
        monitor_secret = self.vm_data["ceph_monitor_secret"]

        for volume in self.vm_data["volumes"]:
            schema += libvirt_schema.devices_disk_header.format(
                ceph_storage_secret=monitor_secret,
                disk_pool=volume["pool"],
                vm_name=self.vm_name,
                disk_id=volume["disk_id"],
            )
            for monitor in monitor_list:
                schema += libvirt_schema.devices_disk_coordinator.format(
                    coordinator_name=monitor,
                    coordinator_ceph_mon_port=monitor_port,
                )
            schema += libvirt_schema.devices_disk_footer

        # Add the special vhostmd device for hypervisor information inside the VM
        schema += libvirt_schema.devices_vhostmd

        # Add the network devices
        network_id = 0
        for network in self.vm_data["networks"]:
            vm_id_hex = "{:x}".format(int(self.vm_id % 16))
            net_id_hex = "{:x}".format(int(network_id % 16))

            if self.vm_data.get("mac_template") is not None:
                mac_prefix = "52:54:01"
                macgen_template = self.vm_data["mac_template"]
                eth_macaddr = macgen_template.format(
                    prefix=mac_prefix, vmid=vm_id_hex, netid=net_id_hex
                )
            else:
                mac_prefix = "52:54:00"
                random_octet_A = "{:x}".format(random.randint(16, 238))
                random_octet_B = "{:x}".format(random.randint(16, 238))
                random_octet_C = "{:x}".format(random.randint(16, 238))

                macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
                eth_macaddr = macgen_template.format(
                    prefix=mac_prefix,
                    octetA=random_octet_A,
                    octetB=random_octet_B,
                    octetC=random_octet_C,
                )
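
            # A mac_template, if set, is a Python format string; a hypothetical
            # template "{prefix}:ff:ff:{vmid}{netid}" would yield
            # "52:54:01:ff:ff:10" for VM ID 1 on its first network (both taken modulo 16).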

            schema += libvirt_schema.devices_net_interface.format(
                eth_macaddr=eth_macaddr,
                eth_bridge=network["eth_bridge"],
            )

            network_id += 1

        # Add default devices
        schema += libvirt_schema.devices_default

        # Add serial device
        if self.vm_data["system_details"]["serial"]:
            schema += libvirt_schema.devices_serial.format(vm_name=self.vm_name)

        # Add VNC device
        if self.vm_data["system_details"]["vnc"]:
            if self.vm_data["system_details"]["vnc_bind"]:
                vm_vnc_bind = self.vm_data["system_details"]["vnc_bind"]
            else:
                vm_vnc_bind = "127.0.0.1"

            vm_vncport = 5900
            vm_vnc_autoport = "yes"

            schema += libvirt_schema.devices_vnc.format(
                vm_vncport=vm_vncport,
                vm_vnc_autoport=vm_vnc_autoport,
                vm_vnc_bind=vm_vnc_bind,
            )

        # Add SCSI controller
        schema += libvirt_schema.devices_scsi_controller

        # Add footer
        schema += libvirt_schema.libvirt_footer

        return schema

    def prepare(self):
        """
        prepare(): Prepare any disks/volumes for the install() step

        This function should use the various exposed PVC commands as indicated to create
        block devices and map them to the host.
        """

        # Run any imports first
        import os

        # First loop: Create the disks, either by cloning (pvc_ceph.clone_volume), or by
        # new creation (pvc_ceph.add_volume).
        for volume in self.vm_data["volumes"]:
            if volume.get("source_volume") is not None:
                with open_zk(config) as zkhandler:
                    success, message = pvc_ceph.clone_volume(
                        zkhandler,
                        volume["pool"],
                        volume["source_volume"],
                        f"{self.vm_name}_{volume['disk_id']}",
                    )
                    print(message)
                    if not success:
                        raise ProvisioningError(
                            f"Failed to clone volume '{volume['source_volume']}' to '{volume['disk_id']}'."
                        )
            else:
                with open_zk(config) as zkhandler:
                    success, message = pvc_ceph.add_volume(
                        zkhandler,
                        volume["pool"],
                        f"{self.vm_name}_{volume['disk_id']}",
                        f"{volume['disk_size_gb']}G",
                    )
                    print(message)
                    if not success:
                        raise ProvisioningError(
                            f"Failed to create volume '{volume['disk_id']}'."
                        )

        # Second loop: Map the disks to the local system
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.map_volume(
                    zkhandler,
                    volume["pool"],
                    dst_volume_name,
                )
                print(message)
                if not success:
                    raise ProvisioningError(f"Failed to map volume '{dst_volume}'.")

        # Third loop: Create filesystems on the volumes
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            if volume.get("source_volume") is not None:
                continue

            if volume.get("filesystem") is None:
                continue

            # Rewrite "key=value" filesystem arguments into "key value" pairs,
            # e.g. "-L=root" becomes "-L root" on the mkfs command line
            filesystem_args_list = list()
            for arg in volume["filesystem_args"].split():
                arg_entry, *arg_data = arg.split("=")
                arg_data = "=".join(arg_data)
                filesystem_args_list.append(arg_entry)
                filesystem_args_list.append(arg_data)
            filesystem_args = " ".join(filesystem_args_list)

            if volume["filesystem"] == "swap":
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"mkswap -f /dev/rbd/{dst_volume}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to create swap on '{dst_volume}': {stderr}"
                    )
            else:
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"mkfs.{volume['filesystem']} {filesystem_args} /dev/rbd/{dst_volume}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to create {volume['filesystem']} filesystem on '{dst_volume}': {stderr}"
                    )

            print(stdout)

        # Create a temporary directory to use during install
        temp_dir = "/tmp/target"
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir)

        # Fourth loop: Mount the volumes to a set of temporary directories
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            if volume.get("source_volume") is not None:
                continue

            if volume.get("filesystem") is None:
                continue

            mapped_dst_volume = f"/dev/rbd/{dst_volume}"

            mount_path = f"{temp_dir}/{volume['mountpoint']}"

            if not os.path.exists(mount_path):
                os.mkdir(mount_path)

            # Mount filesystem
            retcode, stdout, stderr = pvc_common.run_os_command(
                f"mount {mapped_dst_volume} {mount_path}"
            )
            if retcode:
                raise ProvisioningError(
                    f"Failed to mount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
                )

    def install(self):
        """
        install(): Perform the installation

        This example performs a debootstrap install of a Debian-like OS into the
        volumes prepared by prepare(), then configures fstab, the hostname,
        networking, and GRUB in the target.
        """

        # Run any imports first
        import os

        # The directory we mounted things on earlier during prepare()
        temporary_directory = "/tmp/target"

        # Use these convenient aliases for later (avoiding lots of "self.vm_data" everywhere)
        vm_name = self.vm_name
        # The disk list is exposed under the "volumes" key of vm_data (see the structure above)
        disks = self.vm_data["volumes"]
        networks = self.vm_data["networks"]

        # Parse these arguments out of self.vm_data["script_arguments"]
        if self.vm_data["script_arguments"].get("deb_release") is not None:
            deb_release = self.vm_data["script_arguments"].get("deb_release")
        else:
            deb_release = "stable"

        if self.vm_data["script_arguments"].get("deb_mirror") is not None:
            deb_mirror = self.vm_data["script_arguments"].get("deb_mirror")
        else:
            deb_mirror = "http://ftp.debian.org/debian"

        if self.vm_data["script_arguments"].get("deb_packages") is not None:
            deb_packages = (
                self.vm_data["script_arguments"].get("deb_packages").split(",")
            )
        else:
            deb_packages = [
                "linux-image-amd64",
                "grub-pc",
                "cloud-init",
                "python3-cffi-backend",
                "wget",
            ]

        # We need to know our root disk
        root_disk = None
        for disk in disks:
            if disk["mountpoint"] == "/":
                root_disk = disk
        if not root_disk:
            raise ProvisioningError("Failed to find root disk in disks list")

        # Perform a debootstrap installation
        os.system(
            "debootstrap --include={pkgs} {suite} {target} {mirror}".format(
                suite=deb_release,
                target=temporary_directory,
                mirror=deb_mirror,
                pkgs=",".join(deb_packages),
            )
        )
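
        # With the default arguments above, this runs, for example:
        #   debootstrap --include=linux-image-amd64,grub-pc,cloud-init,python3-cffi-backend,wget \
        #       stable /tmp/target http://ftp.debian.org/debian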

        # Bind mount the devfs
        os.system("mount --bind /dev {}/dev".format(temporary_directory))

        # Create an fstab entry for each disk
        fstab_file = "{}/etc/fstab".format(temporary_directory)
        # The disk ID starts at zero and increments by one for each disk in the fixed-order
        # disk list. This lets us work around the insanity of Libvirt IDs not matching guest IDs,
        # while still letting us have some semblance of control here without enforcing things
        # like labels. It increments in the for loop below at the end of each iteration, and is
        # used to craft a /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-X device ID
        # which will always match the correct order from Libvirt (unlike sdX/vdX names).
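        # For example, with the three-volume layout documented above, the root volume
        # (disk ID 0) produces an fstab line like:
        #   /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0 / ext4 defaults,discard,noatime,nodiratime 0 1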
        disk_id = 0
        for disk in disks:
            # We assume SSD-based/-like storage, and dislike atimes
            options = "defaults,discard,noatime,nodiratime"

            # The root, var, and log volumes have specific values
            if disk["mountpoint"] == "/":
                root_disk["scsi_id"] = disk_id
                dump = 0
                cpass = 1
            elif disk["mountpoint"] == "/var" or disk["mountpoint"] == "/var/log":
                dump = 0
                cpass = 2
            else:
                dump = 0
                cpass = 0

            # Append the fstab line
            with open(fstab_file, "a") as fh:
                data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
                    disk=disk_id,
                    mountpoint=disk["mountpoint"],
                    filesystem=disk["filesystem"],
                    options=options,
                    dump=dump,
                    cpass=cpass,
                )
                fh.write(data)

            # Increment the disk_id
            disk_id += 1

        # Write the hostname
        hostname_file = "{}/etc/hostname".format(temporary_directory)
        with open(hostname_file, "w") as fh:
            fh.write("{}".format(vm_name))

        # Fix the cloud-init.target since it's broken
        cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
            temporary_directory
        )
        with open(cloudinit_target_file, "w") as fh:
            data = """[Install]
WantedBy=multi-user.target
[Unit]
Description=Cloud-init target
After=multi-user.target
"""
            fh.write(data)

        # NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
        # will always be on PCI bus ID 2, hence the name "ens2".
        # Write a DHCP stanza for ens2
        ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(
            temporary_directory
        )
        with open(ens2_network_file, "w") as fh:
            data = """auto ens2
iface ens2 inet dhcp
"""
            fh.write(data)

        # Write the DHCP config for ens2
        dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
        with open(dhclient_file, "w") as fh:
            data = (
                """# DHCP client configuration
# Written by the PVC provisioner
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
interface "ens2" {
"""
                + """    send fqdn.fqdn = "{hostname}";
    send host-name = "{hostname}";
""".format(
                    hostname=vm_name
                )
                + """    request subnet-mask, broadcast-address, time-offset, routers,
        domain-name, domain-name-servers, domain-search, host-name,
        dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
        netbios-name-servers, netbios-scope, interface-mtu,
        rfc3442-classless-static-routes, ntp-servers;
}
"""
            )
            fh.write(data)

        # Write the GRUB configuration
        grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
        with open(grubcfg_file, "w") as fh:
            data = """# Written by the PVC provisioner
GRUB_DEFAULT=0
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="PVC Virtual Machine"
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{root_disk} console=tty0 console=ttyS0,115200n8"
GRUB_CMDLINE_LINUX=""
GRUB_TERMINAL=console
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
GRUB_DISABLE_LINUX_UUID=false
""".format(
                root_disk=root_disk["scsi_id"]
            )
            fh.write(data)
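
        # For instance, if the root volume is the first disk (scsi_id 0), the rendered
        # kernel command line includes:
        #   root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0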

        # Chroot, do some in-root tasks, then exit the chroot
        with chroot_target(temporary_directory):
            # Install and update GRUB
            os.system(
                "grub-install --force /dev/rbd/{}/{}_{}".format(
                    root_disk["pool"], vm_name, root_disk["disk_id"]
                )
            )
            os.system("update-grub")
            # Set a really dumb root password [TEMPORARY]
            os.system("echo root:test123 | chpasswd")
            # Enable cloud-init target on (first) boot
            # NOTE: Your user-data should handle this and disable it once done, or things get messy.
            # That cloud-init won't run without this hack seems like a bug... but even the official
            # Debian cloud images are affected, so who knows.
            os.system("systemctl enable cloud-init.target")

        # Unmount the bound devfs
        os.system("umount {}/dev".format(temporary_directory))

    def cleanup(self):
        """
        cleanup(): Perform any cleanup required due to prepare()/install()

        It is important to now reverse *all* steps taken in those functions that might
        need cleanup before teardown of the overlay chroot environment.
        """

        temp_dir = "/tmp/target"

        for volume in list(reversed(self.vm_data["volumes"])):
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"
            mapped_dst_volume = f"/dev/rbd/{dst_volume}"
            mount_path = f"{temp_dir}/{volume['mountpoint']}"

            if (
                volume.get("source_volume") is None
                and volume.get("filesystem") is not None
            ):
                # Unmount filesystem
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"umount {mount_path}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to unmount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
                    )

            # Unmap volume
            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.unmap_volume(
                    zkhandler,
                    volume["pool"],
                    dst_volume_name,
                )
                if not success:
                    raise ProvisioningError(
                        f"Failed to unmap '{mapped_dst_volume}': {message}"
                    )

@@ -0,0 +1,604 @@
#!/usr/bin/env python3

# 3-ova.py - PVC Provisioner example script for OVA install
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# This script provides an example of a PVC provisioner script. It will create a
# custom VM config based on an OVA profile.

# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required; this specific script is
# also hard-coded into a normal PVC provisioner system.

# The script must implement the class "VMBuilderScript", which extends "VMBuilder",
# providing the 5 functions indicated. A detailed explanation of the role of each
# function is provided.

# Within the VMBuilderScript class, several common variables are exposed:
#   self.vm_name: The name of the VM from PVC's perspective
#   self.vm_id: The VM ID (numerical component of the vm_name) from PVC's perspective
#   self.vm_uuid: An automatically-generated UUID for the VM
#   self.vm_profile: The PVC provisioner profile name used for the VM
#   self.vm_data: A dictionary of VM data collected by the provisioner; an example:
#   {
#     "ceph_monitor_list": [
#       "hv1.pvcstorage.tld",
#       "hv2.pvcstorage.tld",
#       "hv3.pvcstorage.tld"
#     ],
#     "ceph_monitor_port": "6789",
#     "ceph_monitor_secret": "96721723-8650-4a72-b8f6-a93cd1a20f0c",
#     "mac_template": null,
#     "networks": [
#       {
#         "eth_bridge": "vmbr1001",
#         "id": 72,
#         "network_template": 69,
#         "vni": "1001"
#       },
#       {
#         "eth_bridge": "vmbr101",
#         "id": 73,
#         "network_template": 69,
#         "vni": "101"
#       }
#     ],
#     "script": [contents of this file],
#     "script_arguments": {
#       "deb_mirror": "http://ftp.debian.org/debian",
#       "deb_release": "bullseye"
#     },
#     "system_architecture": "x86_64",
#     "system_details": {
#       "id": 78,
#       "migration_method": "live",
#       "name": "small",
#       "node_autostart": false,
#       "node_limit": null,
#       "node_selector": null,
#       "ova": null,
#       "serial": true,
#       "vcpu_count": 2,
#       "vnc": false,
#       "vnc_bind": null,
#       "vram_mb": 2048
#     },
#     "volumes": [
#       {
#         "disk_id": "sda",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=root",
#         "id": 9,
#         "mountpoint": "/",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       },
#       {
#         "disk_id": "sdb",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=var",
#         "id": 10,
#         "mountpoint": "/var",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       },
#       {
#         "disk_id": "sdc",
#         "disk_size_gb": 4,
#         "filesystem": "ext4",
#         "filesystem_args": "-L=log",
#         "id": 11,
#         "mountpoint": "/var/log",
#         "pool": "vms",
#         "source_volume": null,
#         "storage_template": 67
#       }
#     ]
#   }


# Run any imports first
import datetime
import os  # used by prepare() and install() below
import random
import pvcapid.libvirt_schema as libvirt_schema


class VMBuilderScript(VMBuilder):
    def setup(self):
        """
        setup(): Perform special setup steps or validation before proceeding
        """

        # Ensure we have debootstrap installed on the provisioner system
        retcode, stdout, stderr = pvc_common.run_os_command("which debootstrap")
        if retcode:
            raise ProvisioningError("Failed to find critical dependency: debootstrap")

    def create(self):
        """
        create(): Create the VM libvirt schema definition

        This step *must* return a fully-formed Libvirt XML document as a string.

        This example leverages the built-in libvirt_schema objects provided by PVC; these
        can be used as-is, or replaced with your own schema(s) on a per-script basis.
        """

        schema = ""

        # Prepare a description based on the VM profile
        description = (
            f"PVC provisioner @ {datetime.datetime.now()}, profile '{self.vm_profile}'"
        )

        # Format the header
        schema += libvirt_schema.libvirt_header.format(
            vm_name=self.vm_name,
            vm_uuid=self.vm_uuid,
            vm_description=description,
            vm_memory=self.vm_data["system_details"]["vram_mb"],
            vm_vcpus=self.vm_data["system_details"]["vcpu_count"],
            vm_architecture=self.vm_data["system_architecture"],
        )

        # Add the disk devices
        monitor_list = self.vm_data["ceph_monitor_list"]
        monitor_port = self.vm_data["ceph_monitor_port"]
        monitor_secret = self.vm_data["ceph_monitor_secret"]

        for volume in self.vm_data["volumes"]:
            schema += libvirt_schema.devices_disk_header.format(
                ceph_storage_secret=monitor_secret,
                disk_pool=volume["pool"],
                vm_name=self.vm_name,
                disk_id=volume["disk_id"],
            )
            for monitor in monitor_list:
                schema += libvirt_schema.devices_disk_coordinator.format(
                    coordinator_name=monitor,
                    coordinator_ceph_mon_port=monitor_port,
                )
            schema += libvirt_schema.devices_disk_footer

        # Add the special vhostmd device for hypervisor information inside the VM
        schema += libvirt_schema.devices_vhostmd

        # Add the network devices
        network_id = 0
        for network in self.vm_data["networks"]:
            vm_id_hex = "{:x}".format(int(self.vm_id % 16))
            net_id_hex = "{:x}".format(int(network_id % 16))

            if self.vm_data.get("mac_template") is not None:
                mac_prefix = "52:54:01"
                macgen_template = self.vm_data["mac_template"]
                eth_macaddr = macgen_template.format(
                    prefix=mac_prefix, vmid=vm_id_hex, netid=net_id_hex
                )
            else:
                mac_prefix = "52:54:00"
                random_octet_A = "{:x}".format(random.randint(16, 238))
                random_octet_B = "{:x}".format(random.randint(16, 238))
                random_octet_C = "{:x}".format(random.randint(16, 238))

                macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
                eth_macaddr = macgen_template.format(
                    prefix=mac_prefix,
                    octetA=random_octet_A,
                    octetB=random_octet_B,
                    octetC=random_octet_C,
                )
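
            # A mac_template, if set, is a Python format string; a hypothetical
            # template "{prefix}:ff:ff:{vmid}{netid}" would yield
            # "52:54:01:ff:ff:10" for VM ID 1 on its first network (both taken modulo 16).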

            schema += libvirt_schema.devices_net_interface.format(
                eth_macaddr=eth_macaddr,
                eth_bridge=network["eth_bridge"],
            )

            network_id += 1

        # Add default devices
        schema += libvirt_schema.devices_default

        # Add serial device
        if self.vm_data["system_details"]["serial"]:
            schema += libvirt_schema.devices_serial.format(vm_name=self.vm_name)

        # Add VNC device
        if self.vm_data["system_details"]["vnc"]:
            if self.vm_data["system_details"]["vnc_bind"]:
                vm_vnc_bind = self.vm_data["system_details"]["vnc_bind"]
            else:
                vm_vnc_bind = "127.0.0.1"

            vm_vncport = 5900
            vm_vnc_autoport = "yes"

            schema += libvirt_schema.devices_vnc.format(
                vm_vncport=vm_vncport,
                vm_vnc_autoport=vm_vnc_autoport,
                vm_vnc_bind=vm_vnc_bind,
            )

        # Add SCSI controller
        schema += libvirt_schema.devices_scsi_controller

        # Add footer
        schema += libvirt_schema.libvirt_footer

        return schema

    def prepare(self):
        """
        prepare(): Prepare any disks/volumes for the install() step

        This function should use the various exposed PVC commands as indicated to create
        block devices and map them to the host.
        """

        # First loop: Create the disks, either by cloning (pvc_ceph.clone_volume), or by
        # new creation (pvc_ceph.add_volume).
        for volume in self.vm_data["volumes"]:
            if volume.get("source_volume") is not None:
                with open_zk(config) as zkhandler:
                    success, message = pvc_ceph.clone_volume(
                        zkhandler,
                        volume["pool"],
                        volume["source_volume"],
                        f"{self.vm_name}_{volume['disk_id']}",
                    )
                    print(message)
                    if not success:
                        raise ProvisioningError(
                            f"Failed to clone volume '{volume['source_volume']}' to '{volume['disk_id']}'."
                        )
            else:
                with open_zk(config) as zkhandler:
                    success, message = pvc_ceph.add_volume(
                        zkhandler,
                        volume["pool"],
                        f"{self.vm_name}_{volume['disk_id']}",
                        f"{volume['disk_size_gb']}G",
                    )
                    print(message)
                    if not success:
                        raise ProvisioningError(
                            f"Failed to create volume '{volume['disk_id']}'."
                        )

        # Second loop: Map the disks to the local system
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.map_volume(
                    zkhandler,
                    volume["pool"],
                    dst_volume_name,
                )
                print(message)
                if not success:
                    raise ProvisioningError(f"Failed to map volume '{dst_volume}'.")

        # Third loop: Create filesystems on the volumes
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            if volume.get("source_volume") is not None:
                continue

            if volume.get("filesystem") is None:
                continue

            # Rewrite "key=value" filesystem arguments into "key value" pairs,
            # e.g. "-L=root" becomes "-L root" on the mkfs command line
            filesystem_args_list = list()
            for arg in volume["filesystem_args"].split():
                arg_entry, *arg_data = arg.split("=")
                arg_data = "=".join(arg_data)
                filesystem_args_list.append(arg_entry)
                filesystem_args_list.append(arg_data)
            filesystem_args = " ".join(filesystem_args_list)

            if volume["filesystem"] == "swap":
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"mkswap -f /dev/rbd/{dst_volume}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to create swap on '{dst_volume}': {stderr}"
                    )
            else:
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"mkfs.{volume['filesystem']} {filesystem_args} /dev/rbd/{dst_volume}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to create {volume['filesystem']} filesystem on '{dst_volume}': {stderr}"
                    )

            print(stdout)

        # Create a temporary directory to use during install
        temp_dir = "/tmp/target"
        if not os.path.exists(temp_dir):
            os.mkdir(temp_dir)

        # Fourth loop: Mount the volumes to a set of temporary directories
        for volume in self.vm_data["volumes"]:
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"

            if volume.get("source_volume") is not None:
                continue

            if volume.get("filesystem") is None:
                continue

            mapped_dst_volume = f"/dev/rbd/{dst_volume}"

            mount_path = f"{temp_dir}/{volume['mountpoint']}"

            if not os.path.exists(mount_path):
                os.mkdir(mount_path)

            # Mount filesystem
            retcode, stdout, stderr = pvc_common.run_os_command(
                f"mount {mapped_dst_volume} {mount_path}"
            )
            if retcode:
                raise ProvisioningError(
                    f"Failed to mount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
                )

    def install(self):
        """
        install(): Perform the installation

        This example performs a debootstrap install of a Debian-like OS into the
        volumes prepared by prepare(), then configures fstab, the hostname,
        networking, and GRUB in the target.
        """

        # The directory we mounted things on earlier during prepare()
        temporary_directory = "/tmp/target"

        # Use these convenient aliases for later (avoiding lots of "self.vm_data" everywhere)
        vm_name = self.vm_name
        # The disk list is exposed under the "volumes" key of vm_data (see the structure above)
        disks = self.vm_data["volumes"]
        networks = self.vm_data["networks"]

        # Parse these arguments out of self.vm_data["script_arguments"]
        if self.vm_data["script_arguments"].get("deb_release") is not None:
            deb_release = self.vm_data["script_arguments"].get("deb_release")
        else:
            deb_release = "stable"

        if self.vm_data["script_arguments"].get("deb_mirror") is not None:
            deb_mirror = self.vm_data["script_arguments"].get("deb_mirror")
        else:
            deb_mirror = "http://ftp.debian.org/debian"

        if self.vm_data["script_arguments"].get("deb_packages") is not None:
            deb_packages = (
                self.vm_data["script_arguments"].get("deb_packages").split(",")
            )
        else:
            deb_packages = [
                "linux-image-amd64",
                "grub-pc",
                "cloud-init",
                "python3-cffi-backend",
                "wget",
            ]

        # We need to know our root disk
        root_disk = None
        for disk in disks:
            if disk["mountpoint"] == "/":
                root_disk = disk
        if not root_disk:
            raise ProvisioningError("Failed to find root disk in disks list")

        # Perform a debootstrap installation
        os.system(
            "debootstrap --include={pkgs} {suite} {target} {mirror}".format(
                suite=deb_release,
                target=temporary_directory,
                mirror=deb_mirror,
                pkgs=",".join(deb_packages),
            )
        )
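
        # With the default arguments above, this runs, for example:
        #   debootstrap --include=linux-image-amd64,grub-pc,cloud-init,python3-cffi-backend,wget \
        #       stable /tmp/target http://ftp.debian.org/debian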

        # Bind mount the devfs
        os.system("mount --bind /dev {}/dev".format(temporary_directory))

        # Create an fstab entry for each disk
        fstab_file = "{}/etc/fstab".format(temporary_directory)
        # The disk ID starts at zero and increments by one for each disk in the fixed-order
        # disk list. This lets us work around the insanity of Libvirt IDs not matching guest IDs,
        # while still letting us have some semblance of control here without enforcing things
        # like labels. It increments in the for loop below at the end of each iteration, and is
        # used to craft a /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-X device ID
        # which will always match the correct order from Libvirt (unlike sdX/vdX names).
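        # For example, with the three-volume layout documented above, the root volume
        # (disk ID 0) produces an fstab line like:
        #   /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0 / ext4 defaults,discard,noatime,nodiratime 0 1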
        disk_id = 0
        for disk in disks:
            # We assume SSD-based/-like storage, and dislike atimes
            options = "defaults,discard,noatime,nodiratime"

            # The root, var, and log volumes have specific values
            if disk["mountpoint"] == "/":
                root_disk["scsi_id"] = disk_id
                dump = 0
                cpass = 1
            elif disk["mountpoint"] == "/var" or disk["mountpoint"] == "/var/log":
                dump = 0
                cpass = 2
            else:
                dump = 0
                cpass = 0

            # Append the fstab line
            with open(fstab_file, "a") as fh:
                data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
                    disk=disk_id,
                    mountpoint=disk["mountpoint"],
                    filesystem=disk["filesystem"],
                    options=options,
                    dump=dump,
                    cpass=cpass,
                )
                fh.write(data)

            # Increment the disk_id
            disk_id += 1

        # Write the hostname
        hostname_file = "{}/etc/hostname".format(temporary_directory)
        with open(hostname_file, "w") as fh:
            fh.write("{}".format(vm_name))

        # Fix the cloud-init.target since it's broken
        cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
            temporary_directory
        )
        with open(cloudinit_target_file, "w") as fh:
            data = """[Install]
WantedBy=multi-user.target
[Unit]
Description=Cloud-init target
After=multi-user.target
"""
            fh.write(data)

        # NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
        # will always be on PCI bus ID 2, hence the name "ens2".
        # Write a DHCP stanza for ens2
        ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(
            temporary_directory
        )
        with open(ens2_network_file, "w") as fh:
            data = """auto ens2
iface ens2 inet dhcp
"""
            fh.write(data)

        # Write the DHCP config for ens2
        dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
        with open(dhclient_file, "w") as fh:
            data = (
                """# DHCP client configuration
# Written by the PVC provisioner
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
interface "ens2" {
"""
                + """    send fqdn.fqdn = "{hostname}";
    send host-name = "{hostname}";
""".format(
                    hostname=vm_name
                )
                + """    request subnet-mask, broadcast-address, time-offset, routers,
        domain-name, domain-name-servers, domain-search, host-name,
        dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
        netbios-name-servers, netbios-scope, interface-mtu,
        rfc3442-classless-static-routes, ntp-servers;
}
"""
            )
            fh.write(data)

        # Write the GRUB configuration
        grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
        with open(grubcfg_file, "w") as fh:
            data = """# Written by the PVC provisioner
GRUB_DEFAULT=0
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="PVC Virtual Machine"
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{root_disk} console=tty0 console=ttyS0,115200n8"
GRUB_CMDLINE_LINUX=""
GRUB_TERMINAL=console
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
GRUB_DISABLE_LINUX_UUID=false
""".format(
                root_disk=root_disk["scsi_id"]
            )
            fh.write(data)
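
        # For instance, if the root volume is the first disk (scsi_id 0), the rendered
        # kernel command line includes:
        #   root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0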

        # Chroot, do some in-root tasks, then exit the chroot
        with chroot_target(temporary_directory):
            # Install and update GRUB
            os.system(
                "grub-install --force /dev/rbd/{}/{}_{}".format(
                    root_disk["pool"], vm_name, root_disk["disk_id"]
                )
            )
            os.system("update-grub")
            # Set a really dumb root password [TEMPORARY]
            os.system("echo root:test123 | chpasswd")
            # Enable cloud-init target on (first) boot
            # NOTE: Your user-data should handle this and disable it once done, or things get messy.
            # That cloud-init won't run without this hack seems like a bug... but even the official
            # Debian cloud images are affected, so who knows.
            os.system("systemctl enable cloud-init.target")

        # Unmount the bound devfs
        os.system("umount {}/dev".format(temporary_directory))

    def cleanup(self):
        """
        cleanup(): Perform any cleanup required due to prepare()/install()

        It is important to now reverse *all* steps taken in those functions that might
        need cleanup before teardown of the overlay chroot environment.
        """

        temp_dir = "/tmp/target"

        for volume in list(reversed(self.vm_data["volumes"])):
            dst_volume_name = f"{self.vm_name}_{volume['disk_id']}"
            dst_volume = f"{volume['pool']}/{dst_volume_name}"
            mapped_dst_volume = f"/dev/rbd/{dst_volume}"
            mount_path = f"{temp_dir}/{volume['mountpoint']}"

            if (
                volume.get("source_volume") is None
                and volume.get("filesystem") is not None
            ):
                # Unmount filesystem
                retcode, stdout, stderr = pvc_common.run_os_command(
                    f"umount {mount_path}"
                )
                if retcode:
                    raise ProvisioningError(
                        f"Failed to unmount '{mapped_dst_volume}' on '{mount_path}': {stderr}"
                    )

            # Unmap volume
            with open_zk(config) as zkhandler:
                success, message = pvc_ceph.unmap_volume(
                    zkhandler,
                    volume["pool"],
                    dst_volume_name,
                )
                if not success:
                    raise ProvisioningError(
                        f"Failed to unmap '{mapped_dst_volume}': {message}"
                    )

@@ -1,248 +0,0 @@
#!/usr/bin/env python3

# debootstrap_script.py - PVC Provisioner example script for Debootstrap
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2021 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# This script provides an example of a PVC provisioner script. It will install
# a Debian system, of the release specified in the keyword argument `deb_release`
# and from the mirror specified in the keyword argument `deb_mirror`, and
# including the packages specified in the keyword argument `deb_packages` (a list
# of strings, which is then joined together as a CSV and passed to debootstrap),
# to the configured disks, configure fstab, and install GRUB. Any later config
# should be done within the VM, for instance via cloud-init.

# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.

# This script will run under root privileges as the provisioner does. Be careful
# with that.

import os
from contextlib import contextmanager


# Create a chroot context manager
# This can be used later in the script to chroot to the destination directory,
# for instance to run commands within the target.
@contextmanager
def chroot_target(destination):
    try:
        real_root = os.open("/", os.O_RDONLY)
        os.chroot(destination)
        fake_root = os.open("/", os.O_RDONLY)
        os.fchdir(fake_root)
        yield
    finally:
        os.fchdir(real_root)
        os.chroot(".")
        os.fchdir(real_root)
        os.close(fake_root)
        os.close(real_root)
        del fake_root
        del real_root


# Installation function - performs a debootstrap install of a Debian system
# Note that the only arguments are keyword arguments.
def install(**kwargs):
    # The provisioner has already mounted the disks on kwargs['temporary_directory']
    # by this point, so we can get right to running the debootstrap after setting
    # some nicer variable names; you don't necessarily have to do this.
    vm_name = kwargs["vm_name"]
    temporary_directory = kwargs["temporary_directory"]
    disks = kwargs["disks"]
    networks = kwargs["networks"]
    # Our own required arguments. We should, though are not required to, handle
    # failures of these gracefully, should administrators forget to specify them.
    try:
        deb_release = kwargs["deb_release"]
    except Exception:
        deb_release = "stable"
    try:
        deb_mirror = kwargs["deb_mirror"]
    except Exception:
        deb_mirror = "http://ftp.debian.org/debian"
    try:
        deb_packages = kwargs["deb_packages"].split(",")
    except Exception:
        deb_packages = [
|
||||
"linux-image-amd64",
|
||||
"grub-pc",
|
||||
"cloud-init",
|
||||
"python3-cffi-backend",
|
||||
"wget",
|
||||
]
|
||||
|
||||
# We need to know our root disk
|
||||
root_disk = None
|
||||
for disk in disks:
|
||||
if disk["mountpoint"] == "/":
|
||||
root_disk = disk
|
||||
if not root_disk:
|
||||
return
|
||||
|
||||
# Ensure we have debootstrap intalled on the provisioner system; this is a
|
||||
# good idea to include if you plan to use anything that is not part of the
|
||||
# base Debian host system, just in case the provisioner host is not properly
|
||||
# configured already.
|
||||
os.system("apt-get install -y debootstrap")
|
||||
|
||||
# Perform a deboostrap installation
|
||||
os.system(
|
||||
"debootstrap --include={pkgs} {suite} {target} {mirror}".format(
|
||||
suite=deb_release,
|
||||
target=temporary_directory,
|
||||
mirror=deb_mirror,
|
||||
pkgs=",".join(deb_packages),
|
||||
)
|
||||
)
|
||||
|
||||
# Bind mount the devfs
|
||||
os.system("mount --bind /dev {}/dev".format(temporary_directory))
|
||||
|
||||
# Create an fstab entry for each disk
|
||||
fstab_file = "{}/etc/fstab".format(temporary_directory)
|
||||
# The disk ID starts at zero and increments by one for each disk in the fixed-order
|
||||
# disk list. This lets us work around the insanity of Libvirt IDs not matching guest IDs,
|
||||
# while still letting us have some semblance of control here without enforcing things
|
||||
# like labels. It increments in the for loop below at the end of each iteration, and is
|
||||
# used to craft a /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-X device ID
|
||||
# which will always match the correct order from Libvirt (unlike sdX/vdX names).
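    # For example (illustrative only): the first disk in the fixed-order list gets
    # disk_id 0, so a root volume ends up in fstab as:
    #   /dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-0 / ext4 defaults,discard,noatime,nodiratime 0 1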
    disk_id = 0
    for disk in disks:
        # We assume SSD-based/-like storage, and dislike atimes
        options = "defaults,discard,noatime,nodiratime"

        # The root, var, and log volumes have specific values
        if disk["mountpoint"] == "/":
            root_disk["scsi_id"] = disk_id
            dump = 0
            cpass = 1
        elif disk["mountpoint"] == "/var" or disk["mountpoint"] == "/var/log":
            dump = 0
            cpass = 2
        else:
            dump = 0
            cpass = 0

        # Append the fstab line
        with open(fstab_file, "a") as fh:
            data = "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{disk} {mountpoint} {filesystem} {options} {dump} {cpass}\n".format(
                disk=disk_id,
                mountpoint=disk["mountpoint"],
                filesystem=disk["filesystem"],
                options=options,
                dump=dump,
                cpass=cpass,
            )
            fh.write(data)

        # Increment the disk_id
        disk_id += 1

    # Write the hostname
    hostname_file = "{}/etc/hostname".format(temporary_directory)
    with open(hostname_file, "w") as fh:
        fh.write("{}".format(vm_name))

    # Fix the cloud-init.target since it's broken
    cloudinit_target_file = "{}/etc/systemd/system/cloud-init.target".format(
        temporary_directory
    )
    with open(cloudinit_target_file, "w") as fh:
        data = """[Install]
WantedBy=multi-user.target
[Unit]
Description=Cloud-init target
After=multi-user.target
"""
        fh.write(data)

    # NOTE: Due to device ordering within the Libvirt XML configuration, the first Ethernet interface
    # will always be on PCI bus ID 2, hence the name "ens2".
    # Write a DHCP stanza for ens2
    ens2_network_file = "{}/etc/network/interfaces.d/ens2".format(temporary_directory)
    with open(ens2_network_file, "w") as fh:
        data = """auto ens2
iface ens2 inet dhcp
"""
        fh.write(data)

    # Write the DHCP config for ens2
    dhclient_file = "{}/etc/dhcp/dhclient.conf".format(temporary_directory)
    with open(dhclient_file, "w") as fh:
        data = (
            """# DHCP client configuration
# Written by the PVC provisioner
option rfc3442-classless-static-routes code 121 = array of unsigned integer 8;
interface "ens2" {
"""
            + """ send fqdn.fqdn = "{hostname}";
 send host-name = "{hostname}";
""".format(
                hostname=vm_name
            )
            + """ request subnet-mask, broadcast-address, time-offset, routers,
    domain-name, domain-name-servers, domain-search, host-name,
    dhcp6.name-servers, dhcp6.domain-search, dhcp6.fqdn, dhcp6.sntp-servers,
    netbios-name-servers, netbios-scope, interface-mtu,
    rfc3442-classless-static-routes, ntp-servers;
}
"""
        )
        fh.write(data)

    # Write the GRUB configuration
    grubcfg_file = "{}/etc/default/grub".format(temporary_directory)
    with open(grubcfg_file, "w") as fh:
        data = """# Written by the PVC provisioner
GRUB_DEFAULT=0
GRUB_TIMEOUT=1
GRUB_DISTRIBUTOR="PVC Virtual Machine"
GRUB_CMDLINE_LINUX_DEFAULT="root=/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-{root_disk} console=tty0 console=ttyS0,115200n8"
GRUB_CMDLINE_LINUX=""
GRUB_TERMINAL=console
GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
GRUB_DISABLE_LINUX_UUID=false
""".format(
            root_disk=root_disk["scsi_id"]
        )
        fh.write(data)

    # Chroot, do some in-root tasks, then exit the chroot
    with chroot_target(temporary_directory):
        # Install and update GRUB
        os.system(
            "grub-install --force /dev/rbd/{}/{}_{}".format(
                root_disk["pool"], vm_name, root_disk["disk_id"]
            )
        )
        os.system("update-grub")
        # Set a really dumb root password [TEMPORARY]
        os.system("echo root:test123 | chpasswd")
        # Enable cloud-init target on (first) boot
        # NOTE: Your user-data should handle this and disable it once done, or things get messy.
        # That cloud-init won't run without this hack seems like a bug... but even the official
        # Debian cloud images are affected, so who knows.
        os.system("systemctl enable cloud-init.target")

    # Unmount the bound devfs
    os.system("umount {}/dev".format(temporary_directory))

    # Everything else is done via cloud-init user-data

@@ -1,43 +0,0 @@
#!/usr/bin/env python3

# dummy_script.py - PVC Provisioner example script for noop
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2021 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

# This script provides an example of a PVC provisioner script. It will do
# nothing and return back to the provisioner without taking any action, and
# expecting no special arguments.

# This script can thus be used as an example or reference implementation of a
# PVC provisioner script and expanded upon as required.

# This script will run under root privileges as the provisioner does. Be careful
# with that.


# Installation function - performs no actions then returns
# Note that the only arguments are keyword arguments.
def install(**kwargs):
    # The provisioner has already mounted the disks on kwargs['temporary_directory']
    # by this point, so we can get right to work after setting some nicer variable
    # names; you don't necessarily have to do this.
    vm_name = kwargs["vm_name"]
    temporary_directory = kwargs["temporary_directory"]
    disks = kwargs["disks"]
    networks = kwargs["networks"]
    # No operation - this script just returns
    pass

@@ -29,6 +29,7 @@ from pvcapid.Daemon import config, strtobool, API_VERSION

import pvcapid.helper as api_helper
import pvcapid.provisioner as api_provisioner
import pvcapid.vmbuilder as api_vmbuilder
import pvcapid.benchmark as api_benchmark
import pvcapid.ova as api_ova


@@ -144,7 +145,7 @@ def Authenticator(function):

def create_vm(
    self, vm_name, profile_name, define_vm=True, start_vm=True, script_run_args=[]
):
    return api_provisioner.create_vm(
    return api_vmbuilder.create_vm(
        self,
        vm_name,
        profile_name,

@@ -19,23 +19,12 @@
#
###############################################################################

import json
import psycopg2
import psycopg2.extras
import re

from pvcapid.Daemon import config, strtobool

from daemon_lib.zkhandler import ZKHandler

import daemon_lib.common as pvc_common
import daemon_lib.node as pvc_node
import daemon_lib.vm as pvc_vm
import daemon_lib.network as pvc_network
import daemon_lib.ceph as pvc_ceph

import pvcapid.libvirt_schema as libvirt_schema

from pvcapid.ova import list_ova


@@ -1229,866 +1218,3 @@ def delete_profile(name):
        retcode = 400
    close_database(conn, cur)
    return retmsg, retcode


#
# Main VM provisioning function - executed by the Celery worker
#
def create_vm(
    self, vm_name, vm_profile, define_vm=True, start_vm=True, script_run_args=[]
):
    # Runtime imports
    import time
    import importlib
    import uuid
    import datetime
    import random

    temp_dir = None

    time.sleep(2)

    print(
        "Starting provisioning of VM '{}' with profile '{}'".format(vm_name, vm_profile)
    )

    # Phase 0 - connect to databases
    try:
        db_conn, db_cur = open_database(config)
    except Exception:
        raise ClusterError("Failed to connect to Postgres")

    try:
        zkhandler = ZKHandler(config)
        zkhandler.connect()
    except Exception:
        raise ClusterError("Failed to connect to Zookeeper")

    # Phase 1 - setup
    # * Get the profile elements
    # * Get the details from these elements
    # * Assemble a VM configuration dictionary
    self.update_state(
        state="RUNNING",
        meta={"current": 1, "total": 10, "status": "Collecting configuration"},
    )
    time.sleep(1)

    vm_id = re.findall(r"/(\d+)$/", vm_name)
    if not vm_id:
        vm_id = 0
    else:
        vm_id = vm_id[0]

    vm_data = dict()

    # Get the profile information
    query = "SELECT * FROM profile WHERE name = %s"
    args = (vm_profile,)
    db_cur.execute(query, args)
    profile_data = db_cur.fetchone()
    if profile_data.get("arguments"):
        vm_data["script_arguments"] = profile_data.get("arguments").split("|")
    else:
        vm_data["script_arguments"] = []

    if profile_data.get("profile_type") == "ova":
        is_ova_install = True
        is_script_install = False  # By definition
    else:
        is_ova_install = False

    # Get the system details
    query = "SELECT * FROM system_template WHERE id = %s"
    args = (profile_data["system_template"],)
    db_cur.execute(query, args)
    vm_data["system_details"] = db_cur.fetchone()

    # Get the MAC template
    query = "SELECT mac_template FROM network_template WHERE id = %s"
    args = (profile_data["network_template"],)
    db_cur.execute(query, args)
    db_row = db_cur.fetchone()
    if db_row:
        vm_data["mac_template"] = db_row.get("mac_template")
    else:
        vm_data["mac_template"] = None

    # Get the networks
    query = "SELECT * FROM network WHERE network_template = %s"
    args = (profile_data["network_template"],)
    db_cur.execute(query, args)
    vm_data["networks"] = db_cur.fetchall()

    # Get the storage volumes
    # ORDER BY ensures disks are always in the sdX/vdX order, regardless of add order
    query = "SELECT * FROM storage WHERE storage_template = %s ORDER BY disk_id"
    args = (profile_data["storage_template"],)
    db_cur.execute(query, args)
    vm_data["volumes"] = db_cur.fetchall()

    # Get the script
    query = "SELECT script FROM script WHERE id = %s"
    args = (profile_data["script"],)
    db_cur.execute(query, args)
    db_row = db_cur.fetchone()
    if db_row:
        vm_data["script"] = db_row.get("script")
    else:
        vm_data["script"] = None

    if vm_data["script"] and not is_ova_install:
        is_script_install = True
    else:
        is_script_install = False

    # Get the OVA details
    if is_ova_install:
        query = "SELECT * FROM ova WHERE id = %s"
        args = (profile_data["ova"],)
        db_cur.execute(query, args)
        vm_data["ova_details"] = db_cur.fetchone()

        query = "SELECT * FROM ova_volume WHERE ova = %s"
        args = (profile_data["ova"],)
        db_cur.execute(query, args)
        vm_data["volumes"] = db_cur.fetchall()

    close_database(db_conn, db_cur)

    print(
        "VM configuration data:\n{}".format(
            json.dumps(vm_data, sort_keys=True, indent=2)
        )
    )

    # Phase 2 - verification
    # * Ensure that at least one node has enough free RAM to hold the VM (becomes main host)
    # * Ensure that all networks are valid
    # * Ensure that there is enough disk space in the Ceph cluster for the disks
    # This is the "safe fail" step when an invalid configuration will be caught
    self.update_state(
        state="RUNNING",
        meta={
            "current": 2,
            "total": 10,
            "status": "Verifying configuration against cluster",
        },
    )
    time.sleep(1)

    # Verify that a VM with this name does not already exist
    if pvc_vm.searchClusterByName(zkhandler, vm_name):
        raise ClusterError(
            "A VM with the name '{}' already exists in the cluster.".format(vm_name)
        )

    # Verify that at least one host has enough free RAM to run the VM
    _discard, nodes = pvc_node.get_list(zkhandler, None)
    target_node = None
    last_free = 0
    for node in nodes:
        # Skip the node if it is not ready to run VMs
        if node["daemon_state"] != "run" or node["domain_state"] != "ready":
            continue
        # Skip the node if its free memory is less than the new VM's size, plus a 512MB buffer
        if node["memory"]["free"] < (vm_data["system_details"]["vram_mb"] + 512):
            continue
        # If this node has the most free, use it
        if node["memory"]["free"] > last_free:
            last_free = node["memory"]["free"]
            target_node = node["name"]
    # Raise if no node was found
    if not target_node:
        raise ClusterError(
            "No ready cluster node contains at least {}+512 MB of free RAM.".format(
                vm_data["system_details"]["vram_mb"]
            )
        )

    print(
        'Selecting target node "{}" with "{}" MB free RAM'.format(
            target_node, last_free
        )
    )

    # Verify that all configured networks are present on the cluster
    cluster_networks, _discard = pvc_network.getClusterNetworkList(zkhandler)
    for network in vm_data["networks"]:
        vni = str(network["vni"])
        if vni not in cluster_networks and vni not in [
            "upstream",
            "cluster",
            "storage",
        ]:
            raise ClusterError(
                'The network VNI "{}" is not present on the cluster.'.format(vni)
            )

    print("All configured networks for VM are valid")

    # Verify that there is enough disk space free to provision all VM disks
    pools = dict()
    for volume in vm_data["volumes"]:
        if volume.get("source_volume") is not None:
            volume_data = pvc_ceph.getVolumeInformation(
                zkhandler, volume["pool"], volume["source_volume"]
            )
            if not volume_data:
                raise ClusterError(
                    "The source volume {}/{} could not be found.".format(
                        volume["pool"], volume["source_volume"]
                    )
                )
            if not volume["pool"] in pools:
                pools[volume["pool"]] = int(
                    pvc_ceph.format_bytes_fromhuman(volume_data["stats"]["size"])
                    / 1024
                    / 1024
                    / 1024
                )
            else:
                pools[volume["pool"]] += int(
                    pvc_ceph.format_bytes_fromhuman(volume_data["stats"]["size"])
                    / 1024
                    / 1024
                    / 1024
                )
        else:
            if not volume["pool"] in pools:
                pools[volume["pool"]] = volume["disk_size_gb"]
            else:
                pools[volume["pool"]] += volume["disk_size_gb"]

    for pool in pools:
        try:
            pool_information = pvc_ceph.getPoolInformation(zkhandler, pool)
            if not pool_information:
                raise
        except Exception:
            raise ClusterError('Pool "{}" is not present on the cluster.'.format(pool))
        pool_free_space_gb = int(
            pool_information["stats"]["free_bytes"] / 1024 / 1024 / 1024
        )
        pool_vm_usage_gb = int(pools[pool])

        if pool_vm_usage_gb >= pool_free_space_gb:
            raise ClusterError(
                'Pool "{}" has only {} GB free and VM requires {} GB.'.format(
                    pool, pool_free_space_gb, pool_vm_usage_gb
                )
            )

    print("There is enough space on cluster to store VM volumes")

    if not is_ova_install:
        # Verify that every specified filesystem is valid
        used_filesystems = list()
        for volume in vm_data["volumes"]:
            if volume["source_volume"] is not None:
                continue
            if volume["filesystem"] and volume["filesystem"] not in used_filesystems:
                used_filesystems.append(volume["filesystem"])

        for filesystem in used_filesystems:
            if filesystem == "swap":
                retcode, stdout, stderr = pvc_common.run_os_command("which mkswap")
                if retcode:
                    raise ProvisioningError(
                        "Failed to find binary for mkswap: {}".format(stderr)
                    )
            else:
                retcode, stdout, stderr = pvc_common.run_os_command(
                    "which mkfs.{}".format(filesystem)
                )
                if retcode:
                    raise ProvisioningError(
                        "Failed to find binary for mkfs.{}: {}".format(
                            filesystem, stderr
                        )
                    )

        print("All selected filesystems are valid")

    # Phase 3 - provisioning script preparation
    # * Import the provisioning script as a library with importlib
    # * Ensure the required function(s) are present
    self.update_state(
        state="RUNNING",
        meta={"current": 3, "total": 10, "status": "Preparing provisioning script"},
    )
    time.sleep(1)

    if is_script_install:
        # Write the script out to a temporary file
        retcode, stdout, stderr = pvc_common.run_os_command("mktemp")
        if retcode:
            raise ProvisioningError(
                "Failed to create a temporary file: {}".format(stderr)
            )
        script_file = stdout.strip()
        with open(script_file, "w") as fh:
            fh.write(vm_data["script"])
            fh.write("\n")

        # Import the script file
        loader = importlib.machinery.SourceFileLoader("installer_script", script_file)
        spec = importlib.util.spec_from_loader(loader.name, loader)
        installer_script = importlib.util.module_from_spec(spec)
        loader.exec_module(installer_script)

        # Verify that the install() function is valid
        if "install" not in dir(installer_script):
            raise ProvisioningError(
                "Specified script does not contain an install() function."
            )

        print("Provisioning script imported successfully")

    # Phase 4 - configuration creation
    # * Create the libvirt XML configuration
    self.update_state(
        state="RUNNING",
        meta={
            "current": 4,
            "total": 10,
            "status": "Preparing Libvirt XML configuration",
        },
    )
    time.sleep(1)

    print("Creating Libvirt configuration")

    # Get information about VM
    vm_uuid = uuid.uuid4()
    vm_description = "PVC provisioner @ {}, profile '{}'".format(
        datetime.datetime.now(), vm_profile
    )

    retcode, stdout, stderr = pvc_common.run_os_command("uname -m")
    system_architecture = stdout.strip()

    # Begin assembling libvirt schema
    vm_schema = ""

    vm_schema += libvirt_schema.libvirt_header.format(
        vm_name=vm_name,
        vm_uuid=vm_uuid,
        vm_description=vm_description,
        vm_memory=vm_data["system_details"]["vram_mb"],
        vm_vcpus=vm_data["system_details"]["vcpu_count"],
        vm_architecture=system_architecture,
    )

    # Add disk devices
    monitor_list = list()
    coordinator_names = config["storage_hosts"]
    for coordinator in coordinator_names:
        monitor_list.append("{}.{}".format(coordinator, config["storage_domain"]))

    ceph_storage_secret = config["ceph_storage_secret_uuid"]

    for volume in vm_data["volumes"]:
        vm_schema += libvirt_schema.devices_disk_header.format(
            ceph_storage_secret=ceph_storage_secret,
            disk_pool=volume["pool"],
            vm_name=vm_name,
            disk_id=volume["disk_id"],
        )
        for monitor in monitor_list:
            vm_schema += libvirt_schema.devices_disk_coordinator.format(
                coordinator_name=monitor,
                coordinator_ceph_mon_port=config["ceph_monitor_port"],
            )
        vm_schema += libvirt_schema.devices_disk_footer

    vm_schema += libvirt_schema.devices_vhostmd

    # Add network devices
    network_id = 0
    for network in vm_data["networks"]:
        vni = network["vni"]
        if vni in ["upstream", "cluster", "storage"]:
            eth_bridge = "br{}".format(vni)
        else:
            eth_bridge = "vmbr{}".format(vni)

        vm_id_hex = "{:x}".format(int(vm_id % 16))
        net_id_hex = "{:x}".format(int(network_id % 16))

        if vm_data.get("mac_template") is not None:
            mac_prefix = "52:54:01"
            macgen_template = vm_data["mac_template"]
            eth_macaddr = macgen_template.format(
                prefix=mac_prefix,
                vmid=vm_id_hex,
                netid=net_id_hex,
            )
        else:
            mac_prefix = "52:54:00"
            random_octet_A = "{:x}".format(random.randint(16, 238))
            random_octet_B = "{:x}".format(random.randint(16, 238))
            random_octet_C = "{:x}".format(random.randint(16, 238))

            macgen_template = "{prefix}:{octetA}:{octetB}:{octetC}"
            eth_macaddr = macgen_template.format(
                prefix=mac_prefix,
                octetA=random_octet_A,
                octetB=random_octet_B,
                octetC=random_octet_C,
            )

        vm_schema += libvirt_schema.devices_net_interface.format(
            eth_macaddr=eth_macaddr, eth_bridge=eth_bridge
        )

        network_id += 1

    # Add default devices
    vm_schema += libvirt_schema.devices_default

    # Add serial device
    if vm_data["system_details"]["serial"]:
        vm_schema += libvirt_schema.devices_serial.format(vm_name=vm_name)

    # Add VNC device
    if vm_data["system_details"]["vnc"]:
        if vm_data["system_details"]["vnc_bind"]:
            vm_vnc_bind = vm_data["system_details"]["vnc_bind"]
        else:
            vm_vnc_bind = "127.0.0.1"

        vm_vncport = 5900
        vm_vnc_autoport = "yes"

        vm_schema += libvirt_schema.devices_vnc.format(
            vm_vncport=vm_vncport,
            vm_vnc_autoport=vm_vnc_autoport,
            vm_vnc_bind=vm_vnc_bind,
        )

    # Add SCSI controller
    vm_schema += libvirt_schema.devices_scsi_controller

    # Add footer
    vm_schema += libvirt_schema.libvirt_footer

    print("Final VM schema:\n{}\n".format(vm_schema))

    # All the following steps may require cleanup later on, so catch them here and do cleanup in a Finally block
    try:
        # Phase 5 - definition
        # * Create the VM in the PVC cluster
        self.update_state(
            state="RUNNING",
            meta={"current": 5, "total": 10, "status": "Defining VM on the cluster"},
        )
        time.sleep(1)

        if define_vm:
            print("Defining VM on cluster")
            node_limit = vm_data["system_details"]["node_limit"]
            if node_limit:
                node_limit = node_limit.split(",")
            node_selector = vm_data["system_details"]["node_selector"]
            node_autostart = vm_data["system_details"]["node_autostart"]
            migration_method = vm_data["system_details"]["migration_method"]
            retcode, retmsg = pvc_vm.define_vm(
                zkhandler,
                vm_schema.strip(),
                target_node,
                node_limit,
                node_selector,
                node_autostart,
                migration_method,
                vm_profile,
                initial_state="provision",
            )
            print(retmsg)
        else:
            print("Skipping VM definition")

        # Phase 6 - disk creation
        # * Create each Ceph storage volume for the disks
        self.update_state(
            state="RUNNING",
            meta={"current": 6, "total": 10, "status": "Creating storage volumes"},
        )
        time.sleep(1)

        for volume in vm_data["volumes"]:
            if volume.get("source_volume") is not None:
                success, message = pvc_ceph.clone_volume(
                    zkhandler,
                    volume["pool"],
                    volume["source_volume"],
                    "{}_{}".format(vm_name, volume["disk_id"]),
                )
                print(message)
                if not success:
                    raise ProvisioningError(
                        'Failed to clone volume "{}" to "{}".'.format(
                            volume["source_volume"], volume["disk_id"]
                        )
                    )
            else:
                success, message = pvc_ceph.add_volume(
                    zkhandler,
                    volume["pool"],
                    "{}_{}".format(vm_name, volume["disk_id"]),
                    "{}G".format(volume["disk_size_gb"]),
                )
                print(message)
                if not success:
                    raise ProvisioningError(
                        'Failed to create volume "{}".'.format(volume["disk_id"])
                    )

        # Phase 7 - disk mapping
        # * Map each volume to the local host in order
        # * Format each volume with any specified filesystems
        # * If any mountpoints are specified, create a temporary mount directory
        # * Mount any volumes to their respective mountpoints
        self.update_state(
            state="RUNNING",
            meta={
                "current": 7,
                "total": 10,
                "status": "Mapping, formatting, and mounting storage volumes locally",
            },
        )
        time.sleep(1)

        for volume in vm_data["volumes"]:
            dst_volume_name = "{}_{}".format(vm_name, volume["disk_id"])
            dst_volume = "{}/{}".format(volume["pool"], dst_volume_name)

            if is_ova_install:
                src_volume_name = volume["volume_name"]
                src_volume = "{}/{}".format(volume["pool"], src_volume_name)

                print(
                    "Converting {} source volume {} to raw format on {}".format(
                        volume["volume_format"], src_volume, dst_volume
                    )
                )

                # Map the target RBD device
                retcode, retmsg = pvc_ceph.map_volume(
                    zkhandler, volume["pool"], dst_volume_name
                )
                if not retcode:
                    raise ProvisioningError(
                        'Failed to map destination volume "{}": {}'.format(
                            dst_volume_name, retmsg
                        )
                    )
                # Map the source RBD device
                retcode, retmsg = pvc_ceph.map_volume(
                    zkhandler, volume["pool"], src_volume_name
                )
                if not retcode:
                    raise ProvisioningError(
                        'Failed to map source volume "{}": {}'.format(
                            src_volume_name, retmsg
                        )
                    )
                # Convert from source to target
                retcode, stdout, stderr = pvc_common.run_os_command(
                    "qemu-img convert -C -f {} -O raw {} {}".format(
                        volume["volume_format"],
                        "/dev/rbd/{}".format(src_volume),
                        "/dev/rbd/{}".format(dst_volume),
                    )
                )
                if retcode:
                    raise ProvisioningError(
                        'Failed to convert {} volume "{}" to raw volume "{}": {}'.format(
                            volume["volume_format"], src_volume, dst_volume, stderr
                        )
                    )

                # Unmap the source RBD device (don't bother later)
                retcode, retmsg = pvc_ceph.unmap_volume(
                    zkhandler, volume["pool"], src_volume_name
                )
                if not retcode:
                    raise ProvisioningError(
                        'Failed to unmap source volume "{}": {}'.format(
                            src_volume_name, retmsg
                        )
                    )
                # Unmap the target RBD device (don't bother later)
                retcode, retmsg = pvc_ceph.unmap_volume(
                    zkhandler, volume["pool"], dst_volume_name
                )
                if not retcode:
                    raise ProvisioningError(
                        'Failed to unmap destination volume "{}": {}'.format(
                            dst_volume_name, retmsg
                        )
                    )
            else:
                if volume.get("source_volume") is not None:
                    continue

                if volume.get("filesystem") is None:
                    continue

                filesystem_args_list = list()
                for arg in volume["filesystem_args"].split():
                    arg_entry, *arg_data = arg.split("=")
                    arg_data = "=".join(arg_data)
                    filesystem_args_list.append(arg_entry)
                    filesystem_args_list.append(arg_data)
                filesystem_args = " ".join(filesystem_args_list)

                print(
                    "Creating {} filesystem on {}".format(
                        volume["filesystem"], dst_volume
                    )
                )
                print("Args: {}".format(filesystem_args))

                # Map the RBD device
                retcode, retmsg = pvc_ceph.map_volume(
                    zkhandler, volume["pool"], dst_volume_name
                )
                if not retcode:
                    raise ProvisioningError(
                        'Failed to map volume "{}": {}'.format(dst_volume, retmsg)
                    )

                # Create the filesystem
                if volume["filesystem"] == "swap":
                    retcode, stdout, stderr = pvc_common.run_os_command(
                        "mkswap -f /dev/rbd/{}".format(dst_volume)
                    )
                    if retcode:
                        raise ProvisioningError(
                            'Failed to create swap on "{}": {}'.format(
                                dst_volume, stderr
                            )
                        )
                else:
                    retcode, stdout, stderr = pvc_common.run_os_command(
                        "mkfs.{} {} /dev/rbd/{}".format(
                            volume["filesystem"], filesystem_args, dst_volume
                        )
                    )
                    if retcode:
                        raise ProvisioningError(
                            'Failed to create {} filesystem on "{}": {}'.format(
                                volume["filesystem"], dst_volume, stderr
                            )
                        )

                print(stdout)

        if is_script_install:
            # Create temporary directory
            retcode, stdout, stderr = pvc_common.run_os_command("mktemp -d")
            if retcode:
                raise ProvisioningError(
                    "Failed to create a temporary directory: {}".format(stderr)
                )
            temp_dir = stdout.strip()

            for volume in vm_data["volumes"]:
                if volume["source_volume"] is not None:
                    continue

                if not volume["mountpoint"] or volume["mountpoint"] == "swap":
                    continue

                mapped_dst_volume = "/dev/rbd/{}/{}_{}".format(
                    volume["pool"], vm_name, volume["disk_id"]
                )
                mount_path = "{}{}".format(temp_dir, volume["mountpoint"])

                # Ensure the mount path exists (within the filesystems)
                retcode, stdout, stderr = pvc_common.run_os_command(
                    "mkdir -p {}".format(mount_path)
                )
                if retcode:
                    raise ProvisioningError(
                        'Failed to create mountpoint "{}": {}'.format(
                            mount_path, stderr
                        )
                    )

                # Mount filesystems to temporary directory
                retcode, stdout, stderr = pvc_common.run_os_command(
                    "mount {} {}".format(mapped_dst_volume, mount_path)
                )
                if retcode:
                    raise ProvisioningError(
                        'Failed to mount "{}" on "{}": {}'.format(
                            mapped_dst_volume, mount_path, stderr
                        )
                    )

                print(
                    "Successfully mounted {} on {}".format(
                        mapped_dst_volume, mount_path
                    )
                )

        # Phase 8 - provisioning script execution
        # * Execute the provisioning script main function ("install") passing any custom arguments
        self.update_state(
            state="RUNNING",
            meta={"current": 8, "total": 10, "status": "Executing provisioning script"},
        )
        time.sleep(1)

        if is_script_install:
            print("Running installer script")

            # Parse the script arguments
            script_arguments = dict()
            for argument in vm_data["script_arguments"]:
                argument_name, argument_data = argument.split("=")
                script_arguments[argument_name] = argument_data

            # Parse the runtime arguments
            if script_run_args is not None:
                for argument in script_run_args:
                    argument_name, argument_data = argument.split("=")
                    script_arguments[argument_name] = argument_data

            print("Script arguments: {}".format(script_arguments))

            # Run the script
            try:
                installer_script.install(
                    vm_name=vm_name,
                    vm_id=vm_id,
                    temporary_directory=temp_dir,
                    disks=vm_data["volumes"],
                    networks=vm_data["networks"],
                    **script_arguments
                )
            except Exception as e:
                raise ProvisioningError("Failed to run install script: {}".format(e))

    except Exception as e:
        start_vm = False
        raise e

    # Always perform the cleanup steps
    finally:
        # Phase 9 - install cleanup
        # * Unmount any mounted volumes
        # * Remove any temporary directories
        self.update_state(
            state="RUNNING",
            meta={
                "current": 9,
                "total": 10,
                "status": "Cleaning up local mounts and directories",
            },
        )
        time.sleep(1)

        if not is_ova_install:
            for volume in list(reversed(vm_data["volumes"])):
                if volume.get("source_volume") is not None:
                    continue

                if is_script_install:
                    # Unmount the volume
                    if (
                        volume.get("mountpoint") is not None
                        and volume.get("mountpoint") != "swap"
                    ):
                        print(
                            "Cleaning up mount {}{}".format(
                                temp_dir, volume["mountpoint"]
                            )
                        )

                        mount_path = "{}{}".format(temp_dir, volume["mountpoint"])

                        # Make sure any bind mounts or submounts are unmounted first
                        if volume["mountpoint"] == "/":
                            retcode, stdout, stderr = pvc_common.run_os_command(
                                "umount {}/**/**".format(mount_path)
                            )
                            retcode, stdout, stderr = pvc_common.run_os_command(
                                "umount {}/**".format(mount_path)
                            )

                        retcode, stdout, stderr = pvc_common.run_os_command(
                            "umount {}".format(mount_path)
                        )
                        if retcode:
                            print(
                                'Failed to unmount "{}": {}'.format(mount_path, stderr)
                            )

                # Unmap the RBD device
                if volume["filesystem"]:
                    print(
                        "Cleaning up RBD mapping /dev/rbd/{}/{}_{}".format(
                            volume["pool"], vm_name, volume["disk_id"]
                        )
                    )

                    rbd_volume = "/dev/rbd/{}/{}_{}".format(
                        volume["pool"], vm_name, volume["disk_id"]
                    )
                    retcode, stdout, stderr = pvc_common.run_os_command(
                        "rbd unmap {}".format(rbd_volume)
                    )
                    if retcode:
                        print(
                            'Failed to unmap volume "{}": {}'.format(rbd_volume, stderr)
                        )

        print("Cleaning up temporary directories and files")

        if is_script_install:
            # Remove temporary mount directory (don't fail if not removed)
            retcode, stdout, stderr = pvc_common.run_os_command(
                "rmdir {}".format(temp_dir)
            )
            if retcode:
                print(
                    'Failed to delete temporary directory "{}": {}'.format(
                        temp_dir, stderr
                    )
                )

            # Remove temporary script (don't fail if not removed)
            retcode, stdout, stderr = pvc_common.run_os_command(
                "rm -f {}".format(script_file)
            )
            if retcode:
                print(
                    'Failed to delete temporary script file "{}": {}'.format(
                        script_file, stderr
                    )
                )

        # Phase 10 - startup
        # * Start the VM in the PVC cluster
        if start_vm:
            self.update_state(
                state="RUNNING",
                meta={"current": 10, "total": 10, "status": "Starting VM"},
            )
            time.sleep(1)
            retcode, retmsg = pvc_vm.start_vm(zkhandler, vm_name)
            print(retmsg)

        zkhandler.disconnect()
        del zkhandler

        return {
            "status": 'VM "{}" with profile "{}" has been provisioned successfully'.format(
                vm_name, vm_profile
            ),
            "current": 10,
            "total": 10,
        }


@@ -0,0 +1,730 @@
#!/usr/bin/env python3

# vmbuilder.py - PVC API VM builder (provisioner) functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import json
import psycopg2
import psycopg2.extras
import re
import os

# import sys
import time
import importlib.util
import uuid

from contextlib import contextmanager

from pvcapid.Daemon import config

from daemon_lib.zkhandler import ZKHandler

import daemon_lib.common as pvc_common
import daemon_lib.node as pvc_node
import daemon_lib.vm as pvc_vm
import daemon_lib.network as pvc_network
import daemon_lib.ceph as pvc_ceph


#
# Exceptions (used by Celery tasks)
#
class ValidationError(Exception):
    """
    An exception that results from some value being un- or mis-defined.
    """

    pass


class ClusterError(Exception):
    """
    An exception that results from the PVC cluster being out of alignment with the action.
    """

    pass


class ProvisioningError(Exception):
    """
    An exception that results from a failure of a provisioning command.
    """

    pass


#
# VMBuilder class - subclassed by install scripts
#
class VMBuilder(object):
    def __init__(
        self,
        vm_name,
        vm_id,
        vm_profile,
        vm_data,
    ):
        self.vm_name = vm_name
        self.vm_id = vm_id
        self.vm_uuid = uuid.uuid4()
        self.vm_profile = vm_profile
        self.vm_data = vm_data

    #
    # Primary class functions; implemented by the individual scripts
    #
    def setup(self):
        """
        setup(): Perform special setup steps before proceeding
        OPTIONAL
        """
        pass

    def create(self):
        """
        create(): Create the VM libvirt schema definition, which is then defined on the cluster
        """
        pass

    def prepare(self):
        """
        prepare(): Prepare any disks/volumes for the install step
        """
        pass

    def install(self):
        """
        install(): Perform the installation
        """
        pass

    def cleanup(self):
        """
        cleanup(): Perform any cleanup required after the prepare() step or on failure of the install() step
        """
        pass
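
# A minimal sketch (editor's illustration, not part of the commit) of the
# subclass an install script is expected to provide; the class name
# "VMBuilderScript" matches what create_vm() below instantiates from the
# imported script module:
#
#   class VMBuilderScript(VMBuilder):
#       def setup(self):
#           pass  # e.g. verify needed host binaries exist
#
#       def create(self):
#           return "<domain>...</domain>"  # assemble and return the libvirt XML
#
#       def prepare(self):
#           pass  # map/format/mount self.vm_data["volumes"]
#
#       def install(self):
#           pass  # e.g. debootstrap into the mounted volumes
#
#       def cleanup(self):
#           pass  # reverse prepare() in the opposite order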

#
# Helper functions (as context managers)
#
@contextmanager
def chroot(destination):
    """
    Change root directory to a given destination
    """
    try:
        real_root = os.open("/", os.O_RDONLY)
        os.chroot(destination)
        fake_root = os.open("/", os.O_RDONLY)
        os.fchdir(fake_root)
        yield
    except Exception:
        pass
    finally:
        os.fchdir(real_root)
        os.chroot(".")
        os.fchdir(real_root)
        os.close(fake_root)
        os.close(real_root)
        del fake_root
        del real_root
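
# Usage sketch (editor's illustration): run commands inside the prepared
# target from within the worker, e.g.:
#
#   with chroot("/tmp/target"):
#       os.system("update-grub")
#   # on exit the original root is restored, even if the body raises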

@contextmanager
def open_db(config):
    try:
        conn = psycopg2.connect(
            host=config["database_host"],
            port=config["database_port"],
            dbname=config["database_name"],
            user=config["database_user"],
            password=config["database_password"],
        )
        cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        yield cur
    except Exception:
        raise ClusterError("Failed to connect to Postgres")
    finally:
        conn.commit()
        cur.close()
        conn.close()
        del conn


@contextmanager
def open_zk(config):
    try:
        zkhandler = ZKHandler(config)
        zkhandler.connect()
        yield zkhandler
    except Exception:
        raise ClusterError("Failed to connect to Zookeeper")
    finally:
        zkhandler.disconnect()
        del zkhandler
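
# Usage sketch (editor's illustration): both helpers guarantee teardown of
# their connection even when the body raises, e.g.:
#
#   with open_db(config) as db_cur:
#       db_cur.execute("SELECT * FROM profile WHERE name = %s", (vm_profile,))
#       profile = db_cur.fetchone()
#
#   with open_zk(config) as zkhandler:
#       pvc_vm.searchClusterByName(zkhandler, vm_name)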

#
# Main VM provisioning function - executed by the Celery worker
#
def create_vm(
    self, vm_name, vm_profile, define_vm=True, start_vm=True, script_run_args=[]
):
    print(f"Starting provisioning of VM '{vm_name}' with profile '{vm_profile}'")

    # Phase 1 - setup
    # * Get the profile elements
    # * Get the details from these elements
    # * Assemble a VM configuration dictionary
    self.update_state(
        state="RUNNING",
        meta={"current": 1, "total": 10, "status": "Collecting configuration"},
    )
    time.sleep(1)

    vm_id = re.findall(r"(\d+)$", vm_name)
    if not vm_id:
        vm_id = 0
    else:
        vm_id = vm_id[0]
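    # For example (illustrative only): a VM named "web3" yields vm_id 3, while a
    # name with no trailing digits, such as "database", yields vm_id 0.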

    vm_data = dict()

    with open_db(config) as db_cur:
        # Get the profile information
        query = "SELECT * FROM profile WHERE name = %s"
        args = (vm_profile,)
        db_cur.execute(query, args)
        profile_data = db_cur.fetchone()
        if profile_data.get("arguments"):
            vm_data["script_arguments"] = profile_data.get("arguments").split("|")
        else:
            vm_data["script_arguments"] = []

        if profile_data.get("profile_type") == "ova":
            is_ova_install = True
        else:
            is_ova_install = False

        # Get the system details
        query = "SELECT * FROM system_template WHERE id = %s"
        args = (profile_data["system_template"],)
        db_cur.execute(query, args)
        vm_data["system_details"] = db_cur.fetchone()

        # Get the MAC template
        query = "SELECT mac_template FROM network_template WHERE id = %s"
        args = (profile_data["network_template"],)
        db_cur.execute(query, args)
        db_row = db_cur.fetchone()
        if db_row:
            vm_data["mac_template"] = db_row.get("mac_template")
        else:
            vm_data["mac_template"] = None

        # Get the networks
        query = "SELECT * FROM network WHERE network_template = %s"
        args = (profile_data["network_template"],)
        db_cur.execute(query, args)
        _vm_networks = db_cur.fetchall()
        vm_networks = list()

        # Set the eth_bridge for each network
        for network in _vm_networks:
            vni = network["vni"]
            if vni in ["upstream", "cluster", "storage"]:
                eth_bridge = "br{}".format(vni)
            else:
                eth_bridge = "vmbr{}".format(vni)
            network["eth_bridge"] = eth_bridge
            vm_networks.append(network)
        vm_data["networks"] = vm_networks

        # Get the storage volumes
        # ORDER BY ensures disks are always in the sdX/vdX order, regardless of add order
        query = "SELECT * FROM storage WHERE storage_template = %s ORDER BY disk_id"
        args = (profile_data["storage_template"],)
        db_cur.execute(query, args)
        vm_data["volumes"] = db_cur.fetchall()

        # Get the script
        query = "SELECT script FROM script WHERE id = %s"
        args = (profile_data["script"],)
        db_cur.execute(query, args)
        db_row = db_cur.fetchone()
        if db_row:
            vm_data["script"] = db_row.get("script")
        else:
            vm_data["script"] = None

        # Get the OVA details (guard added editorially: the unguarded queries
        # would overwrite vm_data["volumes"] for non-OVA profiles)
        if is_ova_install:
            query = "SELECT * FROM ova WHERE id = %s"
            args = (profile_data["ova"],)
            db_cur.execute(query, args)
            vm_data["ova_details"] = db_cur.fetchone()

            query = "SELECT * FROM ova_volume WHERE ova = %s"
            args = (profile_data["ova"],)
            db_cur.execute(query, args)
            vm_data["volumes"] = db_cur.fetchall()

    retcode, stdout, stderr = pvc_common.run_os_command("uname -m")
    vm_data["system_architecture"] = stdout.strip()

    monitor_list = list()
    coordinator_names = config["storage_hosts"]
    for coordinator in coordinator_names:
        monitor_list.append("{}.{}".format(coordinator, config["storage_domain"]))
    vm_data["ceph_monitor_list"] = monitor_list
    vm_data["ceph_monitor_port"] = config["ceph_monitor_port"]
    vm_data["ceph_monitor_secret"] = config["ceph_storage_secret_uuid"]

    # Parse the script arguments
    script_arguments = dict()
    for argument in vm_data["script_arguments"]:
        argument_name, argument_data = argument.split("=")
        script_arguments[argument_name] = argument_data

    # Parse the runtime arguments
    if script_run_args is not None:
        for argument in script_run_args:
            argument_name, argument_data = argument.split("=")
            script_arguments[argument_name] = argument_data

    print("Script arguments: {}".format(script_arguments))
    vm_data["script_arguments"] = script_arguments

    print(
        "VM configuration data:\n{}".format(
            json.dumps(vm_data, sort_keys=True, indent=2)
        )
    )

    # Phase 2 - verification
    # * Ensure that at least one node has enough free RAM to hold the VM (becomes main host)
    # * Ensure that all networks are valid
    # * Ensure that there is enough disk space in the Ceph cluster for the disks
    # This is the "safe fail" step when an invalid configuration will be caught
    self.update_state(
        state="RUNNING",
        meta={
            "current": 2,
            "total": 10,
            "status": "Verifying configuration against cluster",
        },
    )
    time.sleep(1)

    with open_zk(config) as zkhandler:
        # Verify that a VM with this name does not already exist
        if pvc_vm.searchClusterByName(zkhandler, vm_name):
            raise ClusterError(
                "A VM with the name '{}' already exists in the cluster.".format(vm_name)
            )

        # Verify that at least one host has enough free RAM to run the VM
        _discard, nodes = pvc_node.get_list(zkhandler, None)
        target_node = None
        last_free = 0
        for node in nodes:
            # Skip the node if it is not ready to run VMs
            if node["daemon_state"] != "run" or node["domain_state"] != "ready":
                continue
            # Skip the node if its free memory is less than the new VM's size, plus a 512MB buffer
            if node["memory"]["free"] < (vm_data["system_details"]["vram_mb"] + 512):
                continue
            # If this node has the most free, use it
            if node["memory"]["free"] > last_free:
                last_free = node["memory"]["free"]
                target_node = node["name"]
        # Raise if no node was found
        if not target_node:
            raise ClusterError(
                "No ready cluster node contains at least {}+512 MB of free RAM.".format(
                    vm_data["system_details"]["vram_mb"]
                )
            )

        print(
            'Selecting target node "{}" with "{}" MB free RAM'.format(
                target_node, last_free
            )
        )

        # Verify that all configured networks are present on the cluster
        cluster_networks, _discard = pvc_network.getClusterNetworkList(zkhandler)
        for network in vm_data["networks"]:
            vni = str(network["vni"])
            if vni not in cluster_networks and vni not in [
                "upstream",
                "cluster",
                "storage",
            ]:
                raise ClusterError(
                    'The network VNI "{}" is not present on the cluster.'.format(vni)
                )

        print("All configured networks for VM are valid")

        # Verify that there is enough disk space free to provision all VM disks
        pools = dict()
        for volume in vm_data["volumes"]:
            if volume.get("source_volume") is not None:
                volume_data = pvc_ceph.getVolumeInformation(
                    zkhandler, volume["pool"], volume["source_volume"]
                )
                if not volume_data:
                    raise ClusterError(
                        "The source volume {}/{} could not be found.".format(
                            volume["pool"], volume["source_volume"]
                        )
                    )
                if not volume["pool"] in pools:
                    pools[volume["pool"]] = int(
                        pvc_ceph.format_bytes_fromhuman(volume_data["stats"]["size"])
                        / 1024
                        / 1024
                        / 1024
                    )
                else:
                    pools[volume["pool"]] += int(
                        pvc_ceph.format_bytes_fromhuman(volume_data["stats"]["size"])
                        / 1024
                        / 1024
                        / 1024
                    )
            else:
                if not volume["pool"] in pools:
                    pools[volume["pool"]] = volume["disk_size_gb"]
                else:
                    pools[volume["pool"]] += volume["disk_size_gb"]

        for pool in pools:
            try:
                pool_information = pvc_ceph.getPoolInformation(zkhandler, pool)
                if not pool_information:
                    raise
            except Exception:
                raise ClusterError(
                    'Pool "{}" is not present on the cluster.'.format(pool)
                )
            pool_free_space_gb = int(
                pool_information["stats"]["free_bytes"] / 1024 / 1024 / 1024
            )
            pool_vm_usage_gb = int(pools[pool])

            if pool_vm_usage_gb >= pool_free_space_gb:
                raise ClusterError(
                    'Pool "{}" has only {} GB free and VM requires {} GB.'.format(
                        pool, pool_free_space_gb, pool_vm_usage_gb
                    )
                )

    print("There is enough space on cluster to store VM volumes")

    if not is_ova_install:
        # Verify that every specified filesystem is valid
        used_filesystems = list()
        for volume in vm_data["volumes"]:
            if volume["source_volume"] is not None:
                continue
            if volume["filesystem"] and volume["filesystem"] not in used_filesystems:
                used_filesystems.append(volume["filesystem"])

        for filesystem in used_filesystems:
            if filesystem == "swap":
                retcode, stdout, stderr = pvc_common.run_os_command("which mkswap")
                if retcode:
                    raise ProvisioningError(
                        "Failed to find binary for mkswap: {}".format(stderr)
                    )
            else:
                retcode, stdout, stderr = pvc_common.run_os_command(
                    "which mkfs.{}".format(filesystem)
                )
                if retcode:
                    raise ProvisioningError(
                        "Failed to find binary for mkfs.{}: {}".format(
                            filesystem, stderr
                        )
                    )

        print("All selected filesystems are valid")

    # Phase 3 - provisioning script preparation
    # * Import the provisioning script as a library with importlib
    # * Ensure the required function(s) are present
    self.update_state(
        state="RUNNING",
        meta={"current": 3, "total": 10, "status": "Preparing provisioning script"},
    )
    time.sleep(1)

    # Write the script out to a temporary file
    retcode, stdout, stderr = pvc_common.run_os_command("mktemp")
    if retcode:
        raise ProvisioningError("Failed to create a temporary file: {}".format(stderr))
    script_file = stdout.strip()
    with open(script_file, "w") as fh:
        fh.write(vm_data["script"])
        fh.write("\n")

    # Import the script file
    loader = importlib.machinery.SourceFileLoader("installer_script", script_file)
    spec = importlib.util.spec_from_loader(loader.name, loader)
    installer_script = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(installer_script)
|
||||
|
||||
# Set up the VMBuilderScript object
|
||||
vm_builder = installer_script.VMBuilderScript(
|
||||
vm_name=vm_name,
|
||||
vm_id=vm_id,
|
||||
vm_profile=vm_profile,
|
||||
vm_data=vm_data,
|
||||
)
|
||||
|
||||
print("Provisioning script imported successfully")
|
||||
|
||||
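# The imported module must provide a "VMBuilderScript" class accepting the
# keyword arguments above; a minimal skeleton matching the five steps
# invoked below (a sketch of the expected interface, not the reference
# implementation) would be:
#
#   class VMBuilderScript:
#       def __init__(self, vm_name=None, vm_id=None, vm_profile=None, vm_data=None):
#           self.vm_name = vm_name
#           self.vm_id = vm_id
#           self.vm_profile = vm_profile
#           self.vm_data = vm_data
#
#       def setup(self):
#           pass
#
#       def create(self):
#           return ""  # must return the libvirt XML schema as a string
#
#       def prepare(self):
#           pass
#
#       def install(self):
#           pass
#
#       def cleanup(self):
#           pass
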
# Create temporary directory for external chroot
retcode, stdout, stderr = pvc_common.run_os_command("mktemp -d")
if retcode:
    raise ProvisioningError(f"Failed to create a temporary directory: {stderr}")
temp_dir = stdout.strip()

# Bind mount / to the chroot location /
retcode, stdout, stderr = pvc_common.run_os_command(
    f"mount --bind --options ro / {temp_dir}"
)
if retcode:
    raise ProvisioningError(
        f"Failed to mount rootfs onto {temp_dir} for chroot: {stderr}"
    )

# Mount tmpfs to the chroot location /tmp
retcode, stdout, stderr = pvc_common.run_os_command(
    f"mount --type tmpfs tmpfs {temp_dir}/tmp"
)
if retcode:
    raise ProvisioningError(
        f"Failed to mount tmpfs onto {temp_dir}/tmp for chroot: {stderr}"
    )

# Bind mount /dev to the chroot location /dev
retcode, stdout, stderr = pvc_common.run_os_command(
    f"mount --bind --options ro /dev {temp_dir}/dev"
)
if retcode:
    raise ProvisioningError(
        f"Failed to mount devfs onto {temp_dir}/dev for chroot: {stderr}"
    )

# Bind mount /run to the chroot location /run
retcode, stdout, stderr = pvc_common.run_os_command(
    f"mount --bind --options rw /run {temp_dir}/run"
)
if retcode:
    raise ProvisioningError(
        f"Failed to mount runfs onto {temp_dir}/run for chroot: {stderr}"
    )

# Bind mount /sys to the chroot location /sys
retcode, stdout, stderr = pvc_common.run_os_command(
    f"mount --bind --options rw /sys {temp_dir}/sys"
)
if retcode:
    raise ProvisioningError(
        f"Failed to mount sysfs onto {temp_dir}/sys for chroot: {stderr}"
    )

print("Chroot environment prepared successfully")

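# The chroot() context manager used for the script steps below is assumed
# to follow the standard fd-preserving pattern (a sketch, not necessarily
# the exact implementation used by this module):
#
#   from contextlib import contextmanager
#
#   @contextmanager
#   def chroot(destination):
#       real_root = os.open("/", os.O_RDONLY)
#       os.chroot(destination)
#       try:
#           yield
#       finally:
#           os.fchdir(real_root)
#           os.chroot(".")
#           os.close(real_root)
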
# Phase 4 - script: setup()
# * Run pre-setup steps
self.update_state(
    state="RUNNING",
    meta={
        "current": 4,
        "total": 10,
        "status": "Running script setup() step",
    },
)
time.sleep(1)

print("Running script setup() step")

with chroot(temp_dir):
    vm_builder.setup()

# Phase 5 - script: create()
# * Prepare the libvirt XML definition for the VM
self.update_state(
    state="RUNNING",
    meta={
        "current": 5,
        "total": 10,
        "status": "Running script create() step",
    },
)
time.sleep(1)

if define_vm:
    print("Running script create() step")

    with chroot(temp_dir):
        vm_schema = vm_builder.create()
    print("Generated VM schema:\n{}\n".format(vm_schema))

    print("Defining VM on cluster")
    node_limit = vm_data["system_details"]["node_limit"]
    if node_limit:
        node_limit = node_limit.split(",")
    node_selector = vm_data["system_details"]["node_selector"]
    node_autostart = vm_data["system_details"]["node_autostart"]
    migration_method = vm_data["system_details"]["migration_method"]
    with open_zk(config) as zkhandler:
        retcode, retmsg = pvc_vm.define_vm(
            zkhandler,
            vm_schema.strip(),
            target_node,
            node_limit,
            node_selector,
            node_autostart,
            migration_method,
            vm_profile,
            initial_state="provision",
        )
    print(retmsg)
else:
    print("Skipping VM definition due to define_vm=False")

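# Note: node_limit is stored as a comma-separated string (e.g. "hv1,hv2")
# and is split into a list for define_vm() above; an empty or null value is
# passed through unchanged.
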
# Phase 6 - script: prepare()
# * Run preparation steps (e.g. disk creation and mapping, filesystem creation, etc.)
self.update_state(
    state="RUNNING",
    meta={
        "current": 6,
        "total": 10,
        "status": "Running script prepare() step",
    },
)
time.sleep(1)

print("Running script prepare() step")

with chroot(temp_dir):
    vm_builder.prepare()

# Phase 7 - script: install()
# * Run installation with arguments
self.update_state(
    state="RUNNING",
    meta={
        "current": 7,
        "total": 10,
        "status": "Running script install() step",
    },
)
time.sleep(1)

print("Running script install() step")

with chroot(temp_dir):
    vm_builder.install()

# Phase 8 - script: cleanup()
# * Run cleanup steps
self.update_state(
    state="RUNNING",
    meta={
        "current": 8,
        "total": 10,
        "status": "Running script cleanup() step",
    },
)
time.sleep(1)

print("Running script cleanup() step")

with chroot(temp_dir):
    vm_builder.cleanup()

# Phase 9 - general cleanup
# * Clean up the chroot from earlier
self.update_state(
    state="RUNNING",
    meta={
        "current": 9,
        "total": 10,
        "status": "Running upper cleanup steps",
    },
)
time.sleep(1)

print("Running upper cleanup steps")

# Remove temporary script (don't fail if not removed)
try:
    os.remove(script_file)
except OSError:
    print(f"Failed to delete temporary script file '{script_file}'.")

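# Unmount the chroot submounts first; the root bind mount on temp_dir can
# only be released once everything mounted beneath it is gone.
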
# Unmount bind-mounted devfs on the chroot
retcode, stdout, stderr = pvc_common.run_os_command(f"umount {temp_dir}/dev")
if retcode:
    raise ProvisioningError(f"Failed to unmount devfs from chroot: {stderr}")

# Unmount bind-mounted runfs on the chroot
retcode, stdout, stderr = pvc_common.run_os_command(f"umount {temp_dir}/run")
if retcode:
    raise ProvisioningError(f"Failed to unmount runfs from chroot: {stderr}")

# Unmount bind-mounted sysfs on the chroot
retcode, stdout, stderr = pvc_common.run_os_command(f"umount {temp_dir}/sys")
if retcode:
    raise ProvisioningError(f"Failed to unmount sysfs from chroot: {stderr}")

# Unmount tmpfs on the chroot
retcode, stdout, stderr = pvc_common.run_os_command(f"umount {temp_dir}/tmp")
if retcode:
    raise ProvisioningError(f"Failed to unmount tmpfs from chroot: {stderr}")

# Unmount bind-mounted rootfs on the chroot
retcode, stdout, stderr = pvc_common.run_os_command(f"umount {temp_dir}")
if retcode:
    raise ProvisioningError(f"Failed to unmount rootfs from chroot: {stderr}")

# Remove the temp_dir (don't fail if not removed)
try:
    os.rmdir(temp_dir)
except OSError:
    print(f"Failed to delete temporary chroot directory '{temp_dir}'.")

# Phase 10 - startup
# * Start the VM in the PVC cluster
self.update_state(
    state="RUNNING",
    meta={
        "current": 10,
        "total": 10,
        "status": "Starting VM",
    },
)
time.sleep(1)

if start_vm:
    print("Starting VM")
    with open_zk(config) as zkhandler:
        success, message = pvc_vm.start_vm(zkhandler, vm_name)
    print(message)