---
# PVC cluster specification for pvcbootstrapd
#
# This configuration is entirely optional, and is not required unless you are using pvcbootstrapd
# to deploy the cluster. It must be filled out and committed before connecting any hosts from
# the new cluster.
#
# This example provides a detailed explanation for, and examples of, the various options that can
# be used by this subsystem.
#
# Bootstrap Host Definitions
#
# All hosts to be bootstrapped by the pvcbootstrapd system must be present in this list.
#
# NOTE: Only Redfish-capable BMCs can be automatically provisioned by pvcbootstrapd. Non-Redfish
# BMCs can be used, but they must be pre-configured with a system disk RAID, to boot from the
# network, etc., and the per-host TFTP files must be created manually.
#
# Each host is specified by its hardware BMC MAC address, usually available on the system asset
# tag or on some sort of label on the system board.
#
# Under the parent tag are a series of required and optional values, which are self-documented.
#
# System Disks Logic
#
# The key "bootstrap" -> "config" -> "system_disks" specifies the disk(s) that the system will
# be installed to. These disks are specified as a YAML list of one (or more) of the following:
#
#  1. A fixed Linux "/dev" path, for example "/dev/sda" (SCSI/SAS/SATA), "/dev/nvme0n1" (NVMe),
#     "/dev/disk/by-id" (fixed-ID paths), or "/dev/disk/by-path" (fixed-location paths).
#     Generally, the latter options are preferable as they are more consistent and more easily
#     guessed before a Linux operating system is booted, but all are acceptable depending on the
#     disk type.
#
#  2. A "detect:" string, in the form "detect:<NAME>:<HUMAN-SIZE>:<ID>". Detect strings
#     leverage the "lsscsi" tool in the installer to logically determine the desired block device
#     path from the given information.
#
#     The "<NAME>" can be any identifier the device will have, for example "INTEL" for an Intel
#     SSD, "SEAGATE" for a Seagate HDD, "DELLBOSS" for a Dell BOSS (Boot-Optimized Storage System)
#     virtual volume, "PERC" for a Dell PERC RAID card, etc. This should usually match something
#     found in the "Vendor" column of "lsscsi" or elsewhere in the output line. Multiple space-
#     separated "words" are supported but should only be used to avoid ambiguity.
#
#     The "<HUMAN-SIZE>" is a human-readable size, usually matching the label size of the disk
#     (e.g. 300GB, 800GB, 1.92TB). This will be matched to within +/- 2% of a real block device
#     in "lsscsi" to find a match.
#
#     The "<ID>" specifies the Nth (0-indexed) match on both the "<NAME>" and "<HUMAN-SIZE>". For
#     example, if there are 3x 800GB Intel SSDs, "detect:INTEL:800GB:2" will match the third.
#     Note that this ordering is based on SCSI bus ID, and is thus normally consistent and
#     predictable, but there can be corner cases.
#
#  3. A logical, 0-indexed disk ID detectable by Redfish, for example "0", "1", etc. On systems
#     with support for it, up to two (2), but no more, disks can be specified in this list by
#     these logical IDs. In such a case, the Redfish bootstrap will attempt to find the physical
#     disks at the given IDs on the first storage (RAID) controller and, if found, create a
#     RAID-1 virtual disk out of them. This allows easy specification of the situation where you
#     might want, for example, "the first and second disks" to be turned into a RAID-1, with the
#     rest used for other purposes.
#
# Note that only the 3rd method supports this auto-creation of RAID devices; the first two
# require an existing (single) disk or virtual device which is visible to Linux. Also note
# that the PVC installer does not support software RAID-1 for system volumes, though this
# could be added later.
#
# Once created, the virtual RAID-1 created using this method will be found via a "detect:"
# string identical to method 2.
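#
# For illustration only (the "by-path" device path and the Dell BOSS size below are hypothetical
# values, not taken from the nodes defined in this file), a "system_disks" list could use any one
# of the three methods:
#
#   system_disks:
#     - "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:0:0:0"  # method 1: fixed device path
#
#   system_disks:
#     - "detect:DELLBOSS:480GB:0"                          # method 2: first 480GB Dell BOSS volume
#
#   system_disks:
#     - "0"                                                # method 3: RAID-1 of the first and
#     - "1"                                                #           second physical disks (Redfish)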
#
# Hooks
#
# Hooks are a series of tasks that are run against one or more nodes in the cluster after the
# completion of the Ansible configuration. These hooks, specified at the cluster level, can be
# used to automate post-deployment tasks. Hooks are specified as a YAML list of dictionaries.
#
# Each hook is given a "name" which is used in the log output but which is otherwise unimportant.
#
# There are several "type"s of hooks, some of which are specialized for common tasks, and others
# which can be free-form. The primary types are:
#
#  * osddb     Create an OSD DB volume group on a given node from a given block device path,
#              specified by one of the first two (2) methods mentioned above for system disks.
#
#  * osd       Create a storage OSD on a given node from a given block device path, specified by
#              one of the first two (2) methods mentioned above for system disks.
#              If multiple nodes have the same devices, the same task can run against several
#              at once in one task; otherwise they should be run sequentially, per-node.
#
#  * pool      Create a storage pool on the cluster with the specified number of PGs.
#
#    Note: The above 3 hooks should always be specified in the given order if they are to be used.
#
#  * network   Create a network on the PVC cluster with the specified parameters (see below).
#
#  * copy      Copy one or more files from the provisioner host to the destination node(s). The
#              source path may be absolute, or relative to the Ansible repository directory. Each
#              argument is a list of values; list values map one-to-one-to-one between the 3 args.
#
#  * script    Run a script on the given host(s). Can be used to run arbitrary commands or other
#              scripts on the remote system.
#              The script may be specified in one of 3 ways:
#               1. A raw YAML block, containing a valid shebang and the contents of the script.
#                  For a single BASH command, this would be something like:
#                    #!/usr/bin/env bash
#                    mycommand
#               2. A "local" source and a "path" to a script to copy to the destination host.
#                  The path may be absolute, or relative to the Ansible repository directory.
#               3. A "remote" source and a "path" to the script on the destination host.
#
#              Note: A script hook will run as the "deploy_user" on the remote system. If you
#              require the command to have root privileges, use "sudo" in the script.
#
#  * webhook   Run an HTTP action against a URL with the given data (converted to JSON). Only
#              runs once regardless of the "target" specified, and runs from the controller.
#
# A hook can "target" one or more nodes in the cluster. These are specified by their node
# hostname, as given in the "bootstrap" section, in a YAML list. The special value "all" can be
# used to represent all nodes in the cluster; if "all" is specified, it should be the only value.
# If no target is specified, "all" is assumed.
#
# The value of "target" is used slightly differently for the osddb, osd, pool, and network (PVC)
# hook types above. For osddb and osd, the list of "target"s specifies the nodes on which the
# given block device will be created with the given parameters, though the hook actually targets
# the PVC API. For the pool and network hook types, the target is ignored completely and
# can/should be empty or "all" for clarity.
#
# Each hook has a series of "args" which are unique to that particular hook type. These are
# self-documented inline below with an example for each hook type.
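#
# At a glance, the definitions below follow roughly this layout ("<bmc-mac>" stands in for a
# node's real BMC MAC address):
#
#   bootstrap:
#     "<bmc-mac>":
#       node:      # hostname
#       config:    # kernel_options, release, mirror, packages, filesystem, system_disks
#       bmc:       # username, password, redfish, bios_settings, manager_settings
#   hooks:
#     - name / type / target / args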

# Bootstrap elements
bootstrap:
  # First node
  "d8:d3:85:12:34:56":                # BMC MAC address (from asset tag, etc.)
    node:                             # Node information
      hostname: hv1                   # The (short) hostname. Must be present in the pvc_nodes list.
    config:                           # Node configuration
      kernel_options:                 # Additional kernel options for the installer, OPTIONAL
        - console=ttyS1,115200n       # "Use the serial console ttyS1 at 115200 baud"
      release: buster                 # The Debian release to install, OPTIONAL
      mirror: http://ftp.debian.org/debian    # The Debian mirror to use, OPTIONAL
      packages:                       # List of additional packages to install, OPTIONAL
        - ca-certificates             # "Install the ca-certificates package in the target system"
      filesystem: ext4                # The filesystem to use for the system partitions, OPTIONAL
      system_disks:                   # List of system disks to install to
        - "detect:Intel:200GB:0"      # "Find the first 200GB Intel SSD"
    bmc:                              # BMC information
      username: Administrator        # BMC/IPMI administrative username
      password: SuperSecretPassword  # BMC/IPMI administrative password (initial)
                                     # NOTE: This is usually the out-of-box password; the production
                                     # password will be set later by the Ansible roles.
      redfish: yes                   # Can the system BMC support Redfish?
                                     # NOTE: This is optional; Redfish will be probed if missing.
      bios_settings:                 # Optional: Adjust the system BIOS settings; settings must exist to apply
        BootMode: Uefi               # Set BootMode to Uefi
        SecureBoot: Disabled         # Disable SecureBoot
      manager_settings:              # Optional: Adjust manager (iDRAC, etc.) settings
        IPMILan.1.Enable: Enabled    # Enable IPMI-over-LAN (Dell R6515 naming)

  # Second node
  "68:b5:99:12:34:78":                # BMC MAC address (from asset tag, etc.)
    node:                             # Node information
      hostname: hv2                   # The (short) hostname. Must be present in the pvc_nodes list.
    config:                           # Node configuration (optional)
      kernel_options:                 # Additional kernel options for the installer, OPTIONAL
        - console=ttyS1,115200n       # "Use the serial console ttyS1 at 115200 baud"
      release: buster                 # The Debian release to install, OPTIONAL
      mirror: http://ftp.debian.org/debian    # The Debian mirror to use, OPTIONAL
      packages:                       # List of additional packages to install, OPTIONAL
        - ca-certificates             # "Install the ca-certificates package in the target system"
      filesystem: ext4                # The filesystem to use for the system partitions, OPTIONAL
      system_disks:                   # List of system disks to install to
        - "0"                         # "Create a RAID-1 out of the first and second physical disks"
        - "1"
    bmc:
      username: Administrator        # BMC/IPMI administrative username
      password: SuperSecretPassword  # BMC/IPMI administrative password (initial)
                                     # NOTE: This is usually the out-of-box password; the actual live
                                     # password will be set later by the Ansible roles.
      redfish: yes                   # Can the system BMC support Redfish?
                                     # NOTE: This is optional; Redfish will be probed if missing.
      bios_settings:                 # Optional: Adjust the system BIOS settings; settings must exist to apply
        BootMode: Uefi               # Set BootMode to Uefi
        SecureBoot: Disabled         # Disable SecureBoot
      manager_settings:              # Optional: Adjust manager (iDRAC, etc.) settings
        IPMILan.1.Enable: Enabled    # Enable IPMI-over-LAN (Dell R6515 naming)

  # Third node
  "18:a9:05:12:45:90":                # BMC MAC address (from asset tag, etc.)
    node:                             # Node information
      hostname: hv3                   # The (short) hostname. Must be present in the pvc_nodes list.
    config:                           # Node configuration (optional)
      kernel_options:                 # Additional kernel options for the installer, OPTIONAL
        - console=ttyS1,115200n       # "Use the serial console ttyS1 at 115200 baud"
      release: buster                 # The Debian release to install, OPTIONAL
      mirror: http://ftp.debian.org/debian    # The Debian mirror to use, OPTIONAL
      packages:                       # List of additional packages to install, OPTIONAL
        - ca-certificates             # "Install the ca-certificates package in the target system"
      filesystem: ext4                # The filesystem to use for the system partitions, OPTIONAL
      system_disks:                   # List of system disks to install to
        - "/dev/sda"                  # "Use the disk at /dev/sda"
    bmc:
      username: Administrator        # BMC/IPMI administrative username
      password: SuperSecretPassword  # BMC/IPMI administrative password (initial)
                                     # NOTE: This is usually the out-of-box password; the actual live
                                     # password will be set later by the Ansible roles.
      redfish: yes                   # Can the system BMC support Redfish?
                                     # NOTE: This is optional; Redfish will be probed if missing.
      bios_settings:                 # Optional: Adjust the system BIOS settings; settings must exist to apply
        BootMode: Uefi               # Set BootMode to Uefi
        SecureBoot: Disabled         # Disable SecureBoot
      manager_settings:              # Optional: Adjust manager (iDRAC, etc.) settings
        IPMILan.1.Enable: Enabled    # Enable IPMI-over-LAN (Dell R6515 naming)
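
# Note on the "pgs" value used by the pool hook below: as a worked example of the formula given
# there (#OSDs * ~250 / 3 / 2, rounded down to a power of 2), the two osd hooks below each create
# one OSD per node across the three nodes above, i.e. six OSDs in total, so
# 6 * 250 / 3 / 2 = 250, which rounds down to the nearest power of two: 128.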

# Bootstrap hooks (post-configuration)
hooks:
  - name: "Create OSD database volume on the first NVMe device"
    type: osddb
    target:
      - all
    args:
      disk: "/dev/nvme0n1"            # The disk to be used for the OSD DB volume group

  - name: "Create OSDs on the first 300GB HDD device on each node"
    type: osd
    target:
      - all
    args:
      disk: "detect:LOGICAL:300GB:0"  # The disk to be used for the OSD, the first 300GB LOGICAL disk
      weight: 8                       # The weight of the OSD
      ext_db: no                      # Use an external OSD DB

  - name: "Create OSDs on the first 800GB Intel SSD device on each node"
    type: osd
    target:
      - all
    args:
      disk: "detect:INTEL:800GB:0"    # The disk to be used for the OSD, the first 800GB Intel SSD
      weight: 4                       # The weight of the OSD; note half of the first OSD weight
      ext_db: yes                     # Use an external OSD DB
      ext_db_ratio: 0.08              # External OSD DB percentage ratio if different from the default 0.05

  - name: "Create storage pool 'vms'"
    type: pool
    target:
      - all
    args:
      name: "vms"                     # The name of the pool
      pgs: 128                        # The number of placement groups (#OSDs * ~250 / 3 / 2, rounded
                                      # down to 2^n; see the worked example above this hooks list)
      tier: "ssd"                     # The tier of storage devices to use (default, hdd, ssd, or nvme if available)

  - name: "Create bridged public network on vLAN 1000"
    type: network
    target:
      - all
    args:
      vni: 1000                       # The PVC VNI (vLAN ID)
      description: "public"           # The network description (no whitespace)
      type: bridged                   # The type of network (bridged or managed)
      mtu: 9000                       # The network MTU

  - name: "Create managed deployment network on VXLAN 10000"
    type: network
    target:
      - all
    args:
      vni: 10000                      # The PVC VNI (VXLAN ID)
      description: "deployment"       # The network description (no whitespace)
      type: managed                   # The type of network (bridged or managed)
      mtu: auto                       # The network MTU; 'auto' and 'default' preserve the default
      domain: pvc.local               # The network domain for DNSMasq
      dns_servers:                    # The remote DNS servers
        - 10.100.100.10
        - 10.100.100.11
      ip4: yes                        # Enable IPv4 networking
      ip4_network: 10.0.0.0/24        # The IPv4 network, required if ip4
      ip4_gateway: 10.0.0.1           # The IPv4 gateway, required if ip4
      ip4_dhcp: yes                   # Enable IPv4 DHCP, required if ip4
      ip4_dhcp_start: 10.0.0.100      # IPv4 DHCP start address, required if ip4_dhcp
      ip4_dhcp_end: 10.0.0.199        # IPv4 DHCP end address, required if ip4_dhcp
      ip6: yes                        # Enable IPv6 networking
      ip6_network: 2001:1234:5678::/64   # The IPv6 network, required if ip6
      ip6_gateway: 2001:1234:5678::1     # The IPv6 gateway, required if ip6
  - name: "Copy a configuration file and script to node 1"
    type: copy
    target:
      - hv1
    args:
      source:
        - scripts/example.conf
        - scripts/example.sh
      destination:
        - /usr/local/etc/example.conf
        - /usr/local/sbin/example.sh
      mode:
        - 0o600
        - 0o755

  - name: "Run a quick storage benchmark leveraging node 1 as the runner"
    type: script
    target:
      - hv1
    args:
      script: |
        #!/usr/bin/env bash
        pvc storage benchmark run --yes vms

  - name: "Run a quick Python script on all nodes"
    type: script
    args:
      script: |
        #!/usr/bin/env python
        print("Hello, world!")

  - name: "Run a more complex Python script on nodes 2 and 3"
    type: script
    target:
      - hv2
      - hv3
    args:
      source: local                   # Copy the script first from the local system (full path or relative to the Ansible repository directory)
      path: "scripts/mytask.py"       # The path to the script, which must have a valid shebang

  - name: "Run a more complex BASH script on node 1"
    type: script
    target:
      - hv1
    args:
      source: remote
      path: "sudo /usr/local/bin/dostuff"
      arguments:
        - "-c look"
        - "arg1"
        - "arg2"

  - name: "Inform a Mattermost channel of completion"
    type: webhook
    args:
      uri: "https://mymattermost.company.tld/hooks/xxx-generatedkey-xxx"
      action: post                    # One of "get", "post", "put", "patch", "delete", "options" (must be valid for the Requests library)
      body:                           # This body will be converted directly from YAML into JSON and sent
        channel: "deployments"
        username: "pvcbootstrapd"
        text: "Your cluster 'clusterX' is done :tada:"
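
# For reference, since the webhook "body" above is converted directly from YAML into JSON before
# being sent, the example body would be delivered roughly as (formatted on one line for
# readability):
#
#   {"channel": "deployments", "username": "pvcbootstrapd", "text": "Your cluster 'clusterX' is done :tada:"}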