---
# PVC cluster specification for pvcbootstrapd
#
# This configuration is entirely optional, and is not required unless you are using pvcbootstrapd to
# deploy the cluster. It must be filled out and committed before connecting any hosts from
# the new cluster.
#
# Bootstrap Host Definitions
#
# All hosts to be bootstrapped by the pvcbootstrapd system must be present in this list.
#
# NOTE: Only Redfish-capable BMCs can be automatically provisioned by pvcbootstrapd. Non-Redfish
# BMCs can be used, but they must be pre-configured with a system disk RAID, network boot, etc.,
# and the per-host TFTP files must be created manually.
#
# Disks Logic
#
# There are 3 categories of disks that can be specified for use in the pvcbootstrapd system:
# * System disk (for the host OS)
# * Data disks (for VM data, i.e. Ceph OSDs)
# * Ceph database disk (if applicable, for OSD journaling/database)
#
# The first two categories are not optional; at least one system disk and one data disk per
# coordinator node must be specified for a usable cluster.
#
# OSD Database Disks
#
# OSD database disks are optional, and only required in some specific situations with some specific
# guest workloads. Only one (1) device may be specified, and this should usually be an NVMe device
# with a Linux path (e.g. '/dev/nvme0n1', see format specifications below).
#
# The post-bootstrap hooks will use this disk, if present, to create an "osd-db-vg" device within
# PVC before creating OSDs, and this database VG will then be used for all OSDs on the node.
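#
# For illustration, this hook runs something roughly equivalent to the following PVC CLI call
# (a sketch only; the exact invocation is performed automatically by pvcbootstrapd):
# pvc storage osd create-db-vg <node> /dev/nvme0n1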
#
# Data Disks
#
# At least one (1) data disk must be specified, and these should be fast devices (SATA, SAS, or
# NVMe SSD); the former two can use any format below, but the latter should use a Linux path.
#
# These disk(s) will be used for OSD volumes within PVC, and will be provisioned by the post-
# bootstrap hooks.
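#
# For illustration, the hooks run something roughly equivalent to the following PVC CLI call
# for each listed disk (a sketch only; the exact invocation is performed automatically):
# pvc storage osd add <node> <disk>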
#
# System Disks
#
# At least one (1) and at most two (2) system disks must be specified, and these can be any
# devices (SSDs, SD Cards, etc.), though faster system disks will result in a more responsive
# system.
#
# If one system disk is specified, the system will be installed directly onto it.
#
# If two system disks are specified, and the controller supports it, a hardware RAID will be
# created out of the disks for use as the resulting single system disk.
#
# PVC does not support software RAID for node system disks; if the controller does not support
# Redfish and hardware RAID therein, only the first listed disk will be used.
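#
# For example, a two-disk hardware RAID-1 system disk on a Redfish-capable node could be
# specified with two chassis disk IDs (hypothetical bays; see specification format 2 below):
# system:
#   - 0
#   - 1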
#
# Drive Specification Formats
#
# The disks in each group can be specified in one of 3 ways:
#
# 1. A full Linux block path beginning with '/dev/'. Any path can be used but, if possible,
# reliable paths like '/dev/disk/by-path' or '/dev/disk/by-id' are recommended unless there is
# no chance for ambiguity (e.g. only a single sdX drive is present for the system disk).
#
# These direct paths are the most reliable way to specify NVMe disks due to the predictable
# format of Linux "/dev/nvmeX" drive names.
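#
# For example (device names are illustrative):
# - "/dev/disk/by-path/pci-0000:01:00.0-sas-phy0-lun-0"
# - "/dev/nvme0n1"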
#
# 2. On Redfish-capable hosts only, a chassis disk ID number, usually 0-7 (0-indexed), indicating
# the position of the disk on the primary storage controller. When specified this way, the
# system will query the active storage devices from Redfish and then automatically construct
# a "detect:" string for each one based on its vendor/model, size and ID.
#
# Note that on most chassis, the bays are labeled "1-8"; these must be converted by
# subtracting 1 from them to get 0-indexed IDs. So for example the drive in bay "1" would
# have the index of 0, and so on.
#
# These ID numbers are the most reliable way to specify SATA/SAS disks on Redfish-capable
# nodes and should be preferred for their simplicity there.
#
# The pvcbootstrapd setup will use the values provided by Redfish to craft a "detect:" string
# for the block device (or virtual RAID array, if two devices are specified for the system
# disk entries).
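#
# For example, to use the data disks in chassis bays "2" and "3" (0-indexed IDs 1 and 2):
# data:
#   - 1
#   - 2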
#
# 3. A "detect:" string. This is a specially-formatted string which can be used by the installer
# to search the output of 'lsscsi' for the given device. A "detect:" string is formatted as
# such:
# detect:<Controller-or-Model-Name>:<0-indexed-ID>:<Capacity-in-human-units>
#
# For example, to detect the *second* 800GB Intel SSD:
# detect:INTEL:1:800GB
# Or to detect a (single) Dell BOSS card's virtual RAID-1 of two 240GB M.2 SSDs:
# detect:DELLBOSS:0:240GB
# Or to detect the first RAID-1 volume on a Dell PERC controller of two 200GB SSDs:
# detect:PERC:0:200GB
#
# The controller/vendor name will usually be fully capitalized. This string can contain
# multiple space-separated elements, though usually only the first word is required, for
# example "DELLBOSS VD" -> "DELLBOSS" or "PERC H330 Mini" -> "PERC", unless there is
# potential for ambiguity.
#
# The ID is the Nth entry *of multiple identical devices*, and should usually be "0" unless
# there are multiple identically-sized devices with the same vendor. If there is only one set
# of identical drives, their indices will usually match the drive bay positions because they
# are sorted by PCIe bus identifier; however, this does not map 1-to-1 with the chassis disk
# IDs used by specification format 2, and must always be 0-indexed for a given vendor+size.
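#
# For example, with two identical INTEL 960GB SSDs, the first is "detect:INTEL:0:960GB" and
# the second is "detect:INTEL:1:960GB", regardless of which chassis bays they occupy.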
#
# The size should be the expected human-readable size of the disk with a suffix attached, for
# example "800GB", "240GB", "1.92TB", "2TB", etc. This value will be used to match, within
# +/-2%, the actual reported sizes of the block devices (which are usually slightly smaller).
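#
# For example, a disk specified as "800GB" will match devices reporting roughly 784GB to
# 816GB (800GB +/- 16GB); a device reporting 798.9GB matches, but a 760GB device does not.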
#
# These "detect:" strings are the most reliable way to determine a SATA/SSD disk or hardware
# RAID volume independent of Redfish, and should generally be used for these device types on
# non-Redfish capable systems. NVMe devices can *not* use this format and should always use
# the predicable '/dev/nvmeX' names instead as mentioned in specification format 1.
bootstrap:
  # First node
  "d8:d7:d6:d5:d4:d1":              # BMC MAC Address (from asset tag, etc.)
    node:                           # Node information
      hostname: hv1                 # The (short) hostname. Must be present in the pvc_nodes list.
    config:                         # Node configuration (optional)
      kernel_options:               # Additional options for the installer kernel command line
        - "console=ttyS1,115200n"
      release: buster               # The Debian release to install, OPTIONAL
      mirror: http://ftp.debian.org # The Debian mirror to use, OPTIONAL
      packages:                     # List of additional packages to install, OPTIONAL
        - ca-certificates
      filesystem: ext4              # The filesystem to use for the system partitions, OPTIONAL
    bmc:
      username: Administrator       # BMC/IPMI administrative username
      password: SuperSecret         # BMC/IPMI administrative password (initial)
                                    # NOTE: This is usually the out-of-box password; the actual
                                    # password will be set later by the Ansible roles.
      redfish: yes                  # Can system BMC support Redfish?
      bios_settings:                # An optional listing of BIOS settings to alter before bootstrap
        BootMode: "Uefi"            # NOTE: Must be valid Redfish BIOS options or will be ignored.
        SecureBoot: "Disabled"
    disks:                          # Disk definitions (see explanations above)
      system:                       # List of system disk(s)
- "detect:Internal SD-CARD:0:64GB"
data: # List of data disks for OSDs
- "/dev/disk/by-path/pci-0000:01:00.0-sas-phy0-lun-0"
- "/dev/disk/by-path/pci-0000:01:00.0-sas-phy4-lun-0"
osd_db: # List of OSD database disk (maximum one)
- "/dev/nvme0n1"
# Second node
"d8:d7:d6:d5:d4:d2":
node:
hostname: hv2
config:
bmc:
username: Administrator
password: SuperSecret
redfish: yes
disks:
system:
- "detect:DELLBOSS:0:240GB"
data:
- "detect:INTEL:0:960GB"
- "detect:INTEL:1:960GB"
# Third node
"d8:d7:d6:d5:d4:d3":
node:
hostname: hv3
config:
bmc:
username: Administrator
password: SuperSecret
redfish: no
disks:
system:
- "/dev/sda"
data:
- "/dev/nvme0n1"
- "/dev/nvme0n2"
#
# Hooks
#
# Hooks are sets of tasks (commands, etc.) to be run against the cluster once the bootstrap Ansible run has
# been completed. These are useful to perform post-Ansible setup tasks, for instance creating storage OSDs and
# pools, deploying VMs, running scripts, executing webhooks, etc.
#
# Each hook can define which host(s) it should be run on or against depending on the handler.
# There are several fixed, defined hooks which are known to the cluster, while all other hooks must be defined
# with their full specifications.
#
# The hook type can be one of the defined hooks, "pvc", "script", "python", or "webhook".
# * "pvc" hooks use the PVC CLI on one node to execute the desired command, without the leading "pvc".
# * "script" hooks are a set of shell commands that will be run in a ROOT shell on the given node(s).
# * "python" hooks are a set of Python commands that will be run in a ROOT interpreter on the given node(s).
# * "webhook" hooks send POST events with the specified body to the specified URL from pvcprovisionerd itself.
#
# Hooks are always executed in the order they are presented.
hooks:
  - name: install virtual router
    type: script
    node: hv1
    action: |
      pvc provisioner profile list
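  # A second, hypothetical hook of type "pvc", shown only as a sketch: the key layout follows
  # the example above, and the action is passed to the PVC CLI without the leading "pvc". The
  # pool name and PG count here are illustrative placeholders.
  - name: create storage pool
    type: pvc
    node: hv1
    action: |
      storage pool add vms 128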