---
# PVC system configuration - example file
#
# This configuration file defines the details of a PVC cluster.
# It is used by several daemons on the system, including pvcnoded, pvcapid, pvcworkerd, and pvchealthd.
#
# This file will normally be written by the PVC Ansible framework; this example is provided for reference

# Paths configuration
path:

  # Plugin directory
  plugin_directory: "/usr/share/pvc/plugins"

  # Dynamic directory
  dynamic_directory: "/run/pvc"

  # System log directory
  system_log_directory: "/var/log/pvc"

  # VM Console log directory (set by Libvirt)
  console_log_directory: "/var/log/libvirt"

  # Ceph configuration directory (set by Ceph/Ansible)
  ceph_directory: "/etc/ceph"

# Subsystem configuration
# These values can be used to turn various parts of PVC on or off
# Normally, all should be enabled ("yes") except in very custom clusters
subsystem:

  # Enable or disable hypervisor functionality
  enable_hypervisor: yes

  # Enable or disable virtual networking and routing functionality
  enable_networking: yes

  # Enable or disable Ceph storage management functionality
  enable_storage: yes

  # Enable or disable the worker client
  enable_worker: yes

  # Enable or disable the API client, if installed, when node is Primary
  enable_api: yes

  # Enable or disable the Prometheus metrics endpoints in the API; if disabled, these return 404
  enable_prometheus: yes

# Cluster configuration
cluster:

  # The name of the cluster
  name: pvc1

  # The full list of nodes in this cluster
  all_nodes:
    - pvchv1
    - pvchv2
    - pvchv3

  # The list of coordinator nodes in this cluster (subset of nodes)
  coordinator_nodes:
    - pvchv1
    - pvchv2
    - pvchv3

  # Hardcoded networks (upstream/cluster/storage)
  networks:

    # Upstream network, used for inbound and outbound connectivity, API management, etc.
    upstream:

      # Domain name
      domain: "mydomain.net"

      # Device
      device: ens4

      # MTU
      mtu: 1500

      # IPv4 configuration
      ipv4:

        # CIDR netmask
        netmask: 24

        # Network address
        network_address: 10.0.0.0

        # Floating address
        floating_address: 10.0.0.250

        # Upstream/default gateway address
        gateway_address: 10.0.0.254

      # Node IP selection mechanism (either "by-id", or a static IP, no netmask, in the above network)
      node_ip_selection: by-id
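
      # Illustrative alternative (not part of the stock example; the address below is arbitrary):
      # a static address inside the above network, given without a netmask, may be used instead of "by-id", e.g.
      # node_ip_selection: 10.0.0.21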

    # Cluster network, used for inter-node communication (VM- and Network-layer), unrouted
    cluster:

      # Domain name
      domain: "pvc.local"

      # Device
      device: ens4

      # MTU
      mtu: 1500

      # IPv4 configuration
      ipv4:

        # CIDR netmask
        netmask: 24

        # Network address
        network_address: 10.0.1.0

        # Floating address
        floating_address: 10.0.1.250

      # Node IP selection mechanism (either "by-id", or a static IP, no netmask, in the above network)
      node_ip_selection: by-id

    # Storage network, used for inter-node communication (Storage-layer), unrouted
    storage:

      # Domain name
      domain: "storage.local"

      # Device
      device: ens4

      # MTU
      mtu: 1500

      # IPv4 configuration
      ipv4:

        # CIDR netmask
        netmask: 24

        # Network address
        network_address: 10.0.2.0

        # Floating address
        floating_address: 10.0.2.250

      # Node IP selection mechanism (either "by-id", or a static IP, no netmask, in the above network)
      node_ip_selection: by-id

# Database configuration
database:

  # Zookeeper client configuration
  zookeeper:

    # Port number
    port: 2181

  # KeyDB/Redis client configuration
  keydb:

    # Port number
    port: 6379

    # Hostname; use `cluster` network floating IP address
    hostname: 127.0.0.1

    # Path, usually "/0"
    path: "/0"

  # PostgreSQL client configuration
  postgres:

    # Port number
    port: 5432

    # Hostname; use `cluster` network floating IP address
    hostname: 127.0.0.1

    # Credentials
    credentials:

      # API database
      api:

        # Database name
        database: pvcapi

        # Username
        username: pvcapi

        # Password
        password: pvcapiPassw0rd

      # DNS database
      dns:

        # Database name
        database: pvcdns

        # Username
        username: pvcdns

        # Password
        password: pvcdnsPassw0rd

# Timer information
timer:

  # VM shutdown timeout (seconds)
  vm_shutdown_timeout: 180

  # Node keepalive interval (seconds)
  keepalive_interval: 5

  # Monitoring interval (seconds)
  monitoring_interval: 15

# Fencing configuration
fencing:

  # Disable fencing or not on IPMI failure at startup
  # Setting this value to "no" will allow fencing to be enabled even if the IPMI does not respond during node
  # daemon startup. This will allow future fencing to be attempted if it later recovers.
  disable_on_ipmi_failure: no

  # Fencing intervals
  intervals:

    # Fence intervals (number of keepalives)
    fence_intervals: 6

    # Suicide intervals (number of keepalives; 0 to disable)
    suicide_intervals: 0
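
    # For illustration, derived from the example values: with keepalive_interval: 5 (see the timer
    # section) and fence_intervals: 6, a node becomes eligible for fencing after roughly
    # 6 x 5 = 30 seconds of missed keepalives.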

  # Fencing actions
  actions:

    # Successful fence action ("migrate" or "none")
    successful_fence: migrate

    # Failed fence action ("migrate" or "none")
    failed_fence: none

  # IPMI details
  ipmi:

    # Hostname format; use a "{node_id}" entry for a template, or a literal hostname
    hostname: "pvchv{node_id}-lom.mydomain.tld"

    # IPMI username
    username: admin

    # IPMI password
    password: S3cur3IPMIP4ssw0rd

# VM migration configuration
migration:

  # Target selection default value (mem, memprov, load, vcpus, vms)
  target_selector: mem

# Logging configuration
logging:

  # Enable or disable debug logging (all services)
  debug_logging: yes

  # Enable or disable file logging
  file_logging: no

  # Enable or disable stdout logging (to journald)
  stdout_logging: yes

  # Enable or disable Zookeeper logging (for "pvc node log" functionality)
  zookeeper_logging: yes

  # Enable or disable ANSI colour sequences in logs
  log_colours: yes

  # Enable or disable dates in logs
  log_dates: yes

  # Enable or disable keepalive event logging
  log_keepalives: yes

  # Enable or disable cluster detail logging during keepalive events
  log_cluster_details: yes

  # Enable or disable monitoring detail logging during healthcheck events
  log_monitoring_details: yes

  # Number of VM console log lines to store in Zookeeper (per VM)
  console_log_lines: 1000

  # Number of node log lines to store in Zookeeper (per node)
  node_log_lines: 2000

# Guest networking configuration
guest_networking:

  # Bridge device for "bridged"-type networks
  bridge_device: ens4

  # Bridge device MTU
  bridge_mtu: 1500

  # Enable or disable SR-IOV functionality
  sriov_enable: no

  # SR-IOV configuration (list of PFs)
  sriov_device:

    # SR-IOV device; if this device isn't found, it is ignored on a given node
    - device: ens1f1

      # SR-IOV device MTU
      mtu: 9000

      # Number of VFs on this device
      vfcount: 4
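
    # Illustrative only (not part of the stock example; the device name is hypothetical): additional
    # PFs are added as further list entries, e.g.
    # - device: ens1f2
    #   mtu: 9000
    #   vfcount: 4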

# Ceph configuration
ceph:

  # Main config file name
  ceph_config_file: "ceph.conf"

  # Admin keyring file name
  ceph_keyring_file: "ceph.client.admin.keyring"

  # Monitor port, usually 6789
  monitor_port: 6789

  # Monitor host(s); enable only if you want to use hosts other than the coordinators
  #monitor_hosts:
  #  - pvchv1
  #  - pvchv2
  #  - pvchv3

  # Storage secret UUID, generated during Ansible cluster bootstrap
  secret_uuid: ""

# API configuration
api:

  # API listening configuration
  listen:

    # Listen address, usually upstream floating IP
    address: 10.0.0.250

    # Listen port, usually 7370
    port: 7370

  # Authentication configuration
  authentication:

    # Enable or disable authentication
    enabled: yes

    # Secret key for API cookies (long and secure password or UUID)
    secret_key: "1234567890abcdefghijklmnopqrstuvwxyz"

    # Authentication source (token, others in future)
    source: token

  # Token configuration
  token:

    # A friendly description
    - description: "testing"

      # The token (long and secure password or UUID)
      token: "1234567890abcdefghijklmnopqrstuvwxyz"

  # SSL configuration
  ssl:

    # Enable or disable SSL operation
    enabled: no

    # Certificate file path
    certificate: ""

    # Private key file path
    private_key: ""

# Automatic backups
autobackup:

  # Backup root path on the node, used as the remote mountpoint
  # Must be an absolute path beginning with '/'
  # If auto_mount is enabled, the remote mount will be mounted on this directory
  # If auto_mount is enabled, it is recommended to use a path under `/tmp` for this
  # If auto_mount is disabled, a real filesystem must be mounted here (PVC system volumes are small!)
  backup_root_path: "/tmp/backups"

  # Suffix to the backup root path, used to allow multiple PVC systems to write to a single root path
  # Must begin with '/'; leave empty to use the backup root path directly
  # Note that most remote mount options can fake this if needed, but provided to ensure local compatibility
  backup_root_suffix: "/mycluster"

  # VM tag(s) to back up
  # Only VMs with at least one of the given tag(s) will be backed up; all others will be skipped
  backup_tags:
    - "backup"
    - "mytag"

  # Backup schedule: when and what format to take backups
  backup_schedule:

    full_interval: 7    # Number of total backups between full backups; others are incremental
                        # > If this number is 1, every backup will be a full backup and no incremental
                        #   backups will be taken
                        # > If this number is 2, every second backup will be a full backup, etc.

    full_retention: 2   # Keep this many full backups; the oldest will be deleted when a new one is
                        # taken, along with all child incremental backups of that backup
                        # > Should usually be at least 2 when using incrementals (full_interval > 1) to
                        #   avoid there being too few backups after cleanup from a new full backup
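
    # Worked example, derived from the values above: with full_interval: 7 and full_retention: 2, every
    # 7th backup is a full backup and the 6 between are incrementals; once a third full backup is taken,
    # the oldest full backup and its child incremental backups are removed.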

  # Automatic mount settings
  # These settings permit running an arbitrary set of commands, ideally a "mount" command or similar, to
  # ensure that a remote filesystem is mounted on the backup root path
  # While the examples here show absolute paths, that is not required; they will run with the $PATH of the
  # executing environment (either the "pvc" command on a CLI or a cron/systemd timer)
  # A "{backup_root_path}" f-string/str.format type variable MAY be present in any cmds string to represent
  # the above configured root backup path, which is interpolated at runtime
  # If multiple commands are given, they will be executed in the order given; if no commands are given,
  # nothing is executed, but the keys MUST be present
  auto_mount:

    enabled: no # Enable automatic mount/unmount support

    # These commands are executed at the start of the backup run and should mount a filesystem
    mount_cmds:

      # This example shows an NFS mount leveraging the backup_root_path variable
      - "/usr/sbin/mount.nfs -o nfsvers=3 10.0.0.10:/backups {backup_root_path}"

    # These commands are executed at the end of the backup run and should unmount a filesystem
    unmount_cmds:

      # This example shows a generic umount leveraging the backup_root_path variable
      - "/usr/bin/umount {backup_root_path}"

# VIM modeline, requires "set modeline" in your VIMRC
# vim: expandtab shiftwidth=2 tabstop=2 filetype=yaml