---
# pvcd cluster configuration file example
#
# This configuration file specifies details for this node in PVC. Multiple node
# blocks can be added but only the one matching the current system nodename will
# be used by the local daemon. Default values are not supported; the values in
# this sample configuration are considered defaults and, with adjustment of the
# nodename section and coordinators list, can be used as-is on a Debian system.
#
# Copy this example to /etc/pvc/pvcd.conf and edit to your needs
pvc:
  # node: The (short) hostname of the node, set during provisioning
  node: pvc-hv1
  # functions: The daemon functions to enable
  functions:
    # enable_hypervisor: Enable or disable hypervisor functionality
    # This should never be False except in very advanced use cases
    enable_hypervisor: True
    # enable_networking: Enable or disable virtual networking and routing functionality
    enable_networking: True
    # enable_storage: Enable or disable Ceph storage management functionality
    enable_storage: True
    # enable_api: Enable or disable the API client, if installed, when node is Primary
    enable_api: True
  # cluster: Cluster-level configuration
  cluster:
    # coordinators: The list of cluster coordinator hostnames
    coordinators:
      - pvc-hv1
      - pvc-hv2
      - pvc-hv3
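    # Note: an odd number of coordinators (typically 3 or 5) is recommended so
    # the cluster can maintain quorum if a coordinator fails; this is a general
    # quorum guideline rather than a value taken from this sample.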
    # networks: Cluster-level network configuration
    # OPTIONAL if enable_networking: False
    networks:
      # upstream: Upstream routed network for inbound and outbound upstream networking
      upstream:
        # domain: Upstream domain name, may be None
        domain: "mydomain.net"
        # network: Upstream network block
        network: "1.1.1.0/24"
        # floating_ip: Upstream floating IP address for the primary coordinator
        floating_ip: "1.1.1.10/24"
        # gateway: Upstream static default gateway, if applicable
        gateway: "1.1.1.1"
      # cluster: Cluster internal network for node communication and client virtual networks
      cluster:
        # domain: Cluster internal domain name
        domain: "pvc.local"
        # network: Cluster internal network block
        network: "10.255.0.0/24"
        # floating_ip: Cluster internal floating IP address for the primary coordinator
        floating_ip: "10.255.0.254/24"
      # storage: Cluster internal network for storage traffic
      storage:
        # domain: Cluster storage domain name
        domain: "pvc.storage"
        # network: Cluster storage network block
        network: "10.254.0.0/24"
        # floating_ip: Cluster storage floating IP address for the primary coordinator
        floating_ip: "10.254.0.254/24"
  # coordinator: Coordinator-specific configuration
  # OPTIONAL if enable_networking: False
  coordinator:
    # dns: DNS aggregator subsystem
    dns:
      # database: Patroni PostgreSQL database configuration
      database:
        # host: PostgreSQL hostname, invariably 'localhost'
        host: localhost
        # port: PostgreSQL port, invariably '5432'
        port: 5432
        # name: PostgreSQL database name, invariably 'pvcdns'
        name: pvcdns
        # user: PostgreSQL username, invariably 'pvcdns'
        user: pvcdns
        # pass: PostgreSQL user password, randomly generated
        pass: pvcdns
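        # Note (assumption): these credentials are normally set at provisioning
        # time and must match the Patroni-managed PostgreSQL instance; the
        # literal values here are placeholders.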
  # system: Local PVC instance configuration
  system:
    # fencing: Node fencing configuration
    fencing:
      # intervals: Intervals for fencing determination
      intervals:
        # keepalive_interval: Number of seconds between keepalive/status updates
        keepalive_interval: 5
        # fence_intervals: Number of keepalive_intervals to declare a node dead and fence it
        fence_intervals: 6
        # suicide_intervals: Number of keepalive_intervals before a node considers itself dead and self-fences, 0 to disable
        suicide_intervals: 0
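        # With the sample values above, a node that stops sending keepalives is
        # declared dead and fenced after keepalive_interval * fence_intervals =
        # 5 * 6 = 30 seconds.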
      # actions: Actions to take after a fence trigger
      actions:
        # successful_fence: Action to take after successfully fencing a node, options: migrate, None
        successful_fence: migrate
        # failed_fence: Action to take after failing to fence a node, options: migrate, None
        failed_fence: None
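        # Caution (general guidance, not from this sample): using migrate for
        # failed_fence without enabling suicide_intervals risks running a VM on
        # two nodes at once if the unreachable node is in fact still alive.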
      # ipmi: Local system IPMI options
      ipmi:
        # host: Hostname/IP of the local system's IPMI interface, must be reachable
        host: pvc-hv1-lom
        # user: Local system IPMI username
        user: admin
        # pass: Local system IPMI password
        pass: Passw0rd
    # migration: Migration option configuration
    migration:
      # target_selector: Criteria to select the ideal migration target, options: mem, load, vcpus, vms
      target_selector: mem
    # configuration: Local system configurations
    configuration:
      # directories: PVC system directories
      directories:
        # dynamic_directory: Temporary in-memory directory for active configurations
        dynamic_directory: "/run/pvc"
        # log_directory: Logging directory
        log_directory: "/var/log/pvc"
        # console_log_directory: Libvirt console logging directory
        console_log_directory: "/var/log/libvirt"
      # logging: PVC logging configuration
      logging:
        # file_logging: Enable or disable logging to files under log_directory
        file_logging: True
        # stdout_logging: Enable or disable logging to stdout (i.e. journald)
        stdout_logging: True
        # log_keepalives: Enable or disable keepalive logging
        log_keepalives: True
        # log_keepalive_cluster_details: Enable or disable node status logging during keepalive
        log_keepalive_cluster_details: True
        # log_keepalive_storage_details: Enable or disable node storage logging during keepalive
        log_keepalive_storage_details: True
        # console_log_lines: Number of console log lines to store in Zookeeper per VM
        console_log_lines: 1000
      # networking: PVC networking configuration
      # OPTIONAL if enable_networking: False
      networking:
        # upstream: Upstream physical interface device
        upstream:
          # device: Upstream interface device name
          device: ens4
          # mtu: Upstream interface MTU; use 9000 for jumbo frames (requires switch support)
          mtu: 1500
          # address: Upstream interface IP address, options: None, by-id, <static>/<mask>
          address: None
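          # For example, a static assignment in the sample upstream network
          # could be: address: 1.1.1.11/24 (illustrative value only)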
        # cluster: Cluster (VNIC) physical interface device
        cluster:
          # device: Cluster (VNIC) interface device name
          device: ens4
          # mtu: Cluster (VNIC) interface MTU; use 9000 for jumbo frames (requires switch support)
          mtu: 1500
          # address: Cluster (VNIC) interface IP address, options: None, by-id, <static>/<mask>
          address: by-id
        # storage: Storage (Ceph OSD) physical interface device
        storage:
          # device: Storage (Ceph OSD) interface device name
          device: ens4
          # mtu: Storage (Ceph OSD) interface MTU; use 9000 for jumbo frames (requires switch support)
          mtu: 1500
          # address: Storage (Ceph OSD) interface IP address, options: None, by-id, <static>/<mask>
          address: by-id
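          # Note (assumption based on the option name): 'by-id' derives the
          # interface address from the node's ID within the matching network
          # block, e.g. node 1 in 10.254.0.0/24 would take 10.254.0.1.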