Move to unified pvc.conf configuration file
@@ -1,216 +0,0 @@
---
# pvcnoded configuration file example
#
# This configuration file specifies details for this node in PVC. Multiple node
# blocks can be added, but only the one matching the current system nodename will
# be used by the local daemon. Default values are not supported; the values in
# this sample configuration are considered defaults and, with adjustment of the
# nodename section and coordinators list, can be used as-is on a Debian system.
#
# Copy this example to /etc/pvc/pvcnoded.conf and edit it to your needs
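#
# A quick way to sanity-check the result after editing, assuming PyYAML is
# installed (an illustrative suggestion, not a PVC requirement):
#
#   python3 -c 'import yaml; yaml.safe_load(open("/etc/pvc/pvcnoded.conf"))'
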
pvc:
  # node: The (short) hostname of the node, set during provisioning
  node: pvchv1

  # debug: Enable or disable debug output
  debug: False

  # functions: The daemon functions to enable
  functions:
    # enable_hypervisor: Enable or disable hypervisor functionality
    # This should never be False except in very advanced use cases
    enable_hypervisor: True

    # enable_networking: Enable or disable virtual networking and routing functionality
    enable_networking: True

    # enable_storage: Enable or disable Ceph storage management functionality
    enable_storage: True

    # enable_api: Enable or disable the API client, if installed, when node is Primary
    enable_api: True

  # cluster: Cluster-level configuration
  cluster:
    # coordinators: The list of cluster coordinator hostnames
    coordinators:
      - pvchv1
      - pvchv2
      - pvchv3
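
    # Coordinator counts should be odd so the cluster can keep quorum; as an
    # illustrative sketch only (hostnames hypothetical), a five-coordinator
    # cluster would simply extend the list:
    #   coordinators:
    #     - pvchv1
    #     - pvchv2
    #     - pvchv3
    #     - pvchv4
    #     - pvchv5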

    # networks: Cluster-level network configuration
    # OPTIONAL if enable_networking: False
    networks:
      # upstream: Upstream routed network for in- and out-bound upstream networking
      upstream:
        # domain: Upstream domain name, may be None
        domain: "mydomain.net"

        # network: Upstream network block
        network: "1.1.1.0/24"

        # floating_ip: Upstream floating IP address for the primary coordinator
        floating_ip: "1.1.1.10/24"

        # gateway: Upstream static default gateway, if applicable
        gateway: "1.1.1.1"

      # cluster: Cluster internal network for node communication and client virtual networks
      cluster:
        # domain: Cluster internal domain name
        domain: "pvc.local"

        # network: Cluster internal network block
        network: "10.255.0.0/24"

        # floating_ip: Cluster internal floating IP address for the primary coordinator
        floating_ip: "10.255.0.254/24"

      # storage: Cluster internal network for storage traffic
      storage:
        # domain: Cluster storage domain name
        domain: "pvc.storage"

        # network: Cluster storage network block
        network: "10.254.0.0/24"

        # floating_ip: Cluster storage floating IP address for the primary coordinator
        floating_ip: "10.254.0.254/24"

  # coordinator: Coordinator-specific configuration
  # OPTIONAL if enable_networking: False
  coordinator:
    # dns: DNS aggregator subsystem
    dns:
      # database: Patroni PostgreSQL database configuration
      database:
        # host: PostgreSQL hostname, invariably 'localhost'
        host: localhost

        # port: PostgreSQL port, invariably '5432'
        port: 5432

        # name: PostgreSQL database name, invariably 'pvcdns'
        name: pvcdns

        # user: PostgreSQL username, invariably 'pvcdns'
        user: pvcdns

        # pass: PostgreSQL user password, randomly generated
        pass: pvcdns

    # metadata: Metadata API subsystem
    metadata:
      # database: Patroni PostgreSQL database configuration
      database:
        # host: PostgreSQL hostname, invariably 'localhost'
        host: localhost

        # port: PostgreSQL port, invariably '5432'
        port: 5432

        # name: PostgreSQL database name, invariably 'pvcapi'
        name: pvcapi

        # user: PostgreSQL username, invariably 'pvcapi'
        user: pvcapi

        # pass: PostgreSQL user password, randomly generated
        pass: pvcapi

  # system: Local PVC instance configuration
  system:
    # intervals: Intervals for keepalives and fencing
    intervals:
      # vm_shutdown_timeout: Number of seconds for a VM to 'shutdown' before being forced off
      vm_shutdown_timeout: 180

      # keepalive_interval: Number of seconds between keepalive/status updates
      keepalive_interval: 5

      # monitoring_interval: Number of seconds between monitoring check updates
      monitoring_interval: 60

      # fence_intervals: Number of keepalive_intervals to declare a node dead and fence it
      fence_intervals: 6

      # suicide_intervals: Number of keepalive_intervals before a node considers itself dead and self-fences, 0 to disable
      suicide_intervals: 0
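
      # Worked example using the defaults above: fence_intervals 6 at a
      # keepalive_interval of 5 means a node missing 6 x 5 = 30 seconds of
      # consecutive keepalives is declared dead and fenced.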

    # fencing: Node fencing configuration
    fencing:
      # actions: Actions to take after a fence trigger
      actions:
        # successful_fence: Action to take after successfully fencing a node, options: migrate, None
        successful_fence: migrate

        # failed_fence: Action to take after failing to fence a node, options: migrate, None
        failed_fence: None

      # ipmi: Local system IPMI options
      ipmi:
        # host: Hostname/IP of the local system's IPMI interface, must be reachable
        host: pvchv1-lom

        # user: Local system IPMI username
        user: admin

        # pass: Local system IPMI password
        pass: Passw0rd
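
        # One way to verify these credentials from another node, assuming
        # ipmitool is installed and the BMC accepts basic LAN access (a
        # suggestion, not a PVC requirement):
        #
        #   ipmitool -H pvchv1-lom -U admin -P Passw0rd chassis power status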

    # migration: Migration option configuration
    migration:
      # target_selector: Criteria to select the ideal migration target, options: mem, memprov, load, vcpus, vms
      target_selector: mem

    # configuration: Local system configurations
    configuration:
      # directories: PVC system directories
      directories:
        # plugin_directory: Directory containing node monitoring plugins
        plugin_directory: "/usr/share/pvc/plugins"

        # dynamic_directory: Temporary in-memory directory for active configurations
        dynamic_directory: "/run/pvc"

        # log_directory: Logging directory
        log_directory: "/var/log/pvc"

        # console_log_directory: Libvirt console logging directory
        console_log_directory: "/var/log/libvirt"

      # logging: PVC logging configuration
      logging:
        # file_logging: Enable or disable logging to files under log_directory
        file_logging: True

        # stdout_logging: Enable or disable logging to stdout (i.e. journald)
        stdout_logging: True

        # zookeeper_logging: Enable or disable logging to Zookeeper (for `pvc node log` functionality)
        zookeeper_logging: True

        # log_colours: Enable or disable ANSI colours in log output
        log_colours: True

        # log_dates: Enable or disable date strings in log output
        log_dates: True

        # log_keepalives: Enable or disable keepalive logging
        log_keepalives: True

        # log_keepalive_cluster_details: Enable or disable node status logging during keepalive
        log_keepalive_cluster_details: True

        # log_keepalive_plugin_details: Enable or disable node health plugin logging during keepalive
        log_keepalive_plugin_details: True

        # console_log_lines: Number of console log lines to store in Zookeeper per VM
        console_log_lines: 1000

        # node_log_lines: Number of node log lines to store in Zookeeper per node
        node_log_lines: 2000

      # networking: PVC networking configuration
      # OPTIONAL if enable_networking: False
      networking:
        # bridge_device: Underlying device to use for bridged vLAN networks; usually the device of <cluster>
        bridge_device: ens4

        # bridge_mtu: The MTU of the underlying device used for bridged vLAN networks, and thus the maximum
        # MTU of the overlying bridge devices.
        bridge_mtu: 1500

        # sriov_enable: Enable or disable (default if absent) SR-IOV network support
        sriov_enable: False

        # sriov_device: Underlying device(s) to use for SR-IOV networks; can be bridge_device or other NIC(s)
        sriov_device:
            # The physical device name
          - phy: ens1f1

            # The preferred MTU of the physical device; OPTIONAL - defaults to the interface default if unset
            mtu: 9000

            # The number of VFs to enable on this device
            # NOTE: This defines the maximum number of VMs which can be provisioned on this physical device;
            #       VMs are allocated to these VFs manually by the administrator, and thus all nodes should
            #       have the same number
            # NOTE: This value cannot be changed at runtime on Intel(R) NICs; the node will need to be
            #       restarted if this value changes
            vfcount: 8
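
          # A second device would be added as another list entry; the device
          # name here is purely illustrative:
          #  - phy: ens1f2
          #    mtu: 9000
          #    vfcount: 8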

        # upstream: Upstream physical interface device
        upstream:
          # device: Upstream interface device name
          device: ens4

          # mtu: Upstream interface MTU; use 9000 for jumbo frames (requires switch support)
          mtu: 1500

          # address: Upstream interface IP address, options: by-id, <static>/<mask>
          address: by-id
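
          # As a sketch, a static assignment (hypothetical host address within
          # the upstream network above) would replace by-id like so:
          #   address: "1.1.1.11/24"
          # The cluster and storage blocks below accept the same two forms.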

        # cluster: Cluster (VNIC) physical interface device
        cluster:
          # device: Cluster (VNIC) interface device name
          device: ens4

          # mtu: Cluster (VNIC) interface MTU; use 9000 for jumbo frames (requires switch support)
          mtu: 1500

          # address: Cluster (VNIC) interface IP address, options: by-id, <static>/<mask>
          address: by-id

        # storage: Storage (Ceph OSD) physical interface device
        storage:
          # device: Storage (Ceph OSD) interface device name
          device: ens4

          # mtu: Storage (Ceph OSD) interface MTU; use 9000 for jumbo frames (requires switch support)
          mtu: 1500

          # address: Storage (Ceph OSD) interface IP address, options: by-id, <static>/<mask>
          address: by-id

      # storage: PVC storage configuration
      # OPTIONAL if enable_storage: False
      storage:
        # ceph_config_file: The config file containing the Ceph cluster configuration
        ceph_config_file: "/etc/ceph/ceph.conf"

        # ceph_admin_keyring: The file containing the Ceph client admin keyring
        ceph_admin_keyring: "/etc/ceph/ceph.client.admin.keyring"
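
        # One way to confirm these paths point at a live cluster, assuming the
        # Ceph CLI is installed (a suggestion, not a PVC requirement):
        #
        #   ceph -c /etc/ceph/ceph.conf status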