---
# Logging configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the logging output of the PVC system.
#pvc_log_to_file: False # Log to a file in /var/log/pvc
#pvc_log_to_stdout: True # Log to stdout (i.e. journald)
#pvc_log_to_zookeeper: True # Log to Zookeeper (required for 'node log' commands)
#pvc_log_colours: True # Log colourful prompts for states instead of text
#pvc_log_dates: False # Log dates (useful with log_to_file, not useful with log_to_stdout as journald adds these)
#pvc_log_keepalives: True # Log keepalive event every pvc_keepalive_interval seconds
#pvc_log_keepalive_cluster_details: True # Log cluster details (VMs, load, etc.) during keepalive events
#pvc_log_keepalive_storage_details: True # Log storage details (OSDs, pools, health) during keepalive events
#pvc_log_console_lines: 1000 # The number of VM console log lines to store in Zookeeper for 'vm log' commands.
#pvc_log_node_lines: 2000 # The number of node log lines to store in Zookeeper for 'node log' commands.

# Timing and fencing configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the timings of various areas of the cluster, for instance if your hardware is slow or error-prone.
#pvc_vm_shutdown_timeout: 180 # Number of seconds before a 'shutdown' VM is forced off
#pvc_keepalive_interval: 5 # Number of seconds between keepalive ticks
#pvc_fence_intervals: 6 # Number of keepalive ticks before a node is considered dead
#pvc_suicide_intervals: 0 # Number of keepalive ticks before a node considers itself dead (0 to disable)
#pvc_fence_successful_action: migrate # What to do with VMs when a fence is successful (migrate, None)
#pvc_fence_failed_action: None # What to do with VMs when a fence is failed (migrate, None) - migrate is DANGEROUS without pvc_suicide_intervals set to < pvc_fence_intervals
#pvc_fence_migrate_target_selector: mem # The selector to use for migrating VMs after a fence

# Client API basic configuration
pvc_api_listen_address : "{{ pvc_upstream_floatingip }}"
pvc_api_listen_port : "7370"
pvc_api_secret_key : "" # Use pwgen to generate

# Client API user tokens
# Create a token (random UUID or password) for each user you wish to have access to the PVC API.
# The first token will always be used for the "local" connection, and thus at least one token MUST be defined.
pvc_api_enable_authentication : True
pvc_api_tokens :
# - description: "myuser"
#   token: "a3945326-d36c-4024-83b3-2a8931d7785a"

# PVC API SSL configuration
# Use these options to enable SSL for the API listener, providing security over WAN connections.
# There are two options for defining the SSL certificate and key to use:
#   a) Set both pvc_api_ssl_cert_path and pvc_api_ssl_key_path to paths to an existing SSL combined (CA + cert) certificate and key, respectively, on the system.
#   b) Set both pvc_api_ssl_cert and pvc_api_ssl_key to the raw PEM-encoded contents of an SSL combined (CA + cert) certificate and key, respectively, which will be installed under /etc/pvc.
# If the _path options are non-empty, the raw entries are ignored and will not be used.
pvc_api_enable_ssl : False
pvc_api_ssl_cert_path :
pvc_api_ssl_cert : >
  # A RAW CERTIFICATE FILE, installed to /etc/pvc/api-cert.pem
pvc_api_ssl_key_path :
pvc_api_ssl_key : >
  # A RAW KEY FILE, installed to /etc/pvc/api-key.pem

# Ceph storage configuration
pvc_ceph_storage_secret_uuid : "" # Use uuidgen to generate

# Database configuration
pvc_dns_database_name : "pvcdns"
pvc_dns_database_user : "pvcdns"
pvc_dns_database_password : "" # Use pwgen to generate
pvc_api_database_name : "pvcapi"
pvc_api_database_user : "pvcapi"
pvc_api_database_password : "" # Use pwgen to generate
pvc_replication_database_user : "replicator"
pvc_replication_database_password : "" # Use pwgen to generate
pvc_superuser_database_user : "postgres"
pvc_superuser_database_password : "" # Use pwgen to generate

# Network routing configuration
# > The ASN should be a private ASN number.
# > The list of routers are those which will learn routes to the PVC client networks via BGP;
#   they should speak BGP and allow sessions from the PVC nodes.
pvc_asn : "65500"
pvc_routers :
  - "192.168.100.1"

# PVC Node list
# > Every node configured with this playbook must be specified in this list.
pvc_nodes :
  - hostname : "pvchv1" # This name MUST match the Ansible inventory_hostname
    is_coordinator : yes
    node_id : 1
    router_id : "192.168.100.11"
    upstream_ip : "192.168.100.11"
    cluster_ip : "10.0.0.1"
    storage_ip : "10.0.1.1"
    ipmi_host : "{{ ipmi['hosts']['pvchv1']['hostname'] }}" # Note the node inventory hostname key in here
    ipmi_user : "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password : "{{ ipmi['users']['pvc']['password'] }}"
  - hostname : "pvchv2"
    is_coordinator : yes
    node_id : 2
    router_id : "192.168.100.12"
    upstream_ip : "192.168.100.12"
    cluster_ip : "10.0.0.2"
    storage_ip : "10.0.1.2"
    ipmi_host : "{{ ipmi['hosts']['pvchv2']['hostname'] }}" # Note the node inventory hostname key in here
    ipmi_user : "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password : "{{ ipmi['users']['pvc']['password'] }}"
  - hostname : "pvchv3"
    is_coordinator : yes
    node_id : 3
    router_id : "192.168.100.13"
    upstream_ip : "192.168.100.13"
    cluster_ip : "10.0.0.3"
    storage_ip : "10.0.1.3"
    ipmi_host : "{{ ipmi['hosts']['pvchv3']['hostname'] }}" # Note the node inventory hostname key in here
    ipmi_user : "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password : "{{ ipmi['users']['pvc']['password'] }}"

# Bridge device entry
# This device is passed to PVC and is used when creating bridged networks. Normal managed networks are
# created on top of the "cluster" interface defined below, however bridged networks must be created
# directly on an underlying non-vLAN network device. This can be the same underlying device as the
# upstream/cluster/storage networks (especially if the upstream network device is not a vLAN itself),
# or a different device separate from the other 3 main networks.
pvc_bridge_device : bondU # Replace based on your network configuration

# SR-IOV device configuration
# SR-IOV enables the passing of hardware-virtualized network devices (VFs), created on top of SR-IOV-enabled
# physical NICs (PFs), into virtual machines. SR-IOV is a complex topic, and will not be discussed in detail
# here. Instead, the SR-IOV mode is disabled by default and a commented out example configuration is shown.
pvc_sriov_enable : False
#pvc_sriov_device:
#  - phy: ens1f0
#    mtu: 9000
#    vfcount: 6

# Memory tuning
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default memory
#   allocations. Uncomment these options only in low-memory situations (nodes with <32GB RAM).
#
# OSD memory limit - 939524096 (~900MB) is the lowest possible value; default is 4GB.
# > This option is *only* applied at cluster bootstrap and cannot be changed later
#   here, only by editing the `files/ceph/<cluster>/ceph.conf` file directly.
#pvc_osd_memory_limit: 939524096
#
# Zookeeper heap memory limit, sets Xms and Xmx values to the Java process; default is 512M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
#   Lowering the heap limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_heap_limit: 128M # 1/4 of default
#
# Zookeeper stack memory limit, sets Xss value to the Java process; default is 1024M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
#   Lowering the stack limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_stack_limit: 256M # 1/4 of default

# CPU pinning configuration via cset
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default scheduling.
#   Uncomment these options only for testing or if you are certain you meet the following conditions.
# > These options will tune cpuset (installed by default) to limit Ceph OSDs to certain CPU cores, while
#   simultaneously limiting other system tasks and VMs to the remaining CPU cores. In effect it dedicates the
#   specified CPU cores to Ceph OSDs only to ensure those processes can have dedicated CPU time.
# > Generally speaking, except in cases where extremely high random read throughput is required and in which
#   the node(s) have a very large number of physical cores, this setting will not improve performance, and
#   may in fact hurt performance. For more details please see the documentation.
# > For optimal performance when using this setting, you should dedicate exactly 2 cores, and their
#   respective SMT threads if applicable, to each OSD. For instance, with 2 OSDs, 4 real cores (and their
#   corresponding SMT threads if applicable) should be specified. More cores has been seen to, in some cases,
#   drop performance further. For more details please see the documentation.
# > Use the 'virsh capabilities' command to confirm the exact CPU IDs (and SMT "siblings") for these lists.
#
pvc_shield_osds_enable : False
#pvc_shield_osds_cset:
#  # This example host has 2x 6-core SMT-enabled CPUs; we want to use cores 0 (+SMT 12) and 2 (+SMT 14), which are
#  # both on physical CPU 0, for 1x OSD.
#  - hostname: pvchv1
#    osd_cset:
#      - 0
#      - 2
#      - 12
#      - 14
#  # These example hosts have 1x 8-core SMT-enabled CPUs; we want to use cores 0 (+SMT 8) and 1 (+SMT 9) for 1x OSD.
#  - hostname: pvchv2
#    osd_cset:
#      - 0
#      - 1
#      - 8
#      - 9
#  - hostname: pvchv3
#    osd_cset:
#      - 0
#      - 1
#      - 8
#      - 9

# Configuration file networks
# > Taken from base.yml's configuration; DO NOT MODIFY THIS SECTION.
pvc_upstream_device : "{{ networks['upstream']['device'] }}"
pvc_upstream_mtu : "{{ networks['upstream']['mtu'] }}"
pvc_upstream_domain : "{{ networks['upstream']['domain'] }}"
pvc_upstream_netmask : "{{ networks['upstream']['netmask'] }}"
pvc_upstream_subnet : "{{ networks['upstream']['subnet'] }}"
pvc_upstream_floatingip : "{{ networks['upstream']['floating_ip'] }}"
pvc_upstream_gatewayip : "{{ networks['upstream']['gateway_ip'] }}"
pvc_cluster_device : "{{ networks['cluster']['device'] }}"
pvc_cluster_mtu : "{{ networks['cluster']['mtu'] }}"
pvc_cluster_domain : "{{ networks['cluster']['domain'] }}"
pvc_cluster_netmask : "{{ networks['cluster']['netmask'] }}"
pvc_cluster_subnet : "{{ networks['cluster']['subnet'] }}"
pvc_cluster_floatingip : "{{ networks['cluster']['floating_ip'] }}"
pvc_storage_device : "{{ networks['storage']['device'] }}"
pvc_storage_mtu : "{{ networks['storage']['mtu'] }}"
pvc_storage_domain : "{{ networks['storage']['domain'] }}"
pvc_storage_netmask : "{{ networks['storage']['netmask'] }}"
pvc_storage_subnet : "{{ networks['storage']['subnet'] }}"
pvc_storage_floatingip : "{{ networks['storage']['floating_ip'] }}"