---

# Logging configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the logging output of the PVC system.
#pvc_log_to_stdout: True # Log to stdout (i.e. journald)
#pvc_log_to_file: False # Log to files in /var/log/pvc
#pvc_log_to_zookeeper: True # Log to Zookeeper; required for 'node log' commands to function, but writes a lot of data to Zookeeper - disable if using very small system disks
#pvc_log_colours: True # Log colourful prompts for states instead of text
#pvc_log_dates: False # Log dates (useful with log_to_file, not useful with log_to_stdout as journald adds these)
#pvc_log_keepalives: True # Log each keepalive event every pvc_keepalive_interval seconds
#pvc_log_keepalive_cluster_details: True # Log cluster details (VMs, OSDs, load, etc.) during keepalive events
#pvc_log_keepalive_plugin_details: True # Log health plugin details (messages) during keepalive events
#pvc_log_console_lines: 1000 # The number of VM console log lines to store in Zookeeper for 'vm log' commands.
#pvc_log_node_lines: 2000 # The number of node log lines to store in Zookeeper for 'node log' commands.

# Timing and fencing configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the timings of various areas of the cluster, for instance if your hardware is slow or error-prone.
# DO NOT lower most of these values; this will NOT provide "more reliability", but the contrary.
#pvc_vm_shutdown_timeout: 180 # Number of seconds before a 'shutdown' VM is forced off
#pvc_keepalive_interval: 5 # Number of seconds between keepalive ticks
#pvc_monitoring_interval: 15 # Number of seconds between monitoring plugin runs
#pvc_fence_intervals: 6 # Number of keepalive ticks before a node is considered dead
#pvc_suicide_intervals: 0 # Number of keepalive ticks before a node considers itself dead and forcibly restarts itself (0 to disable, recommended)
#pvc_fence_successful_action: migrate # What to do with VMs when a fence is successful (migrate, None)
#pvc_fence_failed_action: None # What to do with VMs when a fence fails (migrate, None) - migrate is DANGEROUS without pvc_suicide_intervals set to < pvc_fence_intervals
#pvc_migrate_target_selector: mem # The selector to use for migrating VMs if not explicitly set by the VM; one of mem, memfree, load, vcpus, vms

# Client API basic configuration
pvc_api_listen_address : "{{ pvc_upstream_floatingip }}" # This should usually be the upstream floating IP
pvc_api_listen_port : "7370" # This can be any port, including low ports, if desired, but be mindful of port conflicts
pvc_api_secret_key : "" # Use pwgen to generate

# Client API user tokens
# Create a token (random UUID or password) for each user you wish to have access to the PVC API.
# The first token will always be used for the "local" connection, and thus at least one token MUST be defined.
# WARNING: All tokens function at the same privilege level and provide FULL CONTROL over the cluster. Keep them secret!
pvc_api_enable_authentication : True
pvc_api_tokens :
- description : "myuser" # The description is purely cosmetic for current iterations of PVC
token : "a3945326-d36c-4024-83b3-2a8931d7785a" # The token should be random for security; use uuidgen or pwgen to generate
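  # Additional users can be granted access by appending further entries to this list, for example
  # (hypothetical values; generate your own random token with uuidgen or pwgen):
  #- description : "otheruser"
  #  token : "generate-another-random-token-here"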

# PVC API SSL configuration
# Use these options to enable SSL for the API listener, providing security over WAN connections.
# There are two options for defining the SSL certificate and key to use:
# a) Set both pvc_api_ssl_cert_path and pvc_api_ssl_key_path to paths to an existing SSL combined (CA + intermediate + cert) certificate and key, respectively, on the system.
# b) Set both pvc_api_ssl_cert and pvc_api_ssl_key to the raw PEM-encoded contents of an SSL combined (CA + intermediate + cert) certificate and key, respectively, which will be installed under /etc/pvc.
# If the _path options are non-empty, the raw entries are ignored and will not be used.
pvc_api_enable_ssl : False # Enable SSL listening; this is highly recommended when using the API over the Internet!
pvc_api_ssl_cert_path : "" # Set a path here, or...
pvc_api_ssl_cert : >
  # Enter the RAW CERTIFICATE FILE contents here, which will be installed to /etc/pvc/api-cert.pem
pvc_api_ssl_key_path : "" # Set a path here, or...
pvc_api_ssl_key : >
  # Enter the RAW KEY FILE contents here, which will be installed to /etc/pvc/api-key.pem
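# For example, raw PEM content for either of the above could be embedded with a YAML block scalar instead
# of the placeholder comment (a sketch only; substitute your real combined certificate or key data):
#pvc_api_ssl_cert : |
#  -----BEGIN CERTIFICATE-----
#  (combined certificate chain data)
#  -----END CERTIFICATE-----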

# Ceph storage configuration
pvc_ceph_storage_secret_uuid : "" # Use uuidgen to generate

# Database configuration
pvc_dns_database_name : "pvcdns" # Should usually be "pvcdns" unless there is good reason to change it
pvc_dns_database_user : "pvcdns" # Should usually be "pvcdns" unless there is good reason to change it
pvc_dns_database_password : "" # Use pwgen to generate
pvc_api_database_name : "pvcapi" # Should usually be "pvcapi" unless there is good reason to change it
pvc_api_database_user : "pvcapi" # Should usually be "pvcapi" unless there is good reason to change it
pvc_api_database_password : "" # Use pwgen to generate
pvc_replication_database_user : "replicator" # Should be "replicator" for Patroni
pvc_replication_database_password : "" # Use pwgen to generate
pvc_superuser_database_user : "postgres" # Should be "postgres"
pvc_superuser_database_password : "" # Use pwgen to generate

# Network routing configuration
# The ASN should be a private ASN number, usually "65500"
# The list of routers are those which will learn routes to the PVC client networks via BGP;
# they should speak BGP and allow sessions from the PVC nodes.
# If you do not have any upstream BGP routers, e.g. if you wish to use static routing to and from managed networks, leave the list empty
pvc_asn : "65500"
pvc_routers :
- "10.100.0.254"

# PVC Node list
# Every node configured with this playbook must be specified in this list.
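# For example, if a node's Ansible inventory_hostname is "hv1.mycluster.mydomain.tld" (a hypothetical name),
# its "hostname" entry below must be "hv1".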
pvc_nodes :
- hostname : "hv1" # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
is_coordinator : yes # Coordinators should be set to "yes", hypervisors to "no"
node_id : 1 # Should match the number portion of the hostname
router_id : "10.100.0.1"
upstream_ip : "10.100.0.1"
cluster_ip : "10.0.0.1"
storage_ip : "10.0.1.1"
ipmi_host : "{{ ipmi['hosts']['hv1']['hostname'] }}" # Note the node hostname as the key here
ipmi_user : "{{ ipmi['users']['pvc']['username'] }}"
ipmi_password : "{{ ipmi['users']['pvc']['password'] }}"
cpu_tuning : # Example of cpu_tuning overrides per-node, only relevant if enabled; see below
system_cpus : 2 # Number of CPU cores (+ their hyperthreads) to allocate to the system
osd_cpus : 2 # Number of CPU cores (+ their hyperthreads) to allocate to the storage OSDs
- hostname : "hv2" # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
is_coordinator : yes
node_id : 2
router_id : "10.100.0.2"
upstream_ip : "10.100.0.2"
cluster_ip : "10.0.0.2"
storage_ip : "10.0.1.2"
ipmi_host : "{{ ipmi['hosts']['hv2']['hostname'] }}" # Note the node hostname as the key here
ipmi_user : "{{ ipmi['users']['pvc']['username'] }}"
ipmi_password : "{{ ipmi['users']['pvc']['password'] }}"
- hostname : "hv3" # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
is_coordinator : yes
node_id : 3
router_id : "10.100.0.3"
upstream_ip : "10.100.0.3"
cluster_ip : "10.0.0.3"
storage_ip : "10.0.1.3"
ipmi_host : "{{ ipmi['hosts']['hv3']['hostname'] }}" # Note the node hostname as the key here
ipmi_user : "{{ ipmi['users']['pvc']['username'] }}"
ipmi_password : "{{ ipmi['users']['pvc']['password'] }}"

# Bridge device entry
# This device is used when creating bridged networks. Normal managed networks are created on top of the
# "cluster" interface defined below, however bridged networks must be created directly on an underlying
# non-vLAN network device. This can be the same underlying device as the upstream/cluster/storage networks
# (especially if the upstream network device is not a vLAN itself), or a different device separate from the
# other 3 main networks.
pvc_bridge_device : bond0 # Replace based on your network configuration
pvc_bridge_mtu : 1500 # Replace based on your network configuration; bridges will have this MTU by default unless otherwise specified

# SR-IOV device configuration
# SR-IOV enables the passing of hardware-virtualized network devices (VFs), created on top of SR-IOV-enabled
# physical NICs (PFs), into virtual machines. SR-IOV is a complex topic, and will not be discussed in detail
# here. Instead, the SR-IOV mode is disabled by default and a commented out example configuration is shown.
pvc_sriov_enable : False
#pvc_sriov_device:
# - phy: ens1f0
# mtu: 9000
# vfcount: 6

# Memory tuning
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default memory
# allocations. Uncomment these options only in low-memory situations (nodes with <32GB RAM).
#
# OSD memory limit - 939524096 (~900MB) is the lowest possible value; default is 4GB.
# > This option is *only* applied at cluster bootstrap and cannot be changed later
# here, only by editing the `files/ceph/<cluster>/ceph.conf` file directly.
#pvc_osd_memory_limit: 939524096
#
# Zookeeper heap memory limit, sets Xms and Xmx values to the Java process; default is 512M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
# Lowering the heap limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_heap_limit: 128M # 1/4 of default
#
# Zookeeper stack memory limit, sets Xss value to the Java process; default is 1024M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
# Lowering the stack limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_stack_limit: 256M # 1/4 of default

# CPU tuning
# > ADVANCED TUNING: These options are strongly recommended due to the performance gains possible, but
# most users would be able to use the default without too much issue. Read the following notes
# carefully to determine if this setting should be enabled in your cluster.
# > NOTE: CPU tuning playbooks require jmespath (e.g. python3-jmespath) installed on the controller node
# > Defines CPU tuning/affinity options for various subsystems within PVC. This is useful to
# help limit the impact that noisy elements may have on other elements, e.g. busy VMs on
# OSDs, or system processes on latency-sensitive VMs.
# > To enable tuning, set enabled to yes.
# > Within "nodes", two counts are specified:
# * system_cpus: The number of CPUs to assign to the "system" slice, i.e. all non-VM,
# non-OSD processes on the system. Should usually be at least 2, and be
# higher on the coordinators of larger clusters (i.e. >5 nodes).
# * osd_cpus: The number of CPUs to assign to the "osd" slice, i.e. all OSD processes.
# Should be at least 1 per OSD, and ideally 2 per OSD for best performance.
# A third count, for the VM CPUs, is autogenerated based on the total node CPU count and
# the above two values (using all remaining CPUs).
# > Tuning is done based on cores; for systems with SMT (>1 thread-per-core), all SMTs within
# a given core are also assigned to the same CPU set. So for example, if the "system" group is
# assigned 2 system_cpus, there are 16 cores, and there are 2 threads per core, the list will be:
# 0,1,16,17
# leveraging the assumption that Linux puts all cores before all threads.
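# Continuing that hypothetical example, the "osd" set with 2 osd_cpus would typically then be 2,3,18,19,
# and the autogenerated VM set would receive all remaining CPUs (4-15 and 20-31).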
# > This tuning section under "nodes" is global to the cluster; to override these values on
# a per-node basis, use the corresponding "cpu_tuning" section of a given "pvc_nodes" entry
# as shown below.
# > If disabled after being enabled, the tuning configurations on each node will be removed
# on the next run. A reboot of all nodes is required to fully disable the tuning.
# > IMPORTANT NOTE: Enabling CPU tuning requires a decent number of CPU cores on the system. For very
# low-spec systems (i.e. less than at least 12 cores per node), it is advisable to leave this tuning
# off, as otherwise very few cores will actually be allocated to VMs. With a larger number (>16 or so),
# this tuning is likely to greatly increase storage performance, though balance between the VM workload
# and total number of cores must be carefully considered.
cpu_tuning :
enabled : no # Disable or enable CPU tuning; recommended to enable for optimal storage performance
nodes :
system_cpus : 2 # Set based on your actual system configuration (min 2, increase on coordinators if many nodes)
osd_cpus : 2 # Set based on your actual number of OSDs (for optimal performance, 2 per OSD)

# PVC VM autobackups
# > PVC supports autobackups, which can perform automatic snapshot-level VM backups of selected
# virtual machines based on tags. The backups are fully managed on a consistent schedule, and
# include both full and incremental varieties.
# > To solve the shared storage issue and ensure backups are taken off-cluster, automatic mounting
# of remote filesystems is supported by autobackup.
pvc_autobackup :
# Enable or disable autobackup
# > If disabled, no timers or "/etc/pvc/autobackup.yaml" configuration will be installed, and any
# existing timers or configuration will be REMOVED on each run (even if manually created).
# > Since autobackup is an integrated PVC CLI feature, the command will always be available regardless
# of this setting, but without this option enabled, the lack of a "/etc/pvc/autobackup.yaml" will
# prevent its use.
enabled : no
# Set the backup root path and (optional) suffix
# > This directory will be used for autobackups, optionally suffixed with the suffix if it is present
# > If remote mounting is enabled, the remote filesystem will be mounted at the root path; if it is
# not enabled, there must be a valid large(!) filesystem mounted on all coordinator nodes at this
# path.
# > The suffix can be used to allow a single backup root path to back up multiple clusters without
# conflicts should those clusters share VM names. It is optional unless this matches your situation.
# > The path "/tmp/backups" is usually recommended for remote mounting
# > NOTE: If you specify it, the suffix must begin with a '/', but is relative to the root path!
root_path : "/tmp/backups"
root_suffix : "/mycluster"
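  # With the example values above, the effective backup target path would be "/tmp/backups/mycluster"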
# Set the VM tag(s) which will be selected for autobackup
# > Autobackup selects VMs based on their tags. If a VM has a tag present in this list, it will be
# selected for autobackup at runtime; if not it will be ignored.
# > Usually, the tag "autobackup" here is sufficient; the administrator should then add this tag
# to any VM(s) they want to use autobackups. However, any tag may be specified to keep the tag list
# cleaner and more focused, should the administrator choose to.
tags :
- autobackup
# Autobackup scheduling
schedule :
# Backups are performed at regular intervals via a systemd timer
# > Optionally, forced-full backups can also be specified, which ensures consistent rotation
# between VMs regardless of when they are added; if forced_full_time is empty or missing, this
# feature is disabled
    # > This default schedule performs a (forced) full backup every Monday at midnight, then normal backups
    # on all other days at midnight (these may be full or incremental depending on the options below)
# > These options use a systemd timer date string; see "man systemd.time" for details
normal_time : "Tue..Sun *-*-* 0:0:00"
forced_full_time : "Mon *-*-* 0:0:00"
# The interval between full backups determines which backups are full and which are incrementals
# > When a backup is run, if there are this many (inclusive) backups since the last full backup,
# then a new full backup is taken and rotation occurs; otherwise, an incremental backup is taken
# > For example, a value of 1 means every backup is a full backup; a value of 2 means every other
    # backup is a full backup; a value of 7 means every 7th backup is a full backup (i.e. once per week
# with a daily backup time).
full_interval : 7
# The retention count specifies how many full backups should be kept
# > Retention cleanup is run after each full backup, and thus, that backup is counted in this number
# > For example, a value of 2 means that there will always be at least 2 full backups. When a new
# full backup is taken, the oldest (i.e. 3rd) full backup is removed.
# > When a full backup is removed, all incremental backups with that full backup as their parent are
# also removed.
    # > Thus, this schedule combined with a full_interval of 7 ensures there are always 2 full weekly backups,
# plus at least 1 full week's worth of incremental backups.
full_retention : 2
# Set reporting options for autobackups
# NOTE: By default, pvc-ansible installs a local Postfix MTA and Postfix sendmail to send emails
# This may not be what you want! If you want an alternate sendmail MTA (e.g. msmtp) you must install it
# yourself in a custom role!
reporting :
# Enable or disable email reporting; if disabled ("no"), no reports are ever sent
enabled : no
# Email a report to these addresses; at least one MUST be specified if enabled
emails :
- myuser@domain.tld
- otheruser@domain.tld
# Email a report on the specified jobs
report_on :
# Send a report on a forced_full backup (usually, weekly)
forced_full : yes
# Send a report on a normal backup (usually, daily)
normal : yes
# Configure automatic mounting support
# > PVC autobackup features the ability to automatically and dynamically mount and unmount remote
# filesystems, or, indeed, perform any arbitrary pre- or post-run tasks, using a set of arbitrary
# commands
  # > Automatic mounting is optional if you choose to use a static mount on all PVC coordinators
# > While the examples here show absolute paths, that is not required; they will run with the $PATH of the
# executing environment (either the "pvc" command on a CLI or a cron/systemd timer)
# > A "{backup_root_path}" f-string/str.format type variable MAY be present in any cmds string to represent
# the above configured root backup path, and is which is interpolated at runtime
# > If multiple commands are given, they will be executed in the order given; if no commands are given,
# nothing is executed, but the keys MUST be present
auto_mount :
# Enable or disable automatic mounting
enabled : no
# These Debian packages will be automatically installed if automatic mounting is enabled
packages :
# This example installs nfs-common, required for NFS mounts
# - nfs-common
# These commands are executed at the start of the backup run and should mount a filesystem or otherwise
# prepare the system for the backups
mount_cmds :
# This example shows an NFS mount leveraging the backup_root_path variable
# - "/usr/sbin/mount.nfs -o nfsvers=3 10.0.0.10:/backups {backup_root_path}"
# This example shows an SSHFS mount leveraging the backup_root_path variable
# - "/usr/bin/sshfs user@hostname:/path {backup_root_path} -o default_permissions -o sshfs_sync -o IdentityFile=/path/to/id_rsa"
# These commands are executed at the end of the backup run and should unmount a filesystem
unmount_cmds :
# This example shows a generic umount leveraging the backup_root_path variable
# - "/usr/bin/umount {backup_root_path}"
# This example shows an fusermount3 unmount (e.g. for SSHFS) leveraging the backup_root_path variable
# - "/usr/bin/fusermount3 -u {backup_root_path}"

# PVC VM automirrors
# > PVC supports automirrors, which can perform automatic snapshot-level VM mirrors of selected
# virtual machines based on tags. The mirrors are fully managed on a consistent schedule, and
# include both full and incremental varieties.
# > To ensure copies are kept off-cluster, mirrors are sent to one or more remote PVC clusters, defined
# in the destinations list below.
pvc_automirror :
# Enable or disable automirror
# > If disabled, no timers or "/etc/pvc/automirror.yaml" configuration will be installed, and any
# existing timers or configuration will be REMOVED on each run (even if manually created).
# > Since automirror is an integrated PVC CLI feature, the command will always be available regardless
# of this setting, but without this option enabled, the lack of a "/etc/pvc/automirror.yaml" will
# prevent its use.
enabled : no
# List of possible remote clusters to mirror to
destinations :
# The name of the cluster, used in tags (e.g. 'automirror:cluster2')
cluster2 :
# The destination address, either an IP or an FQDN the destination API is reachable at
address : pvc.cluster2.mydomain.tld
# The destination port (usually 7370)
port : 7370
# The API prefix (usually '/api/v1') without a trailing slash
prefix : "/api/v1"
# The API key of the destination
      key : 00000000-0000-0000-0000-000000000000
# Whether or not to use SSL for the connection
ssl : yes
# Whether or not to verify SSL for the connection
verify_ssl : yes
# Storage pool for VMs on the destination
pool : vms
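    # Additional destination clusters can be listed alongside "cluster2" in the same format, for example
    # (hypothetical values; the remaining options mirror those above):
    #cluster3 :
    #  address : pvc.cluster3.mydomain.tld
    #  port : 7370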
# The default destination to send to, for VMs tagged without an explicit cluster
# > This is required even if there is only one destination!
default_destination : cluster2
# Set the VM tag(s) which will be selected for automirror
# > Automirror selects VMs based on their tags. If a VM has a tag present in this list, it will be
# selected for automirror at runtime; if not it will be ignored.
# > Usually, the tag "automirror" here is sufficient; the administrator should then add this tag
# to any VM(s) they want to use automirrors. However, any tag may be specified to keep the tag list
# cleaner and more focused, should the administrator choose to.
# > A cluster can be explicitly set by suffixing `:clustername` from the cluster list to the tag.
# > If a VM has multiple `:clustername` tags, it will be mirrored to all of them sequentially
# using the same source snapshot.
tags :
- automirror
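    # For example, to explicitly target the "cluster2" destination defined above, the tag would instead be:
    # - automirror:cluster2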
# Automirror scheduling
schedule :
# Mirrors are performed at regular intervals via a systemd timer
# > This default schedule performs a mirror every 4 hours starting at midnight
# > These options use a systemd timer date string; see "man systemd.time" for details
time : "*-*-* 00/4:00:00"
# The retention count specifies how many mirrors should be kept, based on the destination count
# for a given VM and cluster pairing to prevent failed mirrors from being cleaned up on the source
# > Retention cleanup is run after each full mirror, and thus, that mirror is counted in this number
# > For example, a value of 7 means that there will always be at least 7 mirrors on the remote side.
    # When a new full mirror is taken, the oldest (i.e. 8th) mirror is removed.
    # > Thus, this schedule combined with this retention will ensure there are always at least 24 hours of mirrors.
retention : 7
# Set reporting options for automirrors
# NOTE: By default, pvc-ansible installs a local Postfix MTA and Postfix sendmail to send emails
# This may not be what you want! If you want an alternate sendmail MTA (e.g. msmtp) you must install it
# yourself in a custom role!
reporting :
# Enable or disable email reporting; if disabled ("no"), no reports are ever sent
enabled : no
# Email a report to these addresses; at least one MUST be specified if enabled
emails :
- myuser@domain.tld
- otheruser@domain.tld
# Email a report on the specified job results
# > These options are like this for clarity. Functionally, only "error" changes anything:
# * If yes & yes, all results send a report.
# * If yes & no, all results send a report.
# * If no & yes, only errors send a report.
# * If no & no, no reports are ever sent; functionally equivalent to setting enabled:no above.
report_on :
# Report on a successful job (all snapshots were sent successfully)
success : no
# Report on an error (at least one snapshot was not sent successfully)
error : yes

# Configuration file networks
# > Taken from base.yml's configuration; DO NOT MODIFY THIS SECTION.
pvc_upstream_device : "{{ networks['upstream']['device'] }}"
pvc_upstream_mtu : "{{ networks['upstream']['mtu'] }}"
pvc_upstream_domain : "{{ networks['upstream']['domain'] }}"
pvc_upstream_netmask : "{{ networks['upstream']['netmask'] }}"
pvc_upstream_subnet : "{{ networks['upstream']['subnet'] }}"
pvc_upstream_floatingip : "{{ networks['upstream']['floating_ip'] }}"
pvc_upstream_gatewayip : "{{ networks['upstream']['gateway_ip'] }}"
pvc_cluster_device : "{{ networks['cluster']['device'] }}"
pvc_cluster_mtu : "{{ networks['cluster']['mtu'] }}"
pvc_cluster_domain : "{{ networks['cluster']['domain'] }}"
pvc_cluster_netmask : "{{ networks['cluster']['netmask'] }}"
pvc_cluster_subnet : "{{ networks['cluster']['subnet'] }}"
pvc_cluster_floatingip : "{{ networks['cluster']['floating_ip'] }}"
pvc_storage_device : "{{ networks['storage']['device'] }}"
pvc_storage_mtu : "{{ networks['storage']['mtu'] }}"
pvc_storage_domain : "{{ networks['storage']['domain'] }}"
pvc_storage_netmask : "{{ networks['storage']['netmask'] }}"
pvc_storage_subnet : "{{ networks['storage']['subnet'] }}"
pvc_storage_floatingip : "{{ networks['storage']['floating_ip'] }}"