# pvc-ansible/roles/pvc/defaults/main.yml
---
# Logging configuration (uncomment to override defaults)
pvc_log_to_file: False # Log to a file in /var/log/pvc
pvc_log_to_stdout: True # Log to stdout (i.e. journald)
pvc_log_to_zookeeper: True # Log to Zookeeper (required for 'node log' commands)
pvc_log_colours: True # Log colourful prompts for states instead of text
pvc_log_dates: False # Log dates (useful with log_to_file, not useful with log_to_stdout as journald adds these)
pvc_log_keepalives: True # Log keepalive event every pvc_keepalive_interval seconds
pvc_log_keepalive_cluster_details: True # Log cluster details (VMs, OSDs, load, etc.) during keepalive events
pvc_log_keepalive_plugin_details: True # Log health plugin details (messages) during keepalive events
pvc_log_console_lines: 1000 # The number of VM console log lines to store in Zookeeper for 'vm log' commands.
pvc_log_node_lines: 2000 # The number of node log lines to store in Zookeeper for 'node log' commands.
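# Example: with pvc_log_to_zookeeper enabled, the stored node log lines can be read back
# with the PVC client's 'node log' command referenced above (exact flags may vary by PVC
# version), e.g.:
# $ pvc node log pvc1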
# Timing and fencing configuration (uncomment to override defaults)
pvc_vm_shutdown_timeout: 180 # Number of seconds before a 'shutdown' VM is forced off
pvc_keepalive_interval: 5 # Number of seconds between keepalive ticks
pvc_monitoring_interval: 15 # Number of seconds between monitoring plugin runs
pvc_fence_intervals: 6 # Number of keepalive ticks before a node is considered dead
pvc_suicide_intervals: 0 # Number of keepalive ticks before a node considers itself dead (0 to disable)
pvc_fence_successful_action: migrate # What to do with VMs when a fence is successful (migrate, None)
pvc_fence_failed_action: None # What to do with VMs when a fence fails (migrate, None) - migrate is DANGEROUS unless pvc_suicide_intervals is set to a value < pvc_fence_intervals
pvc_migrate_target_selector: mem # The selector to use when choosing migration target nodes for VMs
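# Example: a sketch of a fencing setup where failed-fence migration is tolerable, assuming
# the values below suit your hardware; the node self-fences after 3 missed ticks, well
# before its peers declare it dead at 6 ticks, so a failed IPMI fence cannot race a
# still-live node:
# pvc_suicide_intervals: 3
# pvc_fence_intervals: 6
# pvc_fence_failed_action: migrate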
# Enable Prometheus exporters and corresponding API endpoints
enable_prometheus_exporters: yes
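# Example: a minimal Prometheus scrape job against the cluster floating IP and the API port
# used elsewhere in this file; the metrics path shown is an assumption and should be checked
# against your PVC API version:
# - job_name: "pvc"
#   metrics_path: "/api/v1/metrics"
#   static_configs:
#     - targets: ["10.0.0.251:7370"]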
# Memory tuning
pvc_osd_memory_limit: 4294967296
pvc_zookeeper_heap_limit: 512M
pvc_zookeeper_stack_limit: 1024M
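# Example: pvc_osd_memory_limit is in bytes; 4294967296 = 4 * 1024^3 = 4 GiB per OSD. To
# allow 8 GiB per OSD instead, you would set:
# pvc_osd_memory_limit: 8589934592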
# Ceph storage
ceph_storage_secret_key: ""
ceph_storage_secret_uuid: ""
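# Example: a fresh UUID for ceph_storage_secret_uuid can be generated with uuidgen; how the
# secret key itself is obtained depends on your Ceph deployment (it is typically a client
# keyring key):
# $ uuidgen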
# Database
pvc_dns_database_name: "pvcdns"
pvc_dns_database_user: "pvcdns"
pvc_dns_database_password: "PVCdnsPassw0rd"
pvc_api_database_name: "pvcapi"
pvc_api_database_user: "pvcapi"
pvc_api_database_password: "PVCprovPassw0rd"
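# Example: the database passwords above are placeholder defaults; one way to generate random
# replacements for your own group_vars:
# $ openssl rand -hex 16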
# CPU tuning
# This is left commented so this section of the tasks never runs; enable or disable it in your per-cluster configs
# CPU tune options defined per-node are placed in the pvc_nodes section below under cpu_tuning; global versions
# may be placed here instead.
# Whether a node has hyperthreading is determined automatically; if so, for each (real) CPU core assigned to a
# section, its corresponding hyperthread is also assigned to that section.
# machine_cpus is later used in the templates; the value of this field is autogenerated as:
# total_system_cpus - osd_cpus - system_cpus
#cpu_tuning:
#  enabled: yes # Enable or disable CPU tuning for processes
#  nodes: # Nodes configuration; default options, can be overridden by per-node tuning below
#    system_cpus: 2 # The number of CPUs to assign to the "system" slice
#                   # This slice includes all non-VM, non-OSD processes including databases, node daemons, system processes, non-OSD Ceph processes, etc.
#                   # At least 2 cores should be assigned to this slice.
#    osd_cpus: 2 # The number of CPUs to assign to the "osd" slice
#                # This slice includes all OSD processes
#                # At least 1 core per OSD should be assigned to this slice.
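# Example: on a hypothetical 16-core node with the defaults above (system_cpus: 2,
# osd_cpus: 2), the autogenerated VM allocation per the formula above would be:
# machine_cpus = 16 - 2 - 2 = 12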
# VM Autobackup
# This is uncommented but disabled, so it is not installed by default; enable it in your per-cluster configs
# Autobackup allows the exporting of VM backups automatically to an external disk target.
# These values are default; ensure you modify them in your own group_vars to match your system!
pvc_autobackup:
  enabled: no
  root_path: /tmp/backups
  root_suffix: /mycluster
  tags:
    - autobackup
  schedule:
    normal_time: "Tue..Sun *-*-* 0:0:00"
    forced_full_time: "Mon *-*-* 0:0:00"
    full_interval: 7
    full_retention: 2
  reporting:
    enabled: no
    report_on:
      forced_full: no
      normal: no
  auto_mount:
    enabled: no
    packages:
      # Example: Install nfs-common for NFS mounts
      # - nfs-common
    mount_cmds:
      # Example: Mount an NFSv3 path from 10.0.0.10:/backups to {backup_root_path}
      # - "/usr/sbin/mount.nfs -o nfsvers=3 10.0.0.10:/backups {backup_root_path}"
    unmount_cmds:
      # Example: Unmount the {backup_root_path}
      # - "/usr/bin/umount {backup_root_path}"
# PVC VM automirrors
# This is uncommented but disabled, so it is not installed by default; enable it in your per-cluster configs
# Automirror allows the automatic sending of VM snapshots to an external cluster.
# These values are default; ensure you modify them in your own group_vars to match your system!
pvc_automirror:
  enabled: no
  destinations:
    cluster2:
      address: pvc.cluster2.mydomain.tld
      port: 7370
      prefix: "/api/v1"
      key: 00000000-0000-0000-0000-000000000000
      ssl: yes
      verify_ssl: yes
      pool: vms
  default_destination: cluster2
  tags:
    - automirror
  schedule:
    time: "*-*-* 00/4:00:00"
    retention: 7
  reporting:
    enabled: no
    emails:
      - myuser@domain.tld
      - otheruser@domain.tld
    report_on:
      success: no
      error: yes
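# Example: the schedule time also uses systemd calendar syntax; "*-*-* 00/4:00:00" fires at
# 00:00 and every 4 hours thereafter, which can be confirmed with:
# $ systemd-analyze calendar "*-*-* 00/4:00:00"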
# Coordinators & Nodes list
pvc_nodes:
  - hostname: "pvc1" # The full Ansible inventory hostname of the node
    is_coordinator: yes # Whether the node is a coordinator
    node_id: 1 # The sequential node ID, usually matching the numerical part of the hostname
    router_id: "10.0.0.1" # The router ID of the node; can be the IP address of the Cluster network, the node_id, or some other unique number
    cluster_ip: "by-id" # The Cluster network IP of the host; by-id uses the defined network and sets the host part to the node_id (e.g. pvc1 becomes x.y.z.1)
    storage_ip: "by-id" # The Storage network IP of the host; by-id as above
    upstream_ip: "" # The Upstream network IP of the host; by-id as above
    ipmi: # The IPMI details of the node
      host: "pvc1-lom" # The IPMI hostname of the node
      user: "" # The IPMI username to use
      password: "" # The IPMI password to use
    cpu_tuning: # Per-node CPU tuning options; if set, overrides the global options above; useful if a node has different CPU characteristics
      system_cpus: 1
      osd_cpus: 2
  - hostname: "pvc2"
    is_coordinator: yes
    node_id: 2
    router_id: "10.0.0.2"
    cluster_ip: "by-id"
    storage_ip: "by-id"
    upstream_ip: ""
    ipmi:
      host: "pvc2-lom"
      user: ""
      password: ""
  - hostname: "pvc3"
    is_coordinator: yes
    node_id: 3
    router_id: "10.0.0.3"
    cluster_ip: "by-id"
    storage_ip: "by-id"
    upstream_ip: ""
    ipmi:
      host: "pvc3-lom"
      user: ""
      password: ""
# Networks
pvc_asn: "65001"
pvc_routers:
  - ""
pvc_cluster_device: "eth0"
pvc_cluster_domain: "pvc.local"
pvc_cluster_subnet: "10.0.0.0/24"
pvc_cluster_floatingip: "10.0.0.251"
pvc_storage_device: "eth1"
pvc_storage_domain: "pvc.storage"
pvc_storage_subnet: "10.0.1.0/24"
pvc_storage_floatingip: "10.0.1.251"
pvc_upstream_device: "eth2"
pvc_upstream_domain: ""
pvc_upstream_subnet: ""
pvc_upstream_floatingip: ""
pvc_upstream_gatewayip: ""
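# Example: a sketch of a filled-in upstream network, using documentation (RFC 5737)
# addresses; substitute your real routed subnet, floating IP, and gateway:
# pvc_upstream_domain: "pvc.mydomain.tld"
# pvc_upstream_subnet: "192.0.2.0/24"
# pvc_upstream_floatingip: "192.0.2.251"
# pvc_upstream_gatewayip: "192.0.2.1"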
# Devices
pvc_bridge_device: bond0
pvc_sriov_enable: False
pvc_sriov_device:
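# Example: a hypothetical SR-IOV device list; the phy/mtu/vfcount key names follow the
# pvc-ansible examples but should be checked against your role version:
# pvc_sriov_enable: True
# pvc_sriov_device:
#   - phy: ens1f0
#     mtu: 9000
#     vfcount: 6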