---
# Logging configuration (uncomment to override defaults)
pvc_log_to_file: false   # Log to a file in /var/log/pvc
pvc_log_to_stdout: true  # Log to stdout (i.e. journald)
pvc_log_colours: true    # Log colourful prompts for states instead of text
pvc_log_dates: false     # Log dates (useful with log_to_file, not useful with log_to_stdout as journald adds these)
pvc_log_keepalives: true  # Log keepalive event every pvc_keepalive_interval seconds
pvc_log_keepalive_cluster_details: true  # Log cluster details (VMs, load, etc.) during keepalive events
pvc_log_keepalive_storage_details: true  # Log storage details (OSDs, pools, health) during keepalive events
pvc_log_console_lines: 1000  # The number of VM console log lines to store in Zookeeper for 'vm log' commands.
# Timing and fencing configuration (uncomment to override defaults)
pvc_vm_shutdown_timeout: 180  # Number of seconds before a 'shutdown' VM is forced off
pvc_keepalive_interval: 5     # Number of seconds between keepalive ticks
pvc_fence_intervals: 6        # Number of keepalive ticks before a node is considered dead
pvc_suicide_intervals: 0      # Number of keepalive ticks before a node considers itself dead (0 to disable)
pvc_fence_successful_action: migrate  # What to do with VMs when a fence is successful (migrate, None)
# Quoted to make explicit that this is the literal string "None", not YAML null
# (an unquoted "None" already parses as a string; only null/Null/NULL/~ are null).
pvc_fence_failed_action: "None"  # What to do with VMs when a fence is failed (migrate, None) - migrate is DANGEROUS without pvc_suicide_intervals set to < pvc_fence_intervals
pvc_fence_migrate_target_selector: mem  # The selector to use for migrating VMs after a fence
# Memory tuning
pvc_osd_memory_limit: 4294967296   # OSD memory limit in bytes (4294967296 = 4 GiB)
pvc_zookeeper_heap_limit: 256M     # Zookeeper heap limit (JVM -Xmx style size suffix)
pvc_zookeeper_stack_limit: 512M    # Zookeeper stack limit (JVM -Xss style size suffix)
# Ceph storage
# Both values are intentionally empty here and must be populated per-cluster.
ceph_storage_secret_key: ""   # Ceph client secret key
ceph_storage_secret_uuid: ""  # Secret UUID for Ceph client access — presumably consumed by libvirt; verify against the consuming role
# Database
# SECURITY NOTE(review): these are well-known default passwords committed to VCS.
# Change them per-cluster (ideally via a secret store / Ansible Vault) before deployment.
pvc_dns_database_name: "pvcdns"
pvc_dns_database_user: "pvcdns"
pvc_dns_database_password: "PVCdnsPassw0rd"
pvc_api_database_name: "pvcapi"
pvc_api_database_user: "pvcapi"
pvc_api_database_password: "PVCprovPassw0rd"
# Coordinators
# One entry per node. "by-id" means the IP is derived from node_id by the
# consuming role; upstream_ip / ipmi_user / ipmi_password are intentionally
# empty defaults to be filled in per-cluster.
pvc_nodes:
  - hostname: "pvc1"
    is_coordinator: true
    node_id: 1
    router_id: "10.0.0.1"
    cluster_ip: "by-id"
    storage_ip: "by-id"
    upstream_ip: ""
    ipmi_host: "pvc1-lom"
    ipmi_user: ""
    ipmi_password: ""
  - hostname: "pvc2"
    is_coordinator: true
    node_id: 2
    router_id: "10.0.0.2"
    cluster_ip: "by-id"
    storage_ip: "by-id"
    upstream_ip: ""
    ipmi_host: "pvc2-lom"
    ipmi_user: ""
    ipmi_password: ""
  - hostname: "pvc3"
    is_coordinator: true
    node_id: 3
    router_id: "10.0.0.3"
    cluster_ip: "by-id"
    storage_ip: "by-id"
    upstream_ip: ""
    ipmi_host: "pvc3-lom"
    ipmi_user: ""
    ipmi_password: ""
# Networks
pvc_asn: "65001"  # BGP ASN, quoted so it stays a string
pvc_routers:
  - ""  # Upstream router IP(s); empty default to be filled in per-cluster
# Cluster (inter-node) network
pvc_cluster_device: "eth0"
pvc_cluster_domain: "pvc.local"
pvc_cluster_subnet: "10.0.0.0/24"
pvc_cluster_floatingip: "10.0.0.251/24"
# Storage network
pvc_storage_device: "eth1"
pvc_storage_domain: "pvc.storage"
pvc_storage_subnet: "10.0.1.0/24"
pvc_storage_floatingip: "10.0.1.251/24"
# Upstream (external) network; empty defaults to be filled in per-cluster
pvc_upstream_device: "eth2"
pvc_upstream_domain: ""
pvc_upstream_subnet: ""
pvc_upstream_floatingip: ""
pvc_upstream_gatewayip: ""
# Devices
pvc_bridge_device: bond0  # Underlying device for bridged client networks
pvc_sriov_enable: false   # Enable SR-IOV support
# NOTE(review): bare value parses as YAML null (kept as-is to preserve behavior);
# presumably a device name or list is supplied here when pvc_sriov_enable is true —
# confirm the expected shape against the consuming role.
pvc_sriov_device: