---
# Logging configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the logging output of the PVC system.
#pvc_log_to_file: false                    # Log to a file in /var/log/pvc
#pvc_log_to_stdout: true                   # Log to stdout (i.e. journald)
#pvc_log_to_zookeeper: true                # Log to Zookeeper (required for 'node log' commands)
#pvc_log_colours: true                     # Log colourful prompts for states instead of text
#pvc_log_dates: false                      # Log dates (useful with log_to_file, not useful with log_to_stdout as journald adds these)
#pvc_log_keepalives: true                  # Log keepalive event every pvc_keepalive_interval seconds
#pvc_log_keepalive_cluster_details: true   # Log cluster details (VMs, load, etc.) during keepalive events
#pvc_log_keepalive_storage_details: true   # Log storage details (OSDs, pools, health) during keepalive events
#pvc_log_console_lines: 1000               # The number of VM console log lines to store in Zookeeper for 'vm log' commands.
#pvc_log_node_lines: 2000                  # The number of node log lines to store in Zookeeper for 'node log' commands.

# Timing and fencing configuration (uncomment to override defaults)
# These default options are generally best for most clusters; override these if you want more granular
# control over the timings of various areas of the cluster, for instance if your hardware is slow or error-prone.
#pvc_vm_shutdown_timeout: 180              # Number of seconds before a 'shutdown' VM is forced off
#pvc_keepalive_interval: 5                 # Number of seconds between keepalive ticks
#pvc_fence_intervals: 6                    # Number of keepalive ticks before a node is considered dead
#pvc_suicide_intervals: 0                  # Number of keepalive ticks before a node considers itself dead (0 to disable)
#pvc_fence_successful_action: migrate      # What to do with VMs when a fence is successful (migrate, None)
#pvc_fence_failed_action: None             # What to do with VMs when a fence is failed (migrate, None) - migrate is
                                           # DANGEROUS without pvc_suicide_intervals set to < pvc_fence_intervals
#pvc_fence_migrate_target_selector: mem    # The selector to use for migrating VMs after a fence

# Client API basic configuration
pvc_api_listen_address: "{{ pvc_upstream_floatingip }}"
pvc_api_listen_port: "7370"
pvc_api_secret_key: ""  # Use pwgen to generate

# Client API user tokens
# Create a token (random UUID or password) for each user you wish to have access to the PVC API.
# The first token will always be used for the "local" connection, and thus at least one token MUST be defined.
pvc_api_enable_authentication: true
pvc_api_tokens:
#  - description: "myuser"
#    token: "a3945326-d36c-4024-83b3-2a8931d7785a"

# PVC API SSL configuration
# Use these options to enable SSL for the API listener, providing security over WAN connections.
# There are two options for defining the SSL certificate and key to use:
#   a) Set both pvc_api_ssl_cert_path and pvc_api_ssl_key_path to paths to an existing SSL combined
#      (CA + cert) certificate and key, respectively, on the system.
#   b) Set both pvc_api_ssl_cert and pvc_api_ssl_key to the raw PEM-encoded contents of an SSL combined
#      (CA + cert) certificate and key, respectively, which will be installed under /etc/pvc.
# If the _path options are non-empty, the raw entries are ignored and will not be used.
pvc_api_enable_ssl: false
pvc_api_ssl_cert_path:
pvc_api_ssl_cert: >
  # A RAW CERTIFICATE FILE, installed to /etc/pvc/api-cert.pem
pvc_api_ssl_key_path:
pvc_api_ssl_key: >
  # A RAW KEY FILE, installed to /etc/pvc/api-key.pem

# Ceph storage configuration
pvc_ceph_storage_secret_uuid: ""  # Use uuidgen to generate

# Database configuration
pvc_dns_database_name: "pvcdns"
pvc_dns_database_user: "pvcdns"
pvc_dns_database_password: ""  # Use pwgen to generate
pvc_api_database_name: "pvcapi"
pvc_api_database_user: "pvcapi"
pvc_api_database_password: ""  # Use pwgen to generate
pvc_replication_database_user: "replicator"
pvc_replication_database_password: ""  # Use pwgen to generate
pvc_superuser_database_user: "postgres"
pvc_superuser_database_password: ""  # Use pwgen to generate

# Network routing configuration
# > The ASN should be a private ASN number.
# > The list of routers are those which will learn routes to the PVC client networks via BGP;
#   they should speak BGP and allow sessions from the PVC nodes.
pvc_asn: "65500"
pvc_routers:
  - "192.168.100.1"

# PVC Node list
# > Every node configured with this playbook must be specified in this list.
pvc_nodes:
  - hostname: "pvchv1"  # This name MUST match the Ansible inventory_hostname
    is_coordinator: true
    node_id: 1
    router_id: "192.168.100.11"
    upstream_ip: "192.168.100.11"
    cluster_ip: "10.0.0.1"
    storage_ip: "10.0.1.1"
    ipmi_host: "{{ ipmi['hosts']['pvchv1']['hostname'] }}"  # Note the node inventory hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
  - hostname: "pvchv2"
    is_coordinator: true
    node_id: 2
    router_id: "192.168.100.12"
    upstream_ip: "192.168.100.12"
    cluster_ip: "10.0.0.2"
    storage_ip: "10.0.1.2"
    ipmi_host: "{{ ipmi['hosts']['pvchv2']['hostname'] }}"  # Note the node inventory hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
  - hostname: "pvchv3"
    is_coordinator: true
    node_id: 3
    router_id: "192.168.100.13"
    upstream_ip: "192.168.100.13"
    cluster_ip: "10.0.0.3"
    storage_ip: "10.0.1.3"
    ipmi_host: "{{ ipmi['hosts']['pvchv3']['hostname'] }}"  # Note the node inventory hostname key in here
    ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
    ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"

# Bridge device entry
# This device is passed to PVC and is used when creating bridged networks. Normal managed networks are
# created on top of the "cluster" interface defined below, however bridged networks must be created
# directly on an underlying non-vLAN network device. This can be the same underlying device as the
# upstream/cluster/storage networks (especially if the upstream network device is not a vLAN itself),
# or a different device separate from the other 3 main networks.
pvc_bridge_device: bondU  # Replace based on your network configuration
pvc_bridge_mtu: 1500      # Replace based on your network configuration

# SR-IOV device configuration
# SR-IOV enables the passing of hardware-virtualized network devices (VFs), created on top of SR-IOV-enabled
# physical NICs (PFs), into virtual machines.
# SR-IOV is a complex topic, and will not be discussed in detail
# here. Instead, the SR-IOV mode is disabled by default and a commented out example configuration is shown.
pvc_sriov_enable: false
#pvc_sriov_device:
#  - phy: ens1f0
#    mtu: 9000
#    vfcount: 6

# Memory tuning
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default memory
#   allocations. Uncomment these options only in low-memory situations (nodes with <32GB RAM).
#
# OSD memory limit - 939524096 (~900MB) is the lowest possible value; default is 4GB.
# > This option is *only* applied at cluster bootstrap and cannot be changed later
#   here, only by editing the `files/ceph/<cluster>/ceph.conf` file directly.
#pvc_osd_memory_limit: 939524096
#
# Zookeeper heap memory limit, sets Xms and Xmx values to the Java process; default is 512M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
#   Lowering the heap limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_heap_limit: 128M  # 1/4 of default
#
# Zookeeper stack memory limit, sets Xss value to the Java process; default is 1024M.
# > WARNING: Unless you have an extremely limited amount of RAM, changing this setting is NOT RECOMMENDED.
#   Lowering the stack limit may cause poor performance or crashes in Zookeeper during some tasks.
#pvc_zookeeper_stack_limit: 256M  # 1/4 of default

# CPU pinning configuration via cset
# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default scheduling.
#   Uncomment these options only for testing or if you are certain you meet the following conditions.
# > These options will tune cpuset (installed by default) to limit Ceph OSDs to certain CPU cores, while
#   simultaneously limiting other system tasks and VMs to the remaining CPU cores. In effect it dedicates the
#   specified CPU cores to Ceph OSDs only to ensure those processes can have dedicated CPU time.
# > Generally speaking, except in cases where extremely high random read throughput is required and in which
#   the node(s) have a very large number of physical cores, this setting will not improve performance, and
#   may in fact hurt performance. For more details please see the documentation.
# > For optimal performance when using this setting, you should dedicate exactly 2 cores, and their
#   respective SMT threads if applicable, to each OSD. For instance, with 2 OSDs, 4 real cores (and their
#   corresponding SMT threads if applicable) should be specified. More cores has been seen to, in some cases,
#   drop performance further. For more details please see the documentation.
# > Use the 'virsh capabilities' command to confirm the exact CPU IDs (and SMT "siblings") for these lists.
#
pvc_shield_osds_enable: false
#pvc_shield_osds_cset:
#  # This example host has 2x 6-core SMT-enabled CPUs; we want to use cores 0 (+SMT 12) and 2 (+SMT 14), which are
#  # both on physical CPU 0, for 1x OSD.
#  - hostname: pvchv1
#    osd_cset:
#      - 0
#      - 2
#      - 12
#      - 14
#  # These example hosts have 1x 8-core SMT-enabled CPUs; we want to use cores 0 (+SMT 8) and 1 (+SMT 9) for 1x OSD.
#  - hostname: pvchv2
#    osd_cset:
#      - 0
#      - 1
#      - 8
#      - 9
#  - hostname: pvchv3
#    osd_cset:
#      - 0
#      - 1
#      - 8
#      - 9

# Configuration file networks
# > Taken from base.yml's configuration; DO NOT MODIFY THIS SECTION.
# Upstream network facts, derived from the base.yml 'networks' dictionary.
pvc_upstream_device: "{{ networks['upstream']['device'] }}"
pvc_upstream_mtu: "{{ networks['upstream']['mtu'] }}"
pvc_upstream_domain: "{{ networks['upstream']['domain'] }}"
pvc_upstream_netmask: "{{ networks['upstream']['netmask'] }}"
pvc_upstream_subnet: "{{ networks['upstream']['subnet'] }}"
pvc_upstream_floatingip: "{{ networks['upstream']['floating_ip'] }}"
pvc_upstream_gatewayip: "{{ networks['upstream']['gateway_ip'] }}"

# Cluster network facts, derived from the base.yml 'networks' dictionary.
pvc_cluster_device: "{{ networks['cluster']['device'] }}"
pvc_cluster_mtu: "{{ networks['cluster']['mtu'] }}"
pvc_cluster_domain: "{{ networks['cluster']['domain'] }}"
pvc_cluster_netmask: "{{ networks['cluster']['netmask'] }}"
pvc_cluster_subnet: "{{ networks['cluster']['subnet'] }}"
pvc_cluster_floatingip: "{{ networks['cluster']['floating_ip'] }}"

# Storage network facts, derived from the base.yml 'networks' dictionary.
pvc_storage_device: "{{ networks['storage']['device'] }}"
pvc_storage_mtu: "{{ networks['storage']['mtu'] }}"
pvc_storage_domain: "{{ networks['storage']['domain'] }}"
pvc_storage_netmask: "{{ networks['storage']['netmask'] }}"
pvc_storage_subnet: "{{ networks['storage']['subnet'] }}"
pvc_storage_floatingip: "{{ networks['storage']['floating_ip'] }}"