Move to unified pvc.conf configuration file

Joshua Boniface 2023-11-26 14:14:00 -05:00
parent 460a2dd09f
commit dab7396196
9 changed files with 407 additions and 360 deletions

View File

@@ -1,80 +0,0 @@
---
# pvcapid configuration file example
#
# This configuration file specifies details for the PVC API daemon running on
# this machine. Default values are not supported; the values in this sample
# configuration are considered defaults and can be used as-is.
#
# Copy this example to /etc/pvc/pvcapid.yaml and edit to your needs
pvc:
# debug: Enable/disable API debug mode
debug: True
# coordinators: The list of cluster coordinator hostnames
coordinators:
- pvchv1
- pvchv2
- pvchv3
# api: Configuration of the API listener
api:
# listen_address: IP address(es) to listen on; use 0.0.0.0 for all interfaces
listen_address: "127.0.0.1"
# listen_port: TCP port to listen on, usually 7370
listen_port: "7370"
# authentication: Authentication and security settings
authentication:
# enabled: Enable or disable authentication (True/False)
enabled: False
# secret_key: Per-cluster secret key for API cookies; generate with uuidgen or pwgen
secret_key: ""
# tokens: a list of authentication tokens; leave as an empty list to disable authentication
tokens:
# description: token description for management
- description: "testing"
# token: random token for authentication; generate with uuidgen or pwgen
token: ""
# ssl: SSL configuration
ssl:
# enabled: Enable or disable SSL operation (True/False)
enabled: False
# cert_file: SSL certificate file
cert_file: ""
# key_file: SSL certificate key file
key_file: ""
# provisioner: Configuration of the Provisioner API listener
provisioner:
# database: Backend database configuration
database:
# host: PostgreSQL hostname, usually 'localhost'
host: localhost
# port: PostgreSQL port, invariably '5432'
port: 5432
# name: PostgreSQL database name, invariably 'pvcapi'
name: pvcapi
# user: PostgreSQL username, invariably 'pvcapi'
user: pvcapi
# pass: PostgreSQL user password, randomly generated
pass: pvcapi
# queue: Celery backend queue configuration, using the Redis instance on the PVC cluster
queue:
# host: Redis hostname, usually 'localhost'
host: localhost
# port: Redis port, invariably '6379'
port: 6379
# path: Redis queue path, invariably '/0'
path: /0
# ceph_cluster: Information about the Ceph storage cluster
ceph_cluster:
# storage_hosts: The list of hosts that the Ceph monitors are valid on; if empty (the default),
# uses the list of coordinators
storage_hosts:
- pvchv1
- pvchv2
- pvchv3
# storage_domain: The storage domain name; appended to each storage host name
# to form the monitor access strings
storage_domain: "pvc.storage"
# ceph_monitor_port: The port that the Ceph monitor on each coordinator listens on
ceph_monitor_port: 6789
# ceph_storage_secret_uuid: Libvirt secret UUID for Ceph storage access
ceph_storage_secret_uuid: ""
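# For reference, a worked example of the monitor access strings formed from the
# values above (host name + storage_domain + ceph_monitor_port; this is an
# illustration, not an additional configuration key):
# pvchv1.pvc.storage:6789, pvchv2.pvc.storage:6789, pvchv3.pvc.storage:6789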

View File

@@ -1,52 +0,0 @@
---
# Root level configuration key
autobackup:
# Backup root path on the node, used as the remote mountpoint
# Must be an absolute path beginning with '/'
# If remote_mount is enabled, the remote mount will be mounted on this directory
# If remote_mount is enabled, it is recommended to use a path under `/tmp` for this
# If remote_mount is disabled, a real filesystem must be mounted here (PVC system volumes are small!)
backup_root_path: "/tmp/backups"
# Suffix to the backup root path, used to allow multiple PVC systems to write to a single root path
# Must begin with '/'; leave empty to use the backup root path directly
# Note that most remote mount options can fake this if needed, but it is provided to ensure local compatibility
backup_root_suffix: "/mycluster"
# VM tag(s) to back up
# Only VMs with at least one of the given tag(s) will be backed up; all others will be skipped
backup_tags:
- "backup"
- "mytag"
# Backup schedule: when and what format to take backups
backup_schedule:
full_interval: 7 # Number of total backups between full backups; others are incremental
# > If this number is 1, every backup will be a full backup and no incremental
# backups will be taken
# > If this number is 2, every second backup will be a full backup, etc.
full_retention: 2 # Keep this many full backups; the oldest will be deleted when a new one is
# taken, along with all child incremental backups of that backup
# > Should usually be at least 2 when using incrementals (full_interval > 1) to
# avoid there being too few backups after cleanup from a new full backup
# Automatic mount settings
# These settings permit running an arbitrary set of commands, ideally a "mount" command or similar, to
# ensure that a remote filesystem is mounted on the backup root path
# While the examples here show absolute paths, that is not required; they will run with the $PATH of the
# executing environment (either the "pvc" command on a CLI or a cron/systemd timer)
# A "{backup_root_path}" f-string/str.format type variable MAY be present in any cmds string to represent
# the above configured root backup path, which is interpolated at runtime
# If multiple commands are given, they will be executed in the order given; if no commands are given,
# nothing is executed, but the keys MUST be present
auto_mount:
enabled: no # Enable automatic mount/unmount support
# These commands are executed at the start of the backup run and should mount a filesystem
mount_cmds:
# This example shows an NFS mount leveraging the backup_root_path variable
- "/usr/sbin/mount.nfs -o nfsvers=3 10.0.0.10:/backups {backup_root_path}"
# These commands are executed at the end of the backup run and should unmount a filesystem
unmount_cmds:
# This example shows a generic umount leveraging the backup_root_path variable
- "/usr/bin/umount {backup_root_path}"

View File

@@ -1 +0,0 @@
client-cli/autobackup.sample.yaml usr/share/pvc

View File

@@ -1,7 +1,6 @@
api-daemon/pvcapid.py usr/share/pvc
api-daemon/pvcapid-manage*.py usr/share/pvc
api-daemon/pvc-api-db-upgrade usr/share/pvc
api-daemon/pvcapid.sample.yaml usr/share/pvc
api-daemon/pvcapid usr/share/pvc
api-daemon/pvcapid.service lib/systemd/system
api-daemon/pvcworkerd.service lib/systemd/system

View File

@@ -15,9 +15,6 @@ if systemctl is-active --quiet pvcworkerd.service; then
systemctl start pvcworkerd.service
fi
if [ ! -f /etc/pvc/pvcapid.yaml ]; then
echo "NOTE: The PVC client API daemon (pvcapid.service) and the PVC Worker daemon (pvcworkerd.service) have not been started; create a config file at /etc/pvc/pvcapid.yaml, then run the database configuration (/usr/share/pvc/pvc-api-db-upgrade) and start them manually."
if [ ! -f /etc/pvc/pvc.conf ]; then
echo "NOTE: The PVC client API daemon (pvcapid.service) and the PVC Worker daemon (pvcworkerd.service) have not been started; create a config file at /etc/pvc/pvc.conf, then run the database configuration (/usr/share/pvc/pvc-api-db-upgrade) and start them manually."
fi
# Clean up any old sample configs
rm /etc/pvc/pvcapid.sample.yaml || true

View File

@@ -1,5 +1,4 @@
node-daemon/pvcnoded.py usr/share/pvc
node-daemon/pvcnoded.sample.yaml usr/share/pvc
node-daemon/pvcnoded usr/share/pvc
node-daemon/pvcnoded.service lib/systemd/system
node-daemon/pvc.target lib/systemd/system

View File

@@ -12,8 +12,5 @@ systemctl enable /lib/systemd/system/pvc.target
if systemctl is-active --quiet pvcnoded.service; then
echo "NOTE: The PVC node daemon (pvcnoded.service) has not been restarted; this is up to the administrator."
else
echo "NOTE: The PVC node daemon (pvcnoded.service) has not been started; create a config file at /etc/pvc/pvcnoded.yaml then start it."
echo "NOTE: The PVC node daemon (pvcnoded.service) has not been started; create a config file at /etc/pvc/pvc.conf then start it."
fi
# Clean up any old sample configs
rm /etc/pvc/pvcnoded.sample.yaml || true

View File

@@ -1,216 +0,0 @@
---
# pvcnoded configuration file example
#
# This configuration file specifies details for this node in PVC. Multiple node
# blocks can be added but only the one matching the current system nodename will
# be used by the local daemon. Default values are not supported; the values in
# this sample configuration are considered defaults and, with adjustment of the
# nodename section and coordinators list, can be used as-is on a Debian system.
#
# Copy this example to /etc/pvc/pvcnoded.yaml and edit to your needs
pvc:
# node: The (short) hostname of the node, set during provisioning
node: pvchv1
# debug: Enable or disable debug output
debug: False
# functions: The daemon functions to enable
functions:
# enable_hypervisor: Enable or disable hypervisor functionality
# This should never be False except in very advanced use cases
enable_hypervisor: True
# enable_networking: Enable or disable virtual networking and routing functionality
enable_networking: True
# enable_storage: Enable or disable Ceph storage management functionality
enable_storage: True
# enable_api: Enable or disable the API client, if installed, when node is Primary
enable_api: True
# cluster: Cluster-level configuration
cluster:
# coordinators: The list of cluster coordinator hostnames
coordinators:
- pvchv1
- pvchv2
- pvchv3
# networks: Cluster-level network configuration
# OPTIONAL if enable_networking: False
networks:
# upstream: Upstream routed network for in- and out-bound upstream networking
upstream:
# domain: Upstream domain name, may be None
domain: "mydomain.net"
# network: Upstream network block
network: "1.1.1.0/24"
# floating_ip: Upstream floating IP address for the primary coordinator
floating_ip: "1.1.1.10/24"
# gateway: Upstream static default gateway, if applicable
gateway: "1.1.1.1"
# cluster: Cluster internal network for node communication and client virtual networks
cluster:
# domain: Cluster internal domain name
domain: "pvc.local"
# network: Cluster internal network block
network: "10.255.0.0/24"
# floating_ip: Cluster internal floating IP address for the primary coordinator
floating_ip: "10.255.0.254/24"
# storage: Cluster internal network for storage traffic
storage:
# domain: Cluster storage domain name
domain: "pvc.storage"
# network: Cluster storage network block
network: "10.254.0.0/24"
# floating_ip: Cluster storage floating IP address for the primary coordinator
floating_ip: "10.254.0.254/24"
# coordinator: Coordinator-specific configuration
# OPTIONAL if enable_networking: False
coordinator:
# dns: DNS aggregator subsystem
dns:
# database: Patroni PostgreSQL database configuration
database:
# host: PostgreSQL hostname, invariably 'localhost'
host: localhost
# port: PostgreSQL port, invariably '5432'
port: 5432
# name: PostgreSQL database name, invariably 'pvcdns'
name: pvcdns
# user: PostgreSQL username, invariably 'pvcdns'
user: pvcdns
# pass: PostgreSQL user password, randomly generated
pass: pvcdns
# metadata: Metadata API subsystem
metadata:
# database: Patroni PostgreSQL database configuration
database:
# host: PostgreSQL hostname, invariably 'localhost'
host: localhost
# port: PostgreSQL port, invariably '5432'
port: 5432
# name: PostgreSQL database name, invariably 'pvcapi'
name: pvcapi
# user: PostgreSQL username, invariably 'pvcapi'
user: pvcapi
# pass: PostgreSQL user password, randomly generated
pass: pvcapi
# system: Local PVC instance configuration
system:
# intervals: Intervals for keepalives and fencing
intervals:
# vm_shutdown_timeout: Number of seconds for a VM to 'shutdown' before being forced off
vm_shutdown_timeout: 180
# keepalive_interval: Number of seconds between keepalive/status updates
keepalive_interval: 5
# monitoring_interval: Number of seconds between monitoring check updates
monitoring_interval: 60
# fence_intervals: Number of keepalive_intervals to declare a node dead and fence it
fence_intervals: 6
# suicide_intervals: Number of keepalive_intervals before a node considers itself dead and self-fences, 0 to disable
suicide_intervals: 0
# fencing: Node fencing configuration
fencing:
# actions: Actions to take after a fence trigger
actions:
# successful_fence: Action to take after successfully fencing a node, options: migrate, None
successful_fence: migrate
# failed_fence: Action to take after failing to fence a node, options: migrate, None
failed_fence: None
# ipmi: Local system IPMI options
ipmi:
# host: Hostname/IP of the local system's IPMI interface, must be reachable
host: pvchv1-lom
# user: Local system IPMI username
user: admin
# pass: Local system IPMI password
pass: Passw0rd
# migration: Migration option configuration
migration:
# target_selector: Criteria to select the ideal migration target, options: mem, memprov, load, vcpus, vms
target_selector: mem
# configuration: Local system configurations
configuration:
# directories: PVC system directories
directories:
# plugin_directory: Directory containing node monitoring plugins
plugin_directory: "/usr/share/pvc/plugins"
# dynamic_directory: Temporary in-memory directory for active configurations
dynamic_directory: "/run/pvc"
# log_directory: Logging directory
log_directory: "/var/log/pvc"
# console_log_directory: Libvirt console logging directory
console_log_directory: "/var/log/libvirt"
# logging: PVC logging configuration
logging:
# file_logging: Enable or disable logging to files under log_directory
file_logging: True
# stdout_logging: Enable or disable logging to stdout (i.e. journald)
stdout_logging: True
# zookeeper_logging: Enable or disable logging to Zookeeper (for `pvc node log` functionality)
zookeeper_logging: True
# log_colours: Enable or disable ANSI colours in log output
log_colours: True
# log_dates: Enable or disable date strings in log output
log_dates: True
# log_keepalives: Enable or disable keepalive logging
log_keepalives: True
# log_keepalive_cluster_details: Enable or disable node status logging during keepalive
log_keepalive_cluster_details: True
# log_keepalive_plugin_details: Enable or disable node health plugin logging during keepalive
log_keepalive_plugin_details: True
# console_log_lines: Number of console log lines to store in Zookeeper per VM
console_log_lines: 1000
# node_log_lines: Number of node log lines to store in Zookeeper per node
node_log_lines: 2000
# networking: PVC networking configuration
# OPTIONAL if enable_networking: False
networking:
# bridge_device: Underlying device to use for bridged vLAN networks; usually the device of <cluster>
bridge_device: ens4
# bridge_mtu: The MTU of the underlying device used for bridged vLAN networks, and thus the maximum
# MTU of the overlying bridge devices.
bridge_mtu: 1500
# sriov_enable: Enable or disable (default if absent) SR-IOV network support
sriov_enable: False
# sriov_device: Underlying device(s) to use for SR-IOV networks; can be bridge_device or other NIC(s)
sriov_device:
# The physical device name
- phy: ens1f1
# The preferred MTU of the physical device; OPTIONAL - defaults to the interface default if unset
mtu: 9000
# The number of VFs to enable on this device
# NOTE: This defines the maximum number of VMs which can be provisioned on this physical device; VMs
# are allocated to these VFs manually by the administrator and thus all nodes should have the
# same number
# NOTE: This value cannot be changed at runtime on Intel(R) NICs; the node will need to be restarted
# if this value changes
vfcount: 8
# upstream: Upstream physical interface device
upstream:
# device: Upstream interface device name
device: ens4
# mtu: Upstream interface MTU; use 9000 for jumbo frames (requires switch support)
mtu: 1500
# address: Upstream interface IP address, options: by-id, <static>/<mask>
address: by-id
# cluster: Cluster (VNIC) physical interface device
cluster:
# device: Cluster (VNIC) interface device name
device: ens4
# mtu: Cluster (VNIC) interface MTU; use 9000 for jumbo frames (requires switch support)
mtu: 1500
# address: Cluster (VNIC) interface IP address, options: by-id, <static>/<mask>
address: by-id
# storage: Storage (Ceph OSD) physical interface device
storage:
# device: Storage (Ceph OSD) interface device name
device: ens4
# mtu: Storage (Ceph OSD) interface MTU; use 9000 for jumbo frames (requires switch support)
mtu: 1500
# address: Storage (Ceph OSD) interface IP address, options: by-id, <static>/<mask>
address: by-id
# storage: PVC storage configuration
# OPTIONAL if enable_storage: False
storage:
# ceph_config_file: The config file containing the Ceph cluster configuration
ceph_config_file: "/etc/ceph/ceph.conf"
# ceph_admin_keyring: The file containing the Ceph client admin keyring
ceph_admin_keyring: "/etc/ceph/ceph.client.admin.keyring"

pvc.sample.conf (new file, 404 lines)
View File

@@ -0,0 +1,404 @@
---
# PVC system configuration - example file
#
# This configuration file defines the details of a PVC cluster.
# It is used by several daemons on the system, including pvcnoded, pvcapid, pvcworkerd, and pvchealthd.
#
# This file will normally be written by the PVC Ansible framework; this example is provided for reference
# Cluster configuration
cluster:
# The name of the cluster
name: pvc1
# The full list of nodes in this cluster
all_nodes:
- pvchv1
- pvchv2
- pvchv3
# The list of coordinator nodes in this cluster (subset of all_nodes)
coordinator_nodes:
- pvchv1
- pvchv2
- pvchv3
# Hardcoded networks (upstream/cluster/storage)
networks:
# Upstream network, used for inbound and outbound connectivity, API management, etc.
upstream:
# Domain name
domain: "mydomain.net"
# Device
device: ens4
# MTU
mtu: 1500
# IPv4 configuration
ipv4:
# CIDR netmask
netmask: 24
# Network address
network_address: 10.0.0.0
# Floating address
floating_address: 10.0.0.250
# Upstream/default gateway address
gateway_address: 10.0.0.254
# Node IP selection mechanism (either by-id or static; static requires additional configuration)
node_ip_selection: by-id
# Cluster network, used for inter-node communication (VM- and Network-layer), unrouted
cluster:
# Domain name
domain: "pvc.local"
# Device
device: ens4
# MTU
mtu: 1500
# IPv4 configuration
ipv4:
# CIDR netmask
netmask: 24
# Network address
network_address: 10.0.1.0
# Floating address
floating_address: 10.0.1.250
# Node IP selection mechanism (either by-id or static; static requires additional configuration)
node_ip_selection: by-id
# Storage network, used for inter-node communication (Storage-layer), unrouted
storage:
# Domain name
domain: "storage.local"
# Device
device: ens4
# MTU
mtu: 1500
# IPv4 configuration
ipv4:
# CIDR netmask
netmask: 24
# Network address
network_address: 10.0.2.0
# Floating address
floating_address: 10.0.2.250
# Node IP selection mechanism (either by-id or static; static requires additional configuration)
node_ip_selection: by-id
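# For reference, a worked example of by-id selection (an inference from the node ID
# scheme and the static-selection octet file described under "path" below, not an
# additional configuration key): a node with ID 1 (e.g. pvchv1) would take the .1
# address in each network, i.e. upstream 10.0.0.1/24, cluster 10.0.1.1/24,
# storage 10.0.2.1/24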
# Database configuration
database:
# Zookeeper client configuration
zookeeper:
# Port number
port: 2181
# KeyDB/Redis client configuration
keydb:
# Port number
port: 6379
# Hostname; use `cluster` network floating IP address
hostname: 10.0.1.250
# Path, usually "/0"
path: "/0"
# PostgreSQL client configuration
postgres:
# Port number
port: 5432
# Hostname; use `cluster` network floating IP address
hostname: 10.0.1.250
# Credentials
credentials:
# API database
pvcapi:
# Username
username: pvcapi
# Password
password: pvcapiPassw0rd
# DNS database
pvcdns:
# Username
username: pvcdns
# Password
password: pvcdnsPassw0rd
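# A quick sketch for verifying the PostgreSQL credentials above from a node; this
# assumes the psql client is installed and that the API database shares the pvcapi
# name used in prior releases:
# PGPASSWORD=pvcapiPassw0rd psql -h 10.0.1.250 -p 5432 -U pvcapi -d pvcapi -c '\conninfo'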
# Timer information
timer:
# VM shutdown timeout (seconds)
vm_shutdown_timeout: 180
# Node keepalive interval (seconds)
keepalive_interval: 5
# Monitoring interval (seconds)
monitoring_interval: 60
# Fencing configuration
fencing:
# Whether to disable fencing for the remainder of the daemon run if the IPMI connection fails at startup
disable_on_ipmi_failure: no
# Fencing intervals
intervals:
# Fence intervals (number of keepalives)
fence_intervals: 6
# Suicide intervals (number of keepalives; 0 to disable)
suicide_intervals: 0
# Fencing actions
actions:
# Successful fence action ("migrate" or "none")
successful_fence: migrate
# Failed fence action ("migrate" or "none")
failed_fence: none
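# Worked timing example with the values above: a node is declared dead and fenced
# after fence_intervals (6) missed keepalives at keepalive_interval (5) seconds,
# i.e. roughly 6 * 5 = 30 seconds after its last successful keepalive.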
# VM migration configuration
migration:
# Target selection default value (mem, memprov, load, vcpus, vms)
target_selector: mem
# Paths configuration
path:
# Node name file
# Contains the (short) hostname of this particular node
node_name_file: "/etc/pvc/node"
# Node Static IP file
# Contains the last octet IP address of this node for use if node_ip_selection is "static" for any network
node_ip_file: "/etc/pvc/ipaddr"
# Plugin directory
plugin_directory: "/usr/share/pvc/plugins"
# Dynamic directory
dynamic_directory: "/run/pvc"
# System log directory
system_log_directory: "/var/log/pvc"
# VM Console log directory (set by Libvirt)
console_log_directory: "/var/log/libvirt"
# Ceph configuration directory (set by Ceph/Ansible)
ceph_directory: "/etc/ceph"
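# The node name and static IP files above are normally written during provisioning;
# a minimal sketch of seeding them by hand (hostname and octet values are examples
# only):
# printf 'pvchv1' > /etc/pvc/node
# printf '21' > /etc/pvc/ipaddr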
# Logging configuration
logging:
# Enable or disable debug logging (all services)
debug_logging: yes
# Enable or disable file logging
file_logging: no
# Enable or disable stdout logging (to journald)
stdout_logging: yes
# Enable or disable Zookeeper logging (for "pvc node log" functionality)
zookeeper_logging: yes
# Enable or disable ANSI colour sequences in logs
log_colours: yes
# Enable or disable dates in logs
log_dates: yes
# Enable or disable keepalive event logging
log_keepalives: yes
# Enable or disable cluster detail logging during keepalive events
log_cluster_details: yes
# Number of VM console log lines to store in Zookeeper (per VM)
console_log_lines: 1000
# Number of node log lines to store in Zookeeper (per node)
node_log_lines: 2000
# Guest networking configuration
guest_networking:
# Bridge device for "bridged"-type networks
bridge_device: ens4
# Bridge device MTU
bridge_mtu: 1500
# Enable or disable SR-IOV functionality
sriov_enable: no
# SR-IOV configuration (list of PFs)
sriov_device:
# SR-IOV device; if this device isn't found, it is ignored on a given node
- device: ens1f1
# SR-IOV device MTU
mtu: 9000
# Number of VFs on this device
vfcount: 9
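# The VF state of a PF can be inspected on a node via the standard Linux sysfs
# interface (a general kernel mechanism, not a PVC command):
# cat /sys/class/net/ens1f1/device/sriov_totalvfs
# cat /sys/class/net/ens1f1/device/sriov_numvfs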
# Ceph configuration
ceph:
# Monitor port, usually 6789
monitor_port: 6789
# Storage secret UUID, generated during Ansible cluster bootstrap
secret_uuid: ""
# API configuration
api:
# Secret key for API cookies (long and secure password or UUID)
secret_key: "1234567890abcdefghijklmnopqrstuvwxyz"
# API listening configuration
listen:
# Listen address, usually upstream floating IP
address: 10.0.0.250
# Listen port, usually 7370
port: 7370
# Authentication configuration
authentication:
# Enable or disable authentication
enabled: yes
# Authentication source (token, others in future)
source: token
# Token configuration
token:
# A friendly description
- description: "testing"
# The token (long and secure password or UUID)
token: "1234567890abcdefghijklmnopqrstuvwxyz"
# SSL configuration
ssl:
# Enable or disable SSL operation
enabled: no
# Certificate file path
certificate: ""
# Private key file path
private_key: ""
# Automatic backups
autobackup:
# Backup root path on the node, used as the remote mountpoint
# Must be an absolute path beginning with '/'
# If remote_mount is enabled, the remote mount will be mounted on this directory
# If remote_mount is enabled, it is recommended to use a path under `/tmp` for this
# If remote_mount is disabled, a real filesystem must be mounted here (PVC system volumes are small!)
backup_root_path: "/tmp/backups"
# Suffix to the backup root path, used to allow multiple PVC systems to write to a single root path
# Must begin with '/'; leave empty to use the backup root path directly
# Note that most remote mount options can fake this if needed, but it is provided to ensure local compatibility
backup_root_suffix: "/mycluster"
# VM tag(s) to back up
# Only VMs with at least one of the given tag(s) will be backed up; all others will be skipped
backup_tags:
- "backup"
- "mytag"
# Backup schedule: when and what format to take backups
backup_schedule:
full_interval: 7 # Number of total backups between full backups; others are incremental
# > If this number is 1, every backup will be a full backup and no incremental
# backups will be taken
# > If this number is 2, every second backup will be a full backup, etc.
full_retention: 2 # Keep this many full backups; the oldest will be deleted when a new one is
# taken, along with all child incremental backups of that backup
# > Should usually be at least 2 when using incrementals (full_interval > 1) to
# avoid there being too few backups after cleanup from a new full backup
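# Worked example with the values above: backups 1 and 8 of a cycle are full and
# backups 2-7 are incrementals against the preceding full; with full_retention 2,
# taking a third full backup deletes the oldest full along with its six child
# incrementals.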
# Automatic mount settings
# These settings permit running an arbitrary set of commands, ideally a "mount" command or similar, to
# ensure that a remote filesystem is mounted on the backup root path
# While the examples here show absolute paths, that is not required; they will run with the $PATH of the
# executing environment (either the "pvc" command on a CLI or a cron/systemd timer)
# A "{backup_root_path}" f-string/str.format type variable MAY be present in any cmds string to represent
# the above configured root backup path, which is interpolated at runtime
# If multiple commands are given, they will be executed in the order given; if no commands are given,
# nothing is executed, but the keys MUST be present
auto_mount:
enabled: no # Enable automatic mount/unmount support
# These commands are executed at the start of the backup run and should mount a filesystem
mount_cmds:
# This example shows an NFS mount leveraging the backup_root_path variable
- "/usr/sbin/mount.nfs -o nfsvers=3 10.0.0.10:/backups {backup_root_path}"
# These commands are executed at the end of the backup run and should unmount a filesystem
unmount_cmds:
# This example shows a generic umount leveraging the backup_root_path variable
- "/usr/bin/umount {backup_root_path}"
# VIM modeline, requires "set modeline" in your VIMRC
# vim: expandtab shiftwidth=2 tabstop=2 filetype=yaml