Add templating of network interfaces

Closes #1
Joshua Boniface 2023-09-01 15:42:21 -04:00
parent cfbe724458
commit c9393ba957
5 changed files with 137 additions and 35 deletions


@@ -1,17 +1,69 @@
 ---
-# Basic information
+# Cluster domain for node FQDNs
 local_domain: upstream.local
+
+# IPMI user and password
+# > For the password, use pwgen to generate.
+# > Set these in the IPMI configuration as a user with permissions to reboot the host.
 username_ipmi_host: pvc
-passwd_ipmi_host: ""        # Use pwgen to generate (set in IPMI config)
-passwdhash_root: ""         # Use pwgen to generate and openssl passwd -1 -salt xyz <pw> to hash
-# Logrotate
+passwd_ipmi_host: ""
+
+# Root user password (as /etc/shadow hash)
+# > Use pwgen to generate and openssl passwd -1 -salt xyz <pw> to hash.
+passwdhash_root: ""
+
+# Log rotation configuration
 logrotate_keepcount: 7
 logrotate_interval: daily
-# Email
+
+# Root email name (usually "root")
 username_email_root: root
-# Administrative users
+
+# Administrative shell users for the cluster
 admin_users:
   - name: "myuser"
     uid: 500
     keys:
       - "ssh-ed25519 MyKey 2019-06"
+
+# Node network definitions (used by /etc/network/interfaces and PVC)
+# > The "type" can be one of three NIC types: "nic" for raw NIC devices, "bond" for ifenslave bonds,
+#   or "vlan" for vLAN interfaces. The PVC role will write out an interfaces file matching these specs.
+# > Three names are reserved for the PVC-specific interfaces: upstream, cluster, and storage; others
+#   may be used at will to describe the other devices.
+# > All devices should be using the newer device name format (i.e. enp1s0f0 instead of eth0).
+# > In this example configuration, the "upstream" device is an LACP bond of the first two onboard NICs,
+#   with the two other PVC networks being vLANs on top of this device.
+# > Usually, the Upstream network provides Internet connectivity for nodes in the cluster, and all
+#   nodes are part of it regardless of function for this reason; an optional, advanced, configuration
+#   will have only coordinators in the upstream network, however this configuration is out of the scope
+#   of this role.
+networks:
+  "upstream":
+    device: "bondU"
+    type: "bond"
+    bond_mode: "802.3ad"
+    bond_devices:
+      - "enp1s0f0"
+      - "enp1s0f1"
+    mtu: 1500
+    domain: "{{ local_domain }}"
+    subnet: "192.168.100.0/24"
+    floating_ip: "192.168.100.10/24"
+    gateway_ip: "192.168.100.1"
+  "cluster":
+    device: "vlan1001"
+    type: "vlan"
+    raw_device: "bondU"
+    mtu: 1500
+    domain: "pvc-cluster.local"
+    subnet: "10.0.0.0/24"
+    floating_ip: "10.0.0.254/24"
+  "storage":
+    device: "vlan1002"
+    type: "vlan"
+    raw_device: "bondU"
+    mtu: 1500
+    domain: "pvc-storage.local"
+    subnet: "10.0.1.0/24"
+    floating_ip: "10.0.1.254/24"


@@ -1,7 +1,16 @@
 ---
+# Logging
+pvc_log_to_file: True
+pvc_log_to_stdout: True
+pvc_log_keepalives: True
+pvc_log_keepalive_cluster_details: True
+pvc_log_keepalive_storage_details: True
+pvc_log_console_lines: 1000
+
 # Ceph storage
 pvc_ceph_storage_secret_uuid: ""        # Use uuidgen to generate
-# Database
+
+# Database configurations
 pvc_dns_database_name: "pvcdns"
 pvc_dns_database_user: "pvcdns"
 pvc_dns_database_password: ""           # Use pwgen to generate
@@ -9,7 +18,17 @@ pvc_replication_database_user: "replicator"
 pvc_replication_database_password: ""   # Use pwgen to generate
 pvc_superuser_database_user: "postgres"
 pvc_superuser_database_password: ""     # Use pwgen to generate
-# Coordinators
+
+# Network routing configuration
+# > The ASN should be a private ASN number.
+# > The list of routers are those which will learn routes to the PVC client networks via BGP;
+#   they should speak BGP and allow sessions from the PVC nodes.
+pvc_asn: "65500"
+pvc_routers:
+  - "192.168.100.1"
+
+# Node list
+# > Every node configured with this playbook must be specified in this list.
 pvc_nodes:
   - hostname: "pvchv1"
     is_coordinator: yes
@@ -50,30 +69,22 @@ pvc_nodes:
     ipmi_host: "pvchv3-lom.{{ local_domain }}"
     ipmi_user: "{{ username_ipmi_host }}"
     ipmi_password: "{{ passwd_ipmi_host }}"
-# Networks
-pvc_asn: "65500"
-pvc_routers:
-  - "192.168.100.1"
-pvc_upstream_device: "enp1s0f0"         # Set to your actual NIC device (or bond, vLAN, etc.)
-pvc_upstream_mtu: "1500"
-pvc_upstream_domain: "{{ local_domain }}"
-pvc_upstream_subnet: "192.168.100.0/23"
-pvc_upstream_floatingip: "192.168.100.10/23"
-pvc_upstream_gatewayip: "192.168.100.1"
-pvc_cluster_device: "vlan1001"          # Set to your actual NIC device (usually a vLAN)
-pvc_cluster_mtu: "1500"
-pvc_cluster_domain: "pvc.local"
-pvc_cluster_subnet: "10.0.0.0/24"
-pvc_cluster_floatingip: "10.0.0.254/24"
-pvc_storage_device: "vlan1002"          # Set to your actual NIC device (usually a vLAN)
-pvc_storage_mtu: "1500"
-pvc_storage_domain: "pvc.storage"
-pvc_storage_subnet: "10.0.1.0/24"
-pvc_storage_floatingip: "10.0.1.254/24"
-# Logging
-pvc_log_to_file: True
-pvc_log_to_stdout: True
-pvc_log_keepalives: True
-pvc_log_keepalive_cluster_details: True
-pvc_log_keepalive_storage_details: True
-pvc_log_console_lines: 1000
+
+# Configuration file networks
+# > Taken from base.yml's configuration; do not modify this section.
+pvc_upstream_device: "{{ networks['upstream']['device'] }}"
+pvc_upstream_mtu: "{{ networks['upstream']['mtu'] }}"
+pvc_upstream_domain: "{{ networks['upstream']['domain'] }}"
+pvc_upstream_subnet: "{{ networks['upstream']['subnet'] }}"
+pvc_upstream_floatingip: "{{ networks['upstream']['floating_ip'] }}"
+pvc_upstream_gatewayip: "{{ networks['upstream']['gateway_ip'] }}"
+pvc_cluster_device: "{{ networks['cluster']['device'] }}"
+pvc_cluster_mtu: "{{ networks['cluster']['mtu'] }}"
+pvc_cluster_domain: "{{ networks['cluster']['domain'] }}"
+pvc_cluster_subnet: "{{ networks['cluster']['subnet'] }}"
+pvc_cluster_floatingip: "{{ networks['cluster']['floating_ip'] }}"
+pvc_storage_device: "{{ networks['storage']['device'] }}"
+pvc_storage_mtu: "{{ networks['storage']['mtu'] }}"
+pvc_storage_domain: "{{ networks['storage']['domain'] }}"
+pvc_storage_subnet: "{{ networks['storage']['subnet'] }}"
+pvc_storage_floatingip: "{{ networks['storage']['floating_ip'] }}"
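
With the example networks dictionary from base.yml above, these templated lookups would resolve to values along the following lines (shown only for illustration; the real values are whatever base.yml defines):

pvc_upstream_device: "bondU"
pvc_upstream_mtu: "1500"
pvc_upstream_domain: "upstream.local"
pvc_cluster_device: "vlan1001"
pvc_cluster_domain: "pvc-cluster.local"
pvc_storage_device: "vlan1002"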


@@ -196,6 +196,22 @@
 # System configuration
 #
 
+# networking
+- name: install base interfaces file
+  template:
+    src: etc/network/interfaces.j2
+    dest: /etc/network/interfaces
+  tags: network
+
+- name: install per-interface files
+  template:
+    src: etc/network/interfaces-perif.j2
+    dest: /etc/network/interfaces.d/{{ network.key }}
+  with_dict: "{{ networks }}"
+  loop_control:
+    loop_var: network
+  tags: network
+
 # capabilities
 - name: set ping capabilities
   capabilities:
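
Because the per-interface task loops over the networks dictionary with with_dict, and loop_var exposes each entry as network.key (the network name) and network.value (its definition), the example base.yml above would result in one file per defined network:

/etc/network/interfaces.d/upstream
/etc/network/interfaces.d/cluster
/etc/network/interfaces.d/storage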


@@ -0,0 +1,13 @@
# PVC node interface-specific interface file - {{ network.key }}
# {{ ansible_managed }}

auto {{ network.value['device'] }}
iface {{ network.value['device'] }} inet manual
    post-up ip link set $IFACE mtu {{ network.value['mtu'] }}
{% if network.value['type'] == 'bond' %}
    bond_mode {{ network.value['bond_mode'] }}
    slaves {% for device in network.value['bond_devices'] %}{{ device }} {% endfor %}
{% endif %}
{% if network.value['type'] == 'vlan' %}
    vlan_raw_device {{ network.value['raw_device'] }}
{% endif %}
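
For illustration, rendering this template against the example "upstream" bond from base.yml would produce roughly the following file (the exact ansible_managed banner and whitespace depend on the Ansible configuration):

# PVC node interface-specific interface file - upstream
# Ansible managed

auto bondU
iface bondU inet manual
    post-up ip link set $IFACE mtu 1500
    bond_mode 802.3ad
    slaves enp1s0f0 enp1s0f1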


@@ -0,0 +1,10 @@
# PVC node interfaces file
# {{ ansible_managed }}

# Loopback interface
auto lo
iface lo inet loopback
iface lo inet6 loopback

# Include directory
source-directory /etc/network/interfaces.d
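
As a quick sanity check on a node (assuming the stock Debian ifupdown tools), listing the interfaces ifupdown knows about should now show the loopback plus every per-network file sourced from /etc/network/interfaces.d, e.g. for the example configuration:

$ ifquery --list
lo
bondU
vlan1001
vlan1002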