From 243c910d6d2a9fcd0012a700fbf04f35bdd710d3 Mon Sep 17 00:00:00 2001
From: Joshua Boniface
Date: Thu, 11 Nov 2021 17:14:34 -0500
Subject: [PATCH] Unify and standardize inventory_hostname

Inconsistent use of inventory_hostname was causing some confusing
conflicts, so create a new fact called "this_node", which is
inventory_hostname.split('.')[0], i.e. the short name, and use that
everywhere instead of an FQDN or the true inventory hostname.
---
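Reviewer note (not part of the commit): a minimal sketch of how the new
fact behaves, assuming a hypothetical inventory whose hosts are listed as
FQDNs such as "pvchv1.example.tld"; the play and names below are
illustrative only. For an FQDN, split('.')[0] yields the short name, and
an already-short name passes through unchanged, so both inventory styles
resolve to the same pvc_nodes and ipmi host keys:

    # Hypothetical sanity-check play; host and domain names are illustrative.
    - hosts: all
      gather_facts: no
      tasks:
        - set_fact:
            this_node: "{{ inventory_hostname.split('.')[0] }}"
        # prints e.g. "pvchv1.example.tld -> pvchv1"; "pvchv1" stays "pvchv1"
        - debug:
            msg: "{{ inventory_hostname }} -> {{ this_node }}"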
"inventory_hostname.split('.')[0]" is_coordinator: yes node_id: 1 router_id: "192.168.100.11" upstream_ip: "192.168.100.11" cluster_ip: "10.0.0.1" storage_ip: "10.0.1.1" - ipmi_host: "{{ ipmi['hosts']['pvchv1']['hostname'] }}" # Note the node inventory hostname key in here + ipmi_host: "{{ ipmi['hosts']['pvchv1']['hostname'] }}" # Note the node hostname key in here ipmi_user: "{{ ipmi['users']['pvc']['username'] }}" ipmi_password: "{{ ipmi['users']['pvc']['password'] }}" - - hostname: "pvchv2" + - hostname: "pvchv2" # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]" is_coordinator: yes node_id: 2 router_id: "192.168.100.12" upstream_ip: "192.168.100.12" cluster_ip: "10.0.0.2" storage_ip: "10.0.1.2" - ipmi_host: "{{ ipmi['hosts']['pvchv2']['hostname'] }}" # Note the node inventory hostname key in here + ipmi_host: "{{ ipmi['hosts']['pvchv2']['hostname'] }}" # Note the node hostname key in here ipmi_user: "{{ ipmi['users']['pvc']['username'] }}" ipmi_password: "{{ ipmi['users']['pvc']['password'] }}" - - hostname: "pvchv3" + - hostname: "pvchv3" # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]" is_coordinator: yes node_id: 3 router_id: "192.168.100.13" upstream_ip: "192.168.100.13" cluster_ip: "10.0.0.3" storage_ip: "10.0.1.3" - ipmi_host: "{{ ipmi['hosts']['pvchv3']['hostname'] }}" # Note the node inventory hostname key in here + ipmi_host: "{{ ipmi['hosts']['pvchv3']['hostname'] }}" # Note the node hostname key in here ipmi_user: "{{ ipmi['users']['pvc']['username'] }}" ipmi_password: "{{ ipmi['users']['pvc']['password'] }}" diff --git a/roles/base/tasks/main.yml b/roles/base/tasks/main.yml index 81f29ea..4424bc8 100644 --- a/roles/base/tasks/main.yml +++ b/roles/base/tasks/main.yml @@ -15,9 +15,14 @@ when: newhost_check.changed tags: always +# Set this_node fact +- set_fact: + this_node: "{{ inventory_hostname.split('.')[0] }}" + tags: always + # Set coordinator state fact - set_fact: - is_coordinator: "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.is_coordinator }}{% endfor %}" + is_coordinator: "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.is_coordinator }}{% endfor %}" tags: always # diff --git a/roles/base/templates/etc/network/interfaces.d/ipmi.j2 b/roles/base/templates/etc/network/interfaces.d/ipmi.j2 index 572ef52..6cca726 100644 --- a/roles/base/templates/etc/network/interfaces.d/ipmi.j2 +++ b/roles/base/templates/etc/network/interfaces.d/ipmi.j2 @@ -1,5 +1,5 @@ iface ipmi inet manual pre-up ipmitool lan set 1 ipsrc static - pre-up ipmitool lan set 1 ipaddr {{ ipmi['hosts'][inventory_hostname]['address'] }} - pre-up ipmitool lan set 1 netmask {{ ipmi['hosts'][inventory_hostname]['netmask'] }} - pre-up ipmitool lan set 1 defgw ipaddr {{ ipmi['hosts'][inventory_hostname]['gateway'] }} + pre-up ipmitool lan set 1 ipaddr {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['address'] }} + pre-up ipmitool lan set 1 netmask {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['netmask'] }} + pre-up ipmitool lan set 1 defgw ipaddr {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['gateway'] }} diff --git a/roles/pvc/tasks/common/add_cluster_ips.yml b/roles/pvc/tasks/common/add_cluster_ips.yml index b16bb54..4de35b0 100644 --- a/roles/pvc/tasks/common/add_cluster_ips.yml +++ b/roles/pvc/tasks/common/add_cluster_ips.yml @@ -38,13 +38,13 @@ - "{{ bridges }}" - name: add IP addresses to upstream bridge - command: ip address add 
-  command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %} dev brupstream
+  command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %} dev brupstream
   ignore_errors: yes
 
 - name: add IP addresses to cluster bridge
-  command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %} dev brcluster
+  command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %} dev brcluster
   ignore_errors: yes
 
 - name: add IP addresses to storage bridge (will error if storage == cluster)
-  command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %} dev brstorage
+  command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %} dev brstorage
   ignore_errors: yes
diff --git a/roles/pvc/tasks/main.yml b/roles/pvc/tasks/main.yml
index 1de19dd..b6aeba0 100644
--- a/roles/pvc/tasks/main.yml
+++ b/roles/pvc/tasks/main.yml
@@ -1,8 +1,13 @@
 ---
 
-# Set coordinator state
+# Set this_node fact
 - set_fact:
-    is_coordinator: "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.is_coordinator }}{% endfor %}"
+    this_node: "{{ inventory_hostname.split('.')[0] }}"
+  tags: always
+
+# Set coordinator state fact
+- set_fact:
+    is_coordinator: "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.is_coordinator }}{% endfor %}"
   tags: always
 
 # First-run check
diff --git a/roles/pvc/templates/frr/frr.conf.j2 b/roles/pvc/templates/frr/frr.conf.j2
index e1a43ed..5c2c4f4 100644
--- a/roles/pvc/templates/frr/frr.conf.j2
+++ b/roles/pvc/templates/frr/frr.conf.j2
@@ -16,7 +16,7 @@ line vty
 ! BGP EVPN mesh configuration
 !
 router bgp {{ pvc_asn }}
- bgp router-id {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.router_id }}{% endfor %}
+ bgp router-id {% for node in pvc_nodes if node.hostname == this_node %}{{ node.router_id }}{% endfor %}
  no bgp default ipv4-unicast
 !
 ! BGP sessions with route reflectors
diff --git a/roles/pvc/templates/libvirt/libvirtd.conf.j2 b/roles/pvc/templates/libvirt/libvirtd.conf.j2
index 025d896..8f53864 100644
--- a/roles/pvc/templates/libvirt/libvirtd.conf.j2
+++ b/roles/pvc/templates/libvirt/libvirtd.conf.j2
@@ -3,6 +3,6 @@
 
 listen_tls = 0
 listen_tcp = 1
-listen_addr = "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}{% endfor %}"
+listen_addr = "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}{% endfor %}"
 tcp_port = "16509"
 auth_tcp = "none"
diff --git a/roles/pvc/templates/patroni/patroni.yml.j2 b/roles/pvc/templates/patroni/patroni.yml.j2
index 1a13f7d..a75cefa 100644
--- a/roles/pvc/templates/patroni/patroni.yml.j2
+++ b/roles/pvc/templates/patroni/patroni.yml.j2
@@ -3,9 +3,9 @@ namespace: /patroni/
 name: {{ ansible_hostname }}
 
 restapi:
-    listen: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:8008'{% endfor %}
+    listen: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:8008'{% endfor %}
 
-    connect_address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:8008'{% endfor %}
+    connect_address: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:8008'{% endfor %}
 
 zookeeper:
     hosts: [ {% for node in pvc_nodes if node.is_coordinator %}"{{ node.cluster_ip }}:2181"{% if not loop.last %},{% endif %}{% endfor %} ]
@@ -44,7 +44,7 @@ bootstrap:
 
 postgresql:
     listen: '0.0.0.0:5432'
-    connect_address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:5432'
+    connect_address: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:5432'
 {% endfor %}
     log_destination: 'stderr'
     log_min_messages: INFO
diff --git a/roles/pvc/templates/pvc/pvcnoded.yaml.j2 b/roles/pvc/templates/pvc/pvcnoded.yaml.j2
index c136826..d2e6900 100644
--- a/roles/pvc/templates/pvc/pvcnoded.yaml.j2
+++ b/roles/pvc/templates/pvc/pvcnoded.yaml.j2
@@ -2,7 +2,7 @@
 # pvcnoded configuration
 # {{ ansible_managed }}
 pvc:
-  node: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.hostname.split('.')[0] }}{% endfor %}
+  node: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.hostname.split('.')[0] }}{% endfor %}
 
   debug: False
   functions:
@@ -55,11 +55,11 @@
         successful_fence: {{ pvc_fence_successful_action }}
         failed_fence: {{ pvc_fence_failed_action }}
       ipmi:
-        host: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_host }}{% endfor %}
+        host: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_host }}{% endfor %}
 
-        user: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_user }}{% endfor %}
+        user: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_user }}{% endfor %}
 
-        pass: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_password }}{% endfor %}
+        pass: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_password }}{% endfor %}
 
       migration:
         target_selector: {{ pvc_fence_migrate_target_selector }}
@@ -87,17 +87,17 @@
       upstream:
         device: {{ pvc_upstream_device }}
         mtu: {{ pvc_upstream_mtu }}
-        address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %}
+        address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %}
 
       cluster:
         device: {{ pvc_cluster_device }}
         mtu: {{ pvc_cluster_mtu }}
-        address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %}
+        address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %}
 
       storage:
         device: {{ pvc_storage_device }}
         mtu: {{ pvc_storage_mtu }}
-        address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %}
+        address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %}
 
   storage:
     ceph_config_file: "/etc/ceph/ceph.conf"
diff --git a/roles/pvc/templates/zookeeper/zoo.cfg.j2 b/roles/pvc/templates/zookeeper/zoo.cfg.j2
index 57e17c8..c67024a 100644
--- a/roles/pvc/templates/zookeeper/zoo.cfg.j2
+++ b/roles/pvc/templates/zookeeper/zoo.cfg.j2
@@ -21,7 +21,7 @@ autopurge.purgeInterval=1
 
 # Listen on port 2181 on the cluster IP
 clientPort=2181
-clientPortAddress={% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}{% endfor %}
+clientPortAddress={% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}{% endfor %}
 
 # Node list - all coordinators
 {% for node in pvc_nodes if node.is_coordinator %}
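
Reviewer note (not part of the commit): after applying, the derived short
name can be spot-checked per host with an ad-hoc run; the inventory path
below is illustrative.

    ansible all -i hosts -m debug -a 'msg={{ inventory_hostname.split(".")[0] }}'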