Unify and standardize inventory_hostname
Mixing FQDNs and bare inventory hostnames was causing some confusing conflicts, so create a new fact called "this_node", set to inventory_hostname.split('.')[0] (i.e. the short name), and use it everywhere instead of an FQDN or the raw inventory_hostname.
parent 5de3ab0c3a
commit d24cb8a8ef
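For reference, the resulting pattern looks like the following minimal sketch (the play wrapper and the debug task are illustrative only, not files in this repo): set this_node once from inventory_hostname, then filter pvc_nodes on it wherever a per-node value is needed.

    # Hypothetical standalone play demonstrating the this_node pattern
    - hosts: all
      tasks:
        # inventory_hostname may be an FQDN ("pvchv1.example.com") or already a
        # short name ("pvchv1"); splitting on '.' always yields the short form.
        - set_fact:
            this_node: "{{ inventory_hostname.split('.')[0] }}"
          tags: always

        # Per-node lookups now match on the short name, regardless of how the
        # host is written in the Ansible inventory.
        - debug:
            msg: "upstream IP is {% for node in pvc_nodes if node.hostname == this_node %}{{ node.upstream_ip }}{% endfor %}"

The same short-name key is used for the ipmi['hosts'] map, so a host entry named "pvchv1" matches whether the inventory lists pvchv1 or pvchv1.example.com.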
@@ -54,21 +54,21 @@ ipmi:
       username: "host"
       password: ""
   hosts:
-    "pvchv1": # Use the inventory hostname here
-      hostname: pvchv1-lom # A valid short name (e.g. from /etc/hosts) or an FQDN must be
-                           # used here; PVC connects to this *hostname* for fencing.
+    "pvchv1": # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
+      hostname: pvchv1-lom # A valid short name (e.g. from /etc/hosts) or an FQDN must be used here and it must resolve to address.
+                           # PVC connects to this *hostname* for fencing.
       address: 192.168.100.101
       netmask: 255.255.255.0
       gateway: 192.168.100.1
-    "pvchv2": # Use the inventory hostname here
-      hostname: pvchv2-lom # A valid short name (e.g. from /etc/hosts) or an FQDN must be
-                           # used here; PVC connects to this *hostname* for fencing.
+    "pvchv2": # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
+      hostname: pvchv2-lom # A valid short name (e.g. from /etc/hosts) or an FQDN must be used here and it must resolve to address.
+                           # PVC connects to this *hostname* for fencing.
       address: 192.168.100.102
       netmask: 255.255.255.0
       gateway: 192.168.100.1
-    "pvchv3": # Use the inventory hostname here
-      hostname: pvchv3-lom # A valid short name (e.g. from /etc/hosts) or an FQDN must be
-                           # used here; PVC connects to this *hostname* for fencing.
+    "pvchv3": # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
+      hostname: pvchv3-lom # A valid short name (e.g. from /etc/hosts) or an FQDN must be used here and it must resolve to address.
+                           # PVC connects to this *hostname* for fencing.
       address: 192.168.100.103
       netmask: 255.255.255.0
       gateway: 192.168.100.1
@@ -77,34 +77,34 @@ pvc_routers:
 # PVC Node list
 # > Every node configured with this playbook must be specified in this list.
 pvc_nodes:
-  - hostname: "pvchv1" # This name MUST match the Ansible inventory_hostname
+  - hostname: "pvchv1" # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
     is_coordinator: yes
     node_id: 1
     router_id: "192.168.100.11"
     upstream_ip: "192.168.100.11"
     cluster_ip: "10.0.0.1"
     storage_ip: "10.0.1.1"
-    ipmi_host: "{{ ipmi['hosts']['pvchv1']['hostname'] }}" # Note the node inventory hostname key in here
+    ipmi_host: "{{ ipmi['hosts']['pvchv1']['hostname'] }}" # Note the node hostname key in here
     ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
     ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
-  - hostname: "pvchv2"
+  - hostname: "pvchv2" # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
     is_coordinator: yes
     node_id: 2
     router_id: "192.168.100.12"
     upstream_ip: "192.168.100.12"
     cluster_ip: "10.0.0.2"
     storage_ip: "10.0.1.2"
-    ipmi_host: "{{ ipmi['hosts']['pvchv2']['hostname'] }}" # Note the node inventory hostname key in here
+    ipmi_host: "{{ ipmi['hosts']['pvchv2']['hostname'] }}" # Note the node hostname key in here
     ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
     ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"
-  - hostname: "pvchv3"
+  - hostname: "pvchv3" # This name MUST match the Ansible inventory_hostname's first portion, i.e. "inventory_hostname.split('.')[0]"
     is_coordinator: yes
     node_id: 3
     router_id: "192.168.100.13"
     upstream_ip: "192.168.100.13"
     cluster_ip: "10.0.0.3"
     storage_ip: "10.0.1.3"
-    ipmi_host: "{{ ipmi['hosts']['pvchv3']['hostname'] }}" # Note the node inventory hostname key in here
+    ipmi_host: "{{ ipmi['hosts']['pvchv3']['hostname'] }}" # Note the node hostname key in here
     ipmi_user: "{{ ipmi['users']['pvc']['username'] }}"
     ipmi_password: "{{ ipmi['users']['pvc']['password'] }}"

@@ -15,9 +15,14 @@
   when: newhost_check.changed
   tags: always

+# Set this_node fact
+- set_fact:
+    this_node: "{{ inventory_hostname.split('.')[0] }}"
+  tags: always
+
 # Set coordinator state fact
 - set_fact:
-    is_coordinator: "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.is_coordinator }}{% endfor %}"
+    is_coordinator: "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.is_coordinator }}{% endfor %}"
   tags: always

 #
@@ -1,5 +1,5 @@
 iface ipmi inet manual
     pre-up ipmitool lan set 1 ipsrc static
-    pre-up ipmitool lan set 1 ipaddr {{ ipmi['hosts'][inventory_hostname]['address'] }}
-    pre-up ipmitool lan set 1 netmask {{ ipmi['hosts'][inventory_hostname]['netmask'] }}
-    pre-up ipmitool lan set 1 defgw ipaddr {{ ipmi['hosts'][inventory_hostname]['gateway'] }}
+    pre-up ipmitool lan set 1 ipaddr {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['address'] }}
+    pre-up ipmitool lan set 1 netmask {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['netmask'] }}
+    pre-up ipmitool lan set 1 defgw ipaddr {{ ipmi['hosts'][inventory_hostname.split('.')[0]]['gateway'] }}
@@ -38,13 +38,13 @@
     - "{{ bridges }}"

 - name: add IP addresses to upstream bridge
-  command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %} dev brupstream
+  command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %} dev brupstream
   ignore_errors: yes

 - name: add IP addresses to cluster bridge
-  command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %} dev brcluster
+  command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %} dev brcluster
   ignore_errors: yes

 - name: add IP addresses to storage bridge (will error if storage == cluster)
-  command: ip address add {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %} dev brstorage
+  command: ip address add {% for node in pvc_nodes if node.hostname == this_node %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %} dev brstorage
   ignore_errors: yes
@@ -1,8 +1,13 @@
 ---

-# Set coordinator state
+# Set this_node fact
 - set_fact:
-    is_coordinator: "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.is_coordinator }}{% endfor %}"
+    this_node: "{{ inventory_hostname.split('.')[0] }}"
+  tags: always
+
+# Set coordinator state fact
+- set_fact:
+    is_coordinator: "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.is_coordinator }}{% endfor %}"
   tags: always

 # First-run check
@@ -16,7 +16,7 @@ line vty
 ! BGP EVPN mesh configuration
 !
 router bgp {{ pvc_asn }}
-  bgp router-id {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.router_id }}{% endfor %}
+  bgp router-id {% for node in pvc_nodes if node.hostname == this_node %}{{ node.router_id }}{% endfor %}

   no bgp default ipv4-unicast
 ! BGP sessions with route reflectors
@@ -3,6 +3,6 @@

 listen_tls = 0
 listen_tcp = 1
-listen_addr = "{% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}{% endfor %}"
+listen_addr = "{% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}{% endfor %}"
 tcp_port = "16509"
 auth_tcp = "none"
@@ -3,9 +3,9 @@ namespace: /patroni/
 name: {{ ansible_hostname }}

 restapi:
-    listen: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:8008'{% endfor %}
+    listen: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:8008'{% endfor %}

-    connect_address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:8008'{% endfor %}
+    connect_address: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:8008'{% endfor %}

 zookeeper:
     hosts: [ {% for node in pvc_nodes if node.is_coordinator %}"{{ node.cluster_ip }}:2181"{% if not loop.last %},{% endif %}{% endfor %} ]
@@ -44,7 +44,7 @@ bootstrap:

 postgresql:
     listen: '0.0.0.0:5432'
-    connect_address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}'{{ node.cluster_ip }}:5432'
+    connect_address: {% for node in pvc_nodes if node.hostname == this_node %}'{{ node.cluster_ip }}:5432'
 {% endfor %}
     log_destination: 'stderr'
     log_min_messages: INFO
@@ -2,7 +2,7 @@
 # pvcnoded configuration
 # {{ ansible_managed }}
 pvc:
-  node: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.hostname.split('.')[0] }}{% endfor %}
+  node: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.hostname.split('.')[0] }}{% endfor %}

   debug: False
   functions:
@@ -55,11 +55,11 @@ pvc:
       successful_fence: {{ pvc_fence_successful_action }}
       failed_fence: {{ pvc_fence_failed_action }}
     ipmi:
-      host: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_host }}{% endfor %}
+      host: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_host }}{% endfor %}

-      user: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_user }}{% endfor %}
+      user: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_user }}{% endfor %}

-      pass: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.ipmi_password }}{% endfor %}
+      pass: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.ipmi_password }}{% endfor %}

     migration:
       target_selector: {{ pvc_fence_migrate_target_selector }}
@@ -87,17 +87,17 @@ pvc:
     upstream:
       device: {{ pvc_upstream_device }}
       mtu: {{ pvc_upstream_mtu }}
-      address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %}
+      address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.upstream_ip }}/{{ pvc_upstream_netmask }}{% endfor %}

     cluster:
       device: {{ pvc_cluster_device }}
       mtu: {{ pvc_cluster_mtu }}
-      address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %}
+      address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}/{{ pvc_cluster_netmask }}{% endfor %}

     storage:
       device: {{ pvc_storage_device }}
       mtu: {{ pvc_storage_mtu }}
-      address: {% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %}
+      address: {% for node in pvc_nodes if node.hostname == this_node %}{{ node.storage_ip }}/{{ pvc_storage_netmask }}{% endfor %}

   storage:
     ceph_config_file: "/etc/ceph/ceph.conf"
@@ -21,7 +21,7 @@ autopurge.purgeInterval=1

 # Listen on port 2181 on the cluster IP
 clientPort=2181
-clientPortAddress={% for node in pvc_nodes if node.hostname == inventory_hostname %}{{ node.cluster_ip }}{% endfor %}
+clientPortAddress={% for node in pvc_nodes if node.hostname == this_node %}{{ node.cluster_ip }}{% endfor %}

 # Node list - all coordinators
 {% for node in pvc_nodes if node.is_coordinator %}