pvc-ansible/roles/pvc/handlers/main.yml

---
- name: restart zookeeper
  service:
    name: zookeeper
    state: restarted
- name: restart prometheus-zookeeper
  service:
    name: prometheus-zookeeper-exporter
    state: restarted
- name: restart libvirtd
  service:
    name: libvirtd
    state: restarted
  ignore_errors: true # Ansible sometimes inexplicably fails to restart
- name: restart frr
  service:
    name: frr
    state: restarted
- name: restart patroni
  service:
    name: patroni
    state: restarted
  ignore_errors: true
- name: restart prometheus-postgres
  service:
    name: prometheus-postgres-exporter
    state: restarted
- name: restart keydb
  service:
    name: keydb-server
    state: restarted
  ignore_errors: true
# Restart services one-at-a-time
# Ideally, this would be accomplished with serial=1, but Ansible doesn't support that for
# handlers and likely never will. Instead, we just run the service restart manually, sleeping X
# seconds before each restart, where X is 30 seconds times the "host_id" minus 1. Thus, the
# following host configuration leads to the specified sleep times:
# * pvchv1: 0s
# * pvchv2: 30s
# * pvchv3: 60s
# * etc.
# In a practical sense, this results in a "serial=1" sequence of restarts, allowing the service
# to float its primary around after a configuration change.
# Note that this only applies to coordinators; hypervisor-only nodes restart immediately in
# parallel, and the first two of three handlers do not apply to hypervisor-only nodes.
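# As a worked illustration only (the host_id and this_node values here are assumed), on a
# coordinator with ansible_local.host_id=2 and this_node=pvchv2, the templated shell line of
# the "restart ceph-mon" handler below renders to roughly:
#   sleep 30 && systemctl restart ceph-mon@pvchv2.service
# On a hypervisor-only node the {% if is_coordinator %} guard renders an empty command, and
# ignore_errors swallows any resulting failure.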
- name: restart ceph-mon
  shell: "{% if is_coordinator %}sleep {{ 30 * (hostvars[inventory_hostname].ansible_local.host_id|int - 1) }} && systemctl restart ceph-mon@{{ this_node }}.service{% endif %}"
  ignore_errors: true
- name: restart ceph-mgr
  shell: "{% if is_coordinator %}sleep {{ 30 * (hostvars[inventory_hostname].ansible_local.host_id|int - 1) }} && systemctl restart ceph-mgr@{{ this_node }}.service{% endif %}"
  ignore_errors: true
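# Usage sketch only (the task name and template paths below are hypothetical): tasks elsewhere
# in this role trigger these handlers by name via notify, e.g.:
#
# - name: install zookeeper configuration
#   template:
#     src: zookeeper/zoo.cfg.j2
#     dest: /etc/zookeeper/conf/zoo.cfg
#   notify:
#     - restart zookeeper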