Reorganize and rejigger

Joshua Boniface 2023-09-01 15:42:19 -04:00
parent 5cbce17bd1
commit a922fe4df7
16 changed files with 378 additions and 367 deletions

View File

@@ -1 +1 @@
-../../../group_vars
+../../../files

View File

@@ -1,153 +0,0 @@
---
- name: stop the monitor daemon
service:
name: ceph-mon@{{ ansible_hostname }}
state: stopped
ignore_errors: yes
- name: reset any systemd failures
command: systemctl reset-failed
- block:
- name: generate cluster FSID
command: uuidgen
register: fsid_raw
delegate_to: localhost
- set_fact:
fsid: "{{ fsid_raw.stdout }}"
- name: initialize bootstrap ceph.conf
file:
dest: /etc/ceph/ceph.conf
state: touch
- name: set fsid in bootstrap ceph.conf
lineinfile:
dest: /etc/ceph/ceph.conf
line: "fsid = {{ fsid }}"
state: present
- name: set mon initial members in bootstrap ceph.conf
lineinfile:
dest: /etc/ceph/ceph.conf
line: "mon initial members = {% for host in pvc_nodes %}{{ host.hostname }}{% if not loop.last %},{% endif %}{% endfor %}"
state: present
- name: set mon hosts in bootstrap ceph.conf
lineinfile:
dest: /etc/ceph/ceph.conf
line: "mon host = {% for host in pvc_nodes %}{{ host.storage_ip }}{% if not loop.last %},{% endif %}{% endfor %}"
state: present
- name: create temporary directory
file:
dest: /tmp/ceph-bootstrap
state: directory
- name: create mon keyring
command: ceph-authtool --create-keyring /tmp/ceph-bootstrap/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
- name: create client admin keyring
command: ceph-authtool --create-keyring /tmp/ceph-bootstrap/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
- name: add client admin keyring to mon keyring
command: ceph-authtool /tmp/ceph-bootstrap/ceph.mon.keyring --import-keyring /tmp/ceph-bootstrap/ceph.client.admin.keyring
- name: create OSD bootstrap keyring
command: ceph-authtool --create-keyring /tmp/ceph-bootstrap/ceph.osd.bootstrap.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
- name: add OSD bootstrap keyring to mon keyring
command: ceph-authtool /tmp/ceph-bootstrap/ceph.mon.keyring --import-keyring /tmp/ceph-bootstrap/ceph.osd.bootstrap.keyring
- name: create monmap
command: monmaptool --create --fsid {{ fsid }} /tmp/ceph-bootstrap/monmap
- name: add monitors to monmap
command: monmaptool --add {{ item.hostname }} {{ item.storage_ip }} --fsid {{ fsid }} /tmp/ceph-bootstrap/monmap
with_items:
- "{{ pvc_nodes }}"
- name: copy initial ceph.conf to the bootstrap directory
copy:
src: /etc/ceph/ceph.conf
dest: /tmp/ceph-bootstrap/ceph.conf
remote_src: yes
- name: add additional configuration lines to ceph.conf
lineinfile:
dest: /tmp/ceph-bootstrap/ceph.conf
line: "{{ item }}"
state: present
with_items:
- "public network = {{ pvc_cluster_subnet }}"
- "cluster network = {{ pvc_storage_subnet }}"
- "auth cluster required = cephx"
- "auth service required = cephx"
- "auth client required = cephx"
- "osd journal size = 2"
- "osd pool default size = 3"
- "osd pool default min size = 2"
- "osd pool default pg num = 512"
- "osd pool default pgp num = 512"
- "osd crush chooseleaf type = 1"
- name: collect bootstrapped Ceph files into the role
fetch:
src: /tmp/ceph-bootstrap/{{ item }}
dest: roles/pvc/files/ceph/{{ hostvars[inventory_hostname].group_names[0] }}/ceph/
flat: yes
with_items:
- ceph.conf
- ceph.mon.keyring
- ceph.client.admin.keyring
- ceph.osd.bootstrap.keyring
- monmap
- name: remove the temporary bootstrap directory
file:
dest: /tmp/ceph-bootstrap
state: absent
force: yes
run_once: true
- name: deploy out configurations to all nodes
copy:
src: ceph/{{ hostvars[inventory_hostname].group_names[0] }}/ceph/{{ item }}
dest: /etc/ceph/{{ item }}
owner: ceph
group: ceph
mode: 0640
with_items:
- ceph.conf
- ceph.mon.keyring
- ceph.client.admin.keyring
- ceph.osd.bootstrap.keyring
- monmap
- name: create monitor data directory
file:
dest: /var/lib/ceph/mon/ceph-{{ ansible_hostname }}
state: directory
owner: ceph
group: ceph
mode: 0750
- name: populate monitor with map and keys
command: ceph-mon --mkfs -i {{ ansible_hostname }} --monmap /etc/ceph/monmap --keyring /etc/ceph/ceph.mon.keyring
become_user: ceph
- name: touch monitor done file
file:
dest: /var/lib/ceph/mon/ceph-{{ ansible_hostname }}/done
state: touch
- name: start the monitor daemon
service:
name: ceph-mon@{{ ansible_hostname }}
state: started
enabled: yes
- name: create Libvirt keyring
command: ceph auth get-or-create client.libvirt mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=pvc*'
run_once: yes

View File

@@ -1,46 +0,0 @@
---
- name: initial deployment check
shell: "echo 'bootstrapped' > /etc/ceph-install"
register: newhost
args:
creates: "/etc/ceph-install"
- name: install packages
apt:
name:
- ceph-osd
- ceph-mds
- ceph-mon
- ceph-mgr
- radosgw
- libjemalloc2
state: latest
- name: install sysctl tweaks
template:
src: ceph/sysctl.conf.j2
dest: /etc/sysctl.d/pvc-ceph.conf
- name: activate sysctl tweaks
command: sysctl -p /etc/sysctl.d/pvc-ceph.conf
- name: install user limits overrides
template:
src: ceph/limits.conf.j2
dest: /etc/security/limits.d/99-pvc-ceph.conf
- name: install ceph default config
template:
src: ceph/default.conf.j2
dest: /etc/default/ceph
- name: create ceph configuration directory
file:
dest: /etc/ceph
state: directory
- include: add_cluster_ips.yml
when: newhost.changed
- include: bootstrap_ceph.yml
when: newhost.changed

View File

@@ -0,0 +1,107 @@
---
- name: generate cluster FSID
command: uuidgen
register: fsid_raw
delegate_to: localhost
- set_fact:
fsid: "{{ fsid_raw.stdout }}"
- name: initialize bootstrap ceph.conf
file:
dest: /etc/ceph/ceph.conf
state: touch
- name: set global section in bootstrap ceph.conf
lineinfile:
dest: /etc/ceph/ceph.conf
line: "[global]"
state: present
- name: set fsid in bootstrap ceph.conf
lineinfile:
dest: /etc/ceph/ceph.conf
line: "fsid = {{ fsid }}"
state: present
- name: set mon initial members in bootstrap ceph.conf
lineinfile:
dest: /etc/ceph/ceph.conf
line: "mon initial members = {% for host in pvc_nodes if host.is_coordinator %}{{ host.hostname }}{% if not loop.last %},{% endif %}{% endfor %}"
state: present
- name: set mon hosts in bootstrap ceph.conf
lineinfile:
dest: /etc/ceph/ceph.conf
line: "mon host = {% for host in pvc_nodes if host.is_coordinator %}{{ host.cluster_ip }}{% if not loop.last %},{% endif %}{% endfor %}"
state: present
- name: create temporary directory
file:
dest: /tmp/ceph-bootstrap
state: directory
- name: create mon keyring
command: ceph-authtool --create-keyring /tmp/ceph-bootstrap/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
- name: create client admin keyring
command: ceph-authtool --create-keyring /tmp/ceph-bootstrap/ceph.client.admin.keyring --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
- name: add client admin keyring to mon keyring
command: ceph-authtool /tmp/ceph-bootstrap/ceph.mon.keyring --import-keyring /tmp/ceph-bootstrap/ceph.client.admin.keyring
- name: create OSD bootstrap keyring
command: ceph-authtool --create-keyring /tmp/ceph-bootstrap/ceph.osd.bootstrap.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
- name: add OSD bootstrap keyring to mon keyring
command: ceph-authtool /tmp/ceph-bootstrap/ceph.mon.keyring --import-keyring /tmp/ceph-bootstrap/ceph.osd.bootstrap.keyring
- name: create monmap
command: monmaptool --create --fsid {{ fsid }} /tmp/ceph-bootstrap/monmap
- name: add monitors to monmap
command: monmaptool --add {{ item.hostname }} {{ item.cluster_ip }} --fsid {{ fsid }} /tmp/ceph-bootstrap/monmap
with_items:
- "{{ pvc_nodes }}"
- name: copy initial ceph.conf to the bootstrap directory
copy:
src: /etc/ceph/ceph.conf
dest: /tmp/ceph-bootstrap/ceph.conf
remote_src: yes
- name: add additional configuration lines to ceph.conf
lineinfile:
dest: /tmp/ceph-bootstrap/ceph.conf
line: "{{ item }}"
state: present
with_items:
- "public network = {{ pvc_cluster_subnet }}"
- "cluster network = {{ pvc_storage_subnet }}"
- "auth cluster required = cephx"
- "auth service required = cephx"
- "auth client required = cephx"
- "osd journal size = 2"
- "osd pool default size = 3"
- "osd pool default min size = 2"
- "osd pool default pg num = 512"
- "osd pool default pgp num = 512"
- "osd crush chooseleaf type = 1"
- name: collect bootstrapped Ceph files into the role
fetch:
src: /tmp/ceph-bootstrap/{{ item }}
dest: roles/pvc/files/ceph/{{ hostvars[inventory_hostname].group_names[0] }}/ceph/
flat: yes
with_items:
- ceph.conf
- ceph.mon.keyring
- ceph.client.admin.keyring
- ceph.osd.bootstrap.keyring
- monmap
- name: remove the temporary bootstrap directory
file:
dest: /tmp/ceph-bootstrap
state: absent
force: yes
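
A possible hardening, not part of this commit: the uuidgen task above generates a fresh FSID on every run of this bootstrap file, so re-running it against an existing cluster would produce a mismatched FSID. A minimal sketch of a reuse guard, assuming GNU grep and the fsid line format written by the lineinfile task above (task names are illustrative):

    - name: check for an fsid from a previous bootstrap
      command: grep -oP '(?<=^fsid = ).*' /etc/ceph/ceph.conf
      register: existing_fsid
      failed_when: no
      changed_when: no

    - name: reuse the existing fsid when one is present
      set_fact:
        fsid: "{{ existing_fsid.stdout }}"
      when: existing_fsid.rc == 0

    - name: generate a new cluster FSID otherwise
      command: uuidgen
      register: fsid_raw
      delegate_to: localhost
      when: existing_fsid.rc != 0

    - name: record the newly generated fsid
      set_fact:
        fsid: "{{ fsid_raw.stdout }}"
      when: existing_fsid.rc != 0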

View File

@@ -0,0 +1,84 @@
---
- name: install packages
apt:
name:
- ceph-osd
- ceph-mds
- ceph-mon
- ceph-mgr
- radosgw
- libjemalloc2
state: latest
- name: install sysctl tweaks
template:
src: ceph/sysctl.conf.j2
dest: /etc/sysctl.d/pvc-ceph.conf
- name: activate sysctl tweaks
command: sysctl -p /etc/sysctl.d/pvc-ceph.conf
- name: install user limits overrides
template:
src: ceph/limits.conf.j2
dest: /etc/security/limits.d/99-pvc-ceph.conf
- name: install ceph default config
template:
src: ceph/default.conf.j2
dest: /etc/default/ceph
- name: create ceph configuration directory
file:
dest: /etc/ceph
state: directory
- include: ceph/bootstrap.yml
when: bootstrap is defined and bootstrap
run_once: yes
- name: install configurations
copy:
src: ceph/{{ hostvars[inventory_hostname].group_names[0] }}/ceph/{{ item }}
dest: /etc/ceph/{{ item }}
owner: ceph
group: ceph
mode: 0640
with_items:
- ceph.conf
- ceph.mon.keyring
- ceph.client.admin.keyring
- ceph.osd.bootstrap.keyring
- monmap
- name: create monitor data directory
file:
dest: /var/lib/ceph/mon/ceph-{{ ansible_hostname }}
state: directory
owner: ceph
group: ceph
mode: 0750
when: newhost is defined and newhost
- name: populate monitor with map and keys
command: ceph-mon --mkfs -i {{ ansible_hostname }} --monmap /etc/ceph/monmap --keyring /etc/ceph/ceph.mon.keyring
become_user: ceph
when: newhost is defined and newhost
- name: touch monitor done file
file:
dest: /var/lib/ceph/mon/ceph-{{ ansible_hostname }}/done
state: touch
become_user: ceph
when: newhost is defined and newhost
- name: start and enable daemons
service:
name: "{{ item }}"
state: started
enabled: yes
with_items:
- ceph-mon@{{ ansible_hostname }}
- ceph-mgr@{{ ansible_hostname }}
- meta: flush_handlers
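
The new main.yml starts ceph-mon and ceph-mgr but does not wait for the monitor to actually reach quorum before later roles talk to the cluster. A hedged sketch of such a check, not part of this commit (retry and delay values are arbitrary):

    - name: wait for this monitor to join quorum
      command: ceph quorum_status --format json
      register: quorum_status
      until: ansible_hostname in (quorum_status.stdout | from_json).quorum_names
      retries: 12
      delay: 10
      changed_when: no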

View File

@@ -21,3 +21,5 @@
     enabled: no
   with_items:
     - frr
+
+- meta: flush_handlers

View File

@@ -0,0 +1,3 @@
---
- name: create Libvirt keyring
command: ceph auth get-or-create client.libvirt mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=pvc*'
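
ceph auth get-or-create only ensures the client.libvirt identity exists in the cluster's auth database; if a keyring file on disk is also wanted, a small follow-up sketch (not part of this commit; the destination path is illustrative):

    - name: export the Libvirt keyring to a file
      command: ceph auth get client.libvirt -o /etc/ceph/ceph.client.libvirt.keyring
      args:
        creates: /etc/ceph/ceph.client.libvirt.keyring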

View File

@@ -20,6 +20,10 @@
     - ceph-secret.xml
   notify: restart libvirtd

+- include: libvirt/bootstrap.yml
+  when: bootstrap is defined and bootstrap
+  run_once: yes
+
 - name: get ceph libvirt secret key
   command: ceph auth get-key client.libvirt
   register: libvirt_key
@@ -45,3 +49,5 @@
     enabled: no
   with_items:
     - libvirtd
+
+- meta: flush_handlers

View File

@ -1,29 +1,48 @@
--- ---
# First-run check
- name: check if this is a new instance
shell: "echo 'bootstrapped' > /etc/pvc-install"
args:
creates: /etc/pvc-install
register: newhost_check
- name: set newhost fact
set_fact:
newhost: yes
when: newhost_check.changed
- include: common/add_cluster_ips.yml
when: newhost is defined and newhost
# General blacklisting of modules
- name: add module blacklist - name: add module blacklist
template: template:
src: system/blacklist.j2 src: system/blacklist.j2
dest: /etc/modprobe.d/blacklist.conf dest: /etc/modprobe.d/blacklist.conf
- include: ceph.yml # Install base databases
- include: ceph/main.yml
tags: pvc-ceph tags: pvc-ceph
- include: zookeeper.yml - include: zookeeper/main.yml
tags: pvc-zookeeper tags: pvc-zookeeper
- meta: flush_handlers - include: patroni/main.yml
- include: libvirt.yml
tags: pvc-libvirt
- include: frr.yml
tags: pvc-frr
- include: patroni.yml
tags: pvc-patroni tags: pvc-patroni
- meta: flush_handlers # Install core services
- include: libvirt/main.yml
tags: pvc-libvirt
- include: pvc.yml - include: frr/main.yml
tags: pvc-frr
- include: remove_cluster_ips.yml
when: newhost is defined and newhost
# Install PVC sequentially
- include: pvc/main.yml
tags: pvc-pvc tags: pvc-pvc
run_once: true run_once: true
delegate_to: "{{ play_host }}" delegate_to: "{{ play_host }}"

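The bootstrap-gated includes above expect a `bootstrap` variable to be supplied by the operator, while `newhost` is derived automatically from the first-run check. One way to provide it, sketched here as an assumption rather than documented project practice, is a group_vars entry (or an equivalent `-e bootstrap=yes` on the ansible-playbook command line):

    # hypothetical group_vars entry; only set this for the initial deployment
    bootstrap: yes
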
View File

@@ -1,141 +0,0 @@
---
- name: install patroni packages via apt
apt:
name:
- python-psycopg2
- python3-kazoo
- patroni
- postgresql-11
state: latest
update_cache: yes
- name: first run check
shell: "echo 'bootstrapped' > /etc/patroni-install"
register: newinstance
args:
creates: /etc/patroni-install
- name: stop and disable postgresql
service:
name: "{{ item }}"
state: stopped
enabled: no
with_items:
- postgresql
- postgresql@11-main
when: newinstance.changed
- name: remove obsolete database directories
file:
dest: "{{ item }}"
state: absent
with_items:
- /etc/postgresql/11
- /var/lib/postgresql/11
when: newinstance.changed
- name: create patroni database directory
file:
dest: /var/lib/postgresql/patroni/pvc
state: directory
owner: postgres
mode: 0700
when: newinstance.changed
- name: install postgresql customization configuration file
template:
src: patroni/postgresql.pvc.conf.j2
dest: /etc/postgresql/postgresql.pvc.conf
owner: postgres
group: sudo
mode: 0640
notify: restart patroni
- name: install patroni configuration file
template:
src: patroni/patroni.yml.j2
dest: /etc/patroni/config.yml
owner: postgres
group: postgres
mode: 0640
notify: restart patroni
- name: install check_mk agent check
copy:
src: patroni/postgres
dest: /usr/lib/check_mk_agent/plugins/postgres
mode: 0755
- name: install initial schema files
copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: postgres
group: sudo
mode: 0640
with_items:
- { src: "patroni/powerdns-schema.sql", dest: "/etc/postgresql/powerdns-schema.sql" }
- block:
- name: ensure patroni services are enabled and started on this host so it becomes leader
service:
name: "{{ item }}.service"
state: started
enabled: yes
with_items:
- patroni
- name: wait 15s for cluster to initialize
pause:
seconds: 15
when: newinstance.changed
run_once: yes
- block:
- name: create user for role
postgresql_user:
name: "{{ pvc_dns_database_user }}"
password: "{{ pvc_dns_database_password }}"
encrypted: yes
state: present
login_host: /run/postgresql
- name: create database for role
postgresql_db:
name: "{{ pvc_dns_database_name }}"
owner: "{{ pvc_dns_database_user }}"
encoding: utf8
state: present
login_host: /run/postgresql
- name: set user privs for role
postgresql_user:
name: "{{ pvc_dns_database_user }}"
db: "{{ pvc_dns_database_name }}"
priv: ALL
login_host: /run/postgresql
- name: create extensions
postgresql_ext:
name: "{{ item }}"
db: "{{ pvc_dns_database_name }}"
login_host: /run/postgresql
with_items: "{{ extensions }}"
when: extensions is defined
- name: import dns database schema
command: "psql -U {{ pvc_dns_database_user }} -f /etc/postgresql/powerdns-schema.sql {{ pvc_dns_database_name }}"
become: yes
become_user: postgres
when: newinstance.changed
run_once: yes
- name: ensure patroni services are enabled and started
service:
name: "{{ item }}.service"
state: started
enabled: yes
with_items:
- patroni

View File

@@ -0,0 +1,50 @@
---
- name: ensure patroni services are enabled and started on this host so it becomes leader
service:
name: "{{ item }}.service"
state: started
enabled: yes
with_items:
- patroni
- name: wait 15s for cluster to initialize
pause:
seconds: 15
- block:
- name: create user for role
postgresql_user:
name: "{{ pvc_dns_database_user }}"
password: "{{ pvc_dns_database_password }}"
encrypted: yes
state: present
login_host: /run/postgresql
- name: create database for role
postgresql_db:
name: "{{ pvc_dns_database_name }}"
owner: "{{ pvc_dns_database_user }}"
encoding: utf8
state: present
login_host: /run/postgresql
- name: set user privs for role
postgresql_user:
name: "{{ pvc_dns_database_user }}"
db: "{{ pvc_dns_database_name }}"
priv: ALL
login_host: /run/postgresql
- name: create extensions
postgresql_ext:
name: "{{ item }}"
db: "{{ pvc_dns_database_name }}"
login_host: /run/postgresql
with_items: "{{ extensions }}"
when: extensions is defined
- name: import dns database schema
command: "psql -U {{ pvc_dns_database_user }} -f /etc/postgresql/powerdns-schema.sql {{ pvc_dns_database_name }}"
become: yes
become_user: postgres
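
The fixed 15-second pause above is a simple heuristic for cluster initialization. A hedged alternative sketch, not part of this commit, that polls patronictl until a leader is reported (retry and delay values are arbitrary):

    - name: wait for the Patroni cluster to elect a leader
      command: patronictl -c /etc/patroni/config.yml list
      register: patroni_list
      until: "'Leader' in patroni_list.stdout"
      retries: 12
      delay: 5
      changed_when: no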

View File

@@ -0,0 +1,85 @@
---
- name: install patroni packages via apt
apt:
name:
- python-psycopg2
- python3-kazoo
- patroni
- postgresql-11
state: latest
update_cache: yes
- name: stop and disable postgresql
service:
name: "{{ item }}"
state: stopped
enabled: no
with_items:
- postgresql
- postgresql@11-main
when: newhost is defined and newhost
- name: remove obsolete database directories
file:
dest: "{{ item }}"
state: absent
with_items:
- /etc/postgresql/11
- /var/lib/postgresql/11
when: newhost is defined and newhost
- name: create patroni database directory
file:
dest: /var/lib/postgresql/patroni/pvc
state: directory
owner: postgres
mode: 0700
when: newhost is defined and newhost
- name: install postgresql customization configuration file
template:
src: patroni/postgresql.pvc.conf.j2
dest: /etc/postgresql/postgresql.pvc.conf
owner: postgres
group: sudo
mode: 0640
notify: restart patroni
- name: install patroni configuration file
template:
src: patroni/patroni.yml.j2
dest: /etc/patroni/config.yml
owner: postgres
group: postgres
mode: 0640
notify: restart patroni
- name: install check_mk agent check
copy:
src: patroni/postgres
dest: /usr/lib/check_mk_agent/plugins/postgres
mode: 0755
- name: install initial schema files
copy:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: postgres
group: sudo
mode: 0640
with_items:
- { src: "patroni/powerdns-schema.sql", dest: "/etc/postgresql/powerdns-schema.sql" }
- include: patroni/bootstrap.yml
run_once: yes
when: bootstrap is defined and bootstrap
- name: ensure patroni services are enabled and started
service:
name: "{{ item }}.service"
state: started
enabled: yes
with_items:
- patroni
- meta: flush_handlers
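
The notify: restart patroni lines above assume a "restart patroni" handler defined elsewhere in the role; the handlers file is not shown in this commit. A minimal sketch of what such a handler would look like (assumed, not taken from the repository):

    - name: restart patroni
      service:
        name: patroni
        state: restarted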

View File

@@ -15,17 +15,6 @@
     - pvcd.yaml
   notify: restart pvcd

-- name: verify if cluster has been started
-  shell: "/usr/share/zookeeper/bin/zkCli.sh stat /nodes 2>&1 | grep -q 'Node does not exist'"
-  register: cluster_init
-  failed_when: no
-  run_once: yes
-
-- name: bootstrap a fresh cluster
-  shell: /usr/bin/pvc init
-  when: cluster_init.rc == 0
-  run_once: yes
-
 - name: stop and disable unnecessary services
   service:
     name: "{{ item }}"
@@ -34,8 +23,10 @@
   with_items:
     - pdns.service

-- include: remove_cluster_ips.yml
-  when: newhost.changed
+- name: bootstrap a fresh cluster
+  shell: /usr/bin/pvc init
+  when: bootstrap is defined and bootstrap
+  run_once: yes

 - name: start and enable services
   service:
@@ -46,3 +37,5 @@
     - pvc-flush.service
     - pvcd.service
     - pvcd.target
+
+- meta: flush_handlers

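With the zkCli-based initialization check removed, /usr/bin/pvc init now runs whenever `bootstrap` is set. A hedged sketch, not part of this commit, of a marker-file guard that keeps a repeat run with bootstrap still set from re-initialising an existing cluster (the marker path is illustrative):

    - name: bootstrap a fresh cluster
      shell: /usr/bin/pvc init && touch /etc/pvc-cluster-initialized
      args:
        creates: /etc/pvc-cluster-initialized
      when: bootstrap is defined and bootstrap
      run_once: yes
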
View File

@@ -24,3 +24,5 @@
     enabled: no
   with_items:
     - zookeeper
+
+- meta: flush_handlers