Compare commits
3 Commits
6362a3b40f...7d329708bc
Author | SHA1 | Date
---|---|---
 | 7d329708bc |
 | 574af89a18 |
 | 38a6437bbc |
@@ -146,45 +146,6 @@ pvc_sriov_enable: False
 # Lowering the stack limit may cause poor performance or crashes in Zookeeper during some tasks.
 #pvc_zookeeper_stack_limit: 256M # 1/4 of default
 
-# CPU pinning configuration via cset
-# > ADVANCED TUNING: For most users, this is unnecessary and PVC will run fine with the default scheduling.
-# Uncomment these options only for testing or if you are certain you meet the following conditions.
-# > These options will tune cpuset (installed by default) to limit Ceph OSDs to certain CPU cores, while
-# simultaneously limiting other system tasks and VMs to the remaining CPU cores. In effect it dedicates the
-# specified CPU cores to Ceph OSDs only to ensure those processes can have dedicated CPU time.
-# > Generally speaking, except in cases where extremely high random read throughput is required and in which
-# the node(s) have a very large number of physical cores, this setting will not improve performance, and
-# may in fact hurt performance. For more details please see the documentation.
-# > For optimal performance when using this setting, you should dedicate exactly 2 cores, and their
-# respective SMT threads if applicable, to each OSD. For instance, with 2 OSDs, 4 real cores (and their
-# corresponding SMT threads if applicable) should be specified. More cores have been seen, in some cases,
-# to drop performance further. For more details please see the documentation.
-# > Use the 'virsh capabilities' command to confirm the exact CPU IDs (and SMT "siblings") for these lists.
-#
-pvc_shield_osds_enable: False
-#pvc_shield_osds_cset:
-#  # This example host has 2x 6-core SMT-enabled CPUs; we want to use cores 0 (+SMT 12) and 2 (+SMT 14), which are
-#  # both on physical CPU 0, for 1x OSD.
-#  - hostname: pvchv1
-#    osd_cset:
-#      - 0
-#      - 2
-#      - 12
-#      - 14
-#  # These example hosts have 1x 8-core SMT-enabled CPU; we want to use cores 0 (+SMT 8) and 1 (+SMT 9) for 1x OSD.
-#  - hostname: pvchv2
-#    osd_cset:
-#      - 0
-#      - 1
-#      - 8
-#      - 9
-#  - hostname: pvchv3
-#    osd_cset:
-#      - 0
-#      - 1
-#      - 8
-#      - 9
-
 # Configuration file networks
 # > Taken from base.yml's configuration; DO NOT MODIFY THIS SECTION.
 pvc_upstream_device: "{{ networks['upstream']['device'] }}"
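The removed comments direct users to `virsh capabilities` to confirm CPU IDs and their SMT siblings. As a rough companion sketch (not part of this change), the sibling groupings can be pulled straight out of that XML; each unique `siblings` attribute is one physical core plus its SMT threads, which is how the example `osd_cset` lists above pair core 0 with thread 12, and so on:

    # List the unique physical-core/SMT-sibling groupings from libvirt's capabilities XML
    virsh capabilities | grep -oP "siblings='[^']+'" | sort -u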
@@ -638,11 +638,14 @@
     state: directory
   tags: base-backups
 
-- name: install daily backup script
+- name: install daily backup scripts
   template:
-    src: etc/cron.daily/pvc-backup.j2
-    dest: /etc/cron.daily/pvc-backup
+    src: "etc/cron.daily/{{ item }}.j2"
+    dest: "/etc/cron.daily/{{ item }}"
     mode: 0755
+  with_items:
+    - pvc-backup
+    - mon-backup
   tags: base-backups
 
 - name: install IPMI network interfaces fragment
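The loop replaces one hard-coded template task with a single task that renders both cron jobs. For each item the src/dest pair expands as follows (illustrative):

    # item: pvc-backup -> src etc/cron.daily/pvc-backup.j2, dest /etc/cron.daily/pvc-backup
    # item: mon-backup -> src etc/cron.daily/mon-backup.j2, dest /etc/cron.daily/mon-backup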
roles/base/templates/etc/cron.daily/mon-backup.j2 (new executable file, 24 lines)
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+# Backup the Ceph monitor data
+# {{ ansible_managed }}
+
+set -o errexit
+
+BACKUP_DIR="/srv/backups"
+DATE="$( date +%Y%m%d )"
+HOSTNAME="$( hostname -s )"
+
+if [[ ! -d ${BACKUP_DIR} ]]; then
+    mkdir -p ${BACKUP_DIR}
+fi
+
+pushd ${BACKUP_DIR} &>/dev/null
+
+MON_BACKUP_FILENAME_BASE="ceph-mon-backup"
+MON_BACKUP_FILENAME="${BACKUP_DIR}/${MON_BACKUP_FILENAME_BASE}.${DATE}.tar.xz"
+
+tar -cJf ${MON_BACKUP_FILENAME} /var/lib/ceph/mon
+find ${BACKUP_DIR} -type f -name "${MON_BACKUP_FILENAME_BASE}*" -mtime +7 -exec rm {} \;
+
+popd &>/dev/null
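The new script rotates its own output: the `find ... -mtime +7 -exec rm` line keeps roughly one week of daily archives. Restoring one is plain `tar`, since GNU tar strips the leading `/` on create and `-C /` puts `/var/lib/ceph/mon` back in place; a sketch, assuming the default `/srv/backups` path (stop the monitor before touching its store, and consult the Ceph documentation first):

    tar -xJf /srv/backups/ceph-mon-backup.<YYYYMMDD>.tar.xz -C /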
@@ -138,56 +138,4 @@
   command: ceph osd crush rule create-replicated replicated_rule default osd
   when: "{{ pvc_nodes | length }} == 1"
 
-# System OSD CPU shielding activation
-- block:
-    - name: install packages
-      apt:
-        name:
-          - cpuset
-          - numactl
-        state: latest
-
-    - name: install ceph-osd-cpuset controller config
-      template:
-        src: ceph/ceph-osd-cpuset-enable.j2
-        dest: /etc/default/ceph-osd-cpuset
-
-    - name: install ceph-osd-cpuset script
-      template:
-        src: ceph/ceph-osd-cpuset.j2
-        dest: /usr/local/sbin/ceph-osd-cpuset
-        mode: 0755
-
-    - name: install ceph-osd-cpuset service unit
-      template:
-        src: ceph/ceph-osd-cpuset.service.j2
-        dest: /etc/systemd/system/ceph-osd-cpuset.service
-      register: systemd_file_cpuset
-
-    - name: create ceph-osd override dropin directory
-      file:
-        dest: /etc/systemd/system/ceph-osd@.service.d
-        state: directory
-
-    - name: install ceph-osd override dropin
-      template:
-        src: ceph/ceph-osd-cpuset.conf.j2
-        dest: /etc/systemd/system/ceph-osd@.service.d/cpuset.conf
-      register: systemd_file_osd
-
-    - name: reload systemd to apply previous changes
-      command: "systemctl daemon-reload"
-      when: systemd_file_cpuset.changed or systemd_file_osd.changed
-
-    - name: enable ceph-osd-cpuset service
-      service:
-        name: ceph-osd-cpuset
-        enabled: yes
-
-    - debug:
-        msg: "NOTICE: Any cpuset configs have NOT been applied to the running system. This node must be rebooted to apply these changes."
-  tags: pvc-ceph-cpuset
-  when:
-    - pvc_shield_osds_enable is defined
-
 - meta: flush_handlers
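The removed debug task warned that the cpusets were only created at boot. On a node where the shield had been active, the resulting partition could be inspected with the cpuset package's own listing (a sketch, assuming the `cset set -l` subcommand of the installed cpuset tool):

    cset set -l    # lists the system, machine, and osd cpusets with their CPU/memory assignments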
ceph/ceph-osd-cpuset-enable.j2 (deleted file; installed to /etc/default/ceph-osd-cpuset above)
@@ -1 +0,0 @@
-{{ pvc_shield_osds_enable }}
ceph/ceph-osd-cpuset.conf.j2 (deleted file; installed to /etc/systemd/system/ceph-osd@.service.d/cpuset.conf above)
@@ -1,5 +0,0 @@
-# ceph-osd@.service overrides for cpuset
-# {{ ansible_managed }}
-[Service]
-ExecStart =
-ExecStart = /usr/bin/cset proc --set=osd --exec /usr/bin/ceph-osd -- -f --cluster ${CLUSTER} --id %i --setuser ceph --setgroup ceph
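The deleted drop-in uses the standard systemd override idiom: the bare `ExecStart =` clears the ExecStart inherited from the stock `ceph-osd@.service`, and the second assignment re-launches each OSD under `cset proc --exec` so it starts inside the `osd` cpuset. The merged unit could be checked with, for example (OSD id 0 is illustrative):

    systemctl cat ceph-osd@0.service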
ceph/ceph-osd-cpuset.j2 (deleted file; installed to /usr/local/sbin/ceph-osd-cpuset above)
@@ -1,79 +0,0 @@
-#!/bin/bash
-# PVC Ceph OSD cpuset preparation script
-# {{ ansible_managed }}
-
-# This script is designed to prepare the cpusets for use by Ceph OSDs, VMs, and other system resources.
-# Libvirt does not make this easy with any way to globally set its CPUs, so we must do this trickery.
-{% if pvc_shield_osds_cset is defined %}
-{% set cset_host = pvc_shield_osds_cset | selectattr('hostname', 'equalto', inventory_hostname) %}
-
-A_OSD_CPUS=( {{ cset_host[0]['osd_cset'] | join(' ') }} )
-A_SYS_CPUS=()
-{% else %}
-
-A_OSD_CPUS=()
-A_SYS_CPUS=()
-{% endif %}
-
-CPU_INFO="$( lscpu )"
-
-# First, we must determine how many NUMA nodes we have
-NUMA_COUNT="$( grep '^NUMA node(s)' <<<"${CPU_INFO}" | awk '{ print $NF }' )"
-
-# If we have 1 NUMA node, our SYS_MEMS is 0; otherwise it's 0-X
-# This is needed to explicitly set our memspec during the set
-if [[ ${NUMA_COUNT} -eq 1 ]]; then
-    SYS_MEMS="0"
-else
-    SYS_MEMS="0-$(( ${NUMA_COUNT} - 1 ))"
-fi
-
-# We must determine which NUMA nodes our OSD CPUS are in for the memspec during the set
-A_OSD_MEMS=()
-for CPU in ${A_OSD_CPUS[@]}; do
-    NODE="$( grep -E '^NUMA node[0-9]+ CPU' <<<"${CPU_INFO}" | grep -w "${CPU}" | awk '{ print $2 }' | sed 's/node//' )"
-    if [[ ! " ${A_OSD_MEMS} " =~ " ${NODE} " ]]; then
-        A_OSD_MEMS+=( $NODE )
-    fi
-done
-
-# Determine our CPU count
-CPU_COUNT="$( grep '^CPU(s)' <<<"${CPU_INFO}" | awk '{ print $NF }' )"
-
-# Loop through all the CPUs in the count; if they are not in OSD_CPUS, add them to the SYS_CPUS array
-for i in $( seq 0 $(( ${CPU_COUNT} - 1 )) ); do
-    if [[ ! " ${A_OSD_CPUS[*]} " =~ " ${i} " ]]; then
-        A_SYS_CPUS+=( $i )
-    fi
-done
-
-{% raw %}
-if [[ $( cat /etc/default/ceph-osd-cpuset ) == "True" && ${#A_OSD_CPUS[@]} -gt 0 ]]; then
-{% endraw %}
-    # Convert arrays into CSV
-    OSD_MEMS="$( IFS=, ; echo "${A_OSD_MEMS[*]}" )"
-    OSD_CPUS="$( IFS=, ; echo "${A_OSD_CPUS[*]}" )"
-    SYS_CPUS="$( IFS=, ; echo "${A_SYS_CPUS[*]}" )"
-else
-    # Configs installed but disabled, so use all CPUs for everything
-    OSD_MEMS="${SYS_MEMS}"
-    OSD_CPUS="0-$(( ${CPU_COUNT} - 1 ))"
-    SYS_CPUS="0-$(( ${CPU_COUNT} - 1 ))"
-fi
-
-echo "Enabled: $( cat /etc/default/ceph-osd-cpuset )"
-echo "CPU count: ${CPU_COUNT}"
-echo "OSD CPUs: ${OSD_CPUS}"
-echo "OSD Mems: ${OSD_MEMS}"
-echo "System/VM CPUs: ${SYS_CPUS}"
-echo "System/VM Mems: ${SYS_MEMS}"
-
-# Create the system cpuset and move everything currently running into it
-/usr/bin/cset set --cpu=${SYS_CPUS} --mem=${SYS_MEMS} system
-/usr/bin/cset proc --move --force --threads root --toset=system
-
-# Create our Libvirt cpuset (identical to system cpuset)
-/usr/bin/cset set --cpu=${SYS_CPUS} --mem=${SYS_MEMS} machine
-
-# Create our OSD cpuset
-/usr/bin/cset set --cpu=${OSD_CPUS} --mem=${OSD_MEMS} osd
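Worked through with the pvchv1 example from the removed group_vars (2x 6-core SMT CPUs, so 24 logical CPUs and, assuming one NUMA node per socket, NUMA_COUNT=2), the script would compute:

    # A_OSD_CPUS=(0 2 12 14)            -> OSD_CPUS="0,2,12,14"
    # all four IDs sit on NUMA node 0   -> OSD_MEMS="0"
    # complement of 0..23               -> SYS_CPUS="1,3,4,...,11,13,15,...,23"
    # NUMA_COUNT=2                      -> SYS_MEMS="0-1"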
ceph/ceph-osd-cpuset.service.j2 (deleted file; installed to /etc/systemd/system/ceph-osd-cpuset.service above)
@@ -1,12 +0,0 @@
-# PVC Ceph OSD cpuset service unit
-# {{ ansible_managed }}
-[Unit]
-Description = Ceph OSD cpuset shield creation
-Before = ceph-osd@.service libvirtd.service
-
-[Service]
-Type = oneshot
-ExecStart = /usr/local/sbin/ceph-osd-cpuset
-
-[Install]
-WantedBy = ceph.target
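The `Before =` ordering ensured the cpusets existed before any OSD or libvirt process was placed, while `WantedBy = ceph.target` pulled the unit in with the rest of Ceph. On a host that still carries the unit, the ordering can be confirmed via the unit's Before property:

    systemctl show -p Before ceph-osd-cpuset.service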