Add debian common role

This commit is contained in:
Joshua Boniface 2023-05-05 15:47:27 -04:00
parent b387d68eda
commit bf6bfe2809
79 changed files with 3166 additions and 0 deletions

View File

@ -0,0 +1,223 @@
---
# A root password for the system in plaintext format
root_password: "OverrideMeToSomethingSecurePlease!"

# Timezone & Locale
timezone: Canada/Eastern
locale: en_CA.UTF-8

# Hosts to allow for hostbased authentication
hostbased_auth: # Must be list of inventory hostnames
#  - adminhost.domain.tld

# Custom facts (from the templates/etc/ansible/facts.d directory) to install
custom_facts:
  - moe_release
  - host_id
  - host_group
  - dhcp_status

# Apt configuration files (from the templates/etc/apt/apt.conf.d directory) to install
apt_configurations:
  - 10norecommends
  - 30aptcacher
  - 50unattended-upgrades

# Apt sources entries
# has_src uses canonical booleans (true/false) rather than ambiguous yes/no
apt_sources:
  - name: rafal.ca-base
    has_src: true
    url: http://debian.mirror.rafal.ca/debian
    distribution: "{{ moe_release.debian_codename }}"
    components:
      - main
      - contrib
      - non-free
  - name: rafal.ca-updates
    has_src: true
    url: http://debian.mirror.rafal.ca/debian
    distribution: "{{ moe_release.debian_codename }}-updates"
    components:
      - main
      - contrib
      - non-free
  - name: rafal.ca-security
    has_src: true
    url: http://security.debian.org/debian-security
    distribution: "{{ moe_release.debian_codename }}-security"
    components:
      - main
      - contrib
      - non-free
  - name: repo.bonifacelabs.net
    has_src: false
    url: https://repo.bonifacelabs.net/debian
    distribution: "{{ moe_release.debian_codename }}"
    components:
      - main
    gpg_url: https://repo.bonifacelabs.net/debian/bonifacelabs_signing_key.pub
    # Quoted so the key ID is never misparsed as a number
    gpg_id: "83D07192314835D4"

# Packages to explicitly remove from the system
packages_remove:
  - exim4
  - exim4-base
  - exim4-config
  - exim4-daemon-light
  - nano
  - joe
  - python2

# Packages to install on the system
packages_add:
  - acl
  - acpi-support-base
  - acpid
  - bash
  - bash-completion
  - bc
  - bind9-host
  - binutils
  - bzip2
  - ca-certificates
  - check-mk-agent
  - curl
  - debconf-utils
  - deborphan
  - dns-root-data
  - dnsutils
  - dstat
  - fail2ban
  - gawk
  - git
  - haveged
  - htop
  - iotop
  - iperf
  - iperf3
  - iptables
  - jnettop
  - less
  - libpam-systemd
  - locales
  - logrotate
  - lsof
  - man
  - mmv
  - needrestart
  - net-tools
  - netcat-openbsd
  - nethogs
  - nftables
  - nmap
  - ntp
  - openssh-client
  - openssh-server
  - openssl
  - postfix
  - psmisc
  - pv
  - reptyr
  - rsync
  - rsyslog
  - screenfetch
  - sharutils
  - shellcheck
  - strace
  - sudo
  - sysstat
  - tcptraceroute
  - traceroute
  - tshark
  - unattended-upgrades
  - vim
  - wget
  - zram-tools
  - "linux-headers-{{ moe_release.dpkg_architecture }}"
  - "linux-image-{{ moe_release.dpkg_architecture }}"

# Apt preferences (debconf selections) to set before installing packages
apt_preferences:
  - name: wireshark-common
    question: wireshark-common/install-setuid
    vtype: select
    # Quoted string on purpose: debconf expects the literal text "true"
    value: 'true'
  - name: postfix
    question: postfix/main_mailer_type
    vtype: select
    value: "Internet Site"

# Services to enable (after installing but before configuring)
enabled_services:
  - acpid
  - rsyslog
  - nftables
  - postfix
  - ntp
  - ssh

# Capabilities overrides on binaries
set_capabilities:
  - path: /bin/ping
    capability: cap_net_raw=ep

# Sysctl configuration files (from templates/etc/sysctl.d) to install
sysctl_files:
  - moe.conf

# NFTables rules to create; leave empty for a default allow-all ruleset
nftables_rules:
#  # EXAMPLE: Permit CheckMK only from RFC1918 subnets
#  - chain: input
#    rule: "ip saddr { 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16 } tcp dport 6556 accept"
#  - chain: input
#    rule: "ip tcp dport 6556 drop"

# CheckMK plugin files (from files/usr/lib/check_mk_agent/plugins) to install
check_mk_plugins:
  - mk_logwatch
  - backup
  - cephfsmounts
  - dpkg
  - entropy
  - freshness
  - kernel_taint
  - ownership

# Additional groups to add
add_groups:
  - name: media
    gid: 9000

# SSH keys for backup purposes
backup_ssh_keys:
  - name: backup@domain.tld
    # Quoted: "2020-01" must stay a string, not be misread by the parser
    date: "2020-01"
    type: ssh-ed25519
    key: AAAA...ZZZZ

# Administrative users
admin_users:
  - name: example
    uid: 501
    add_groups:
      - wireshark
      - media
    shell: /bin/bash
    ssh_keys:
      - name: example@domain.tld
        date: "2020-01"
        type: ssh-ed25519
        key: AAAA...ZZZZ

# Non-mailhost postfix relay and domain information (for cron emails, etc.)
postfix_relay: ""
postfix_domain: ""

# File used to determine if the Postfix main.cf configuration should not be installed
# Ensure this file is created in a later role for hosts that need their own main.cf configuration
# to avoid this role overwriting it in the future.
postfix_mailhost_flag_file: "/etc/postfix/mailhost"

View File

@ -0,0 +1,11 @@
#!/bin/bash
# Backup check for Check_MK
# Installed by BLSE 2.x ansible
#
# Emits one line per share listed in /var/backups/shares, each followed by
# the contents of that share's ".backup" marker file.

# Intentional word-splitting of the file contents into an array of paths.
SHARELIST=( $( cat /var/backups/shares ) )

echo "<<<backup>>>"
for SHARE in "${SHARELIST[@]}"; do
    # Quote the array expansion and the path so globbing characters in a
    # share name cannot be expanded by the shell.
    echo "${SHARE} $( cat "${SHARE}/.backup" )"
done

View File

@ -0,0 +1,15 @@
# CephFS mount health check (Check_MK section <<<cephfsmounts>>>).
# NOTE(review): no shebang line present — runs under whatever shell the
# agent uses to execute plugins; confirm that is acceptable.
echo '<<<cephfsmounts>>>'
# Extract mount points whose filesystem type matches " ceph " from
# /proc/mounts, then un-escape octal-encoded spaces (\040) in the paths.
sed -n '/ ceph\? /s/[^ ]* \([^ ]*\) .*/\1/p' < /proc/mounts |
sed 's/\\040/ /g' |
while read MP
do
    if [ ! -r $MP ]; then
        echo "$MP Permission denied"
    elif [ $STAT_VERSION != $STAT_BROKE ]; then
        # NOTE(review): STAT_VERSION and STAT_BROKE are never set in this
        # file — presumably inherited from the calling agent's environment;
        # if both are unset this test is "[ != ]", a syntax error. Confirm.
        # waitmax bounds the stat call so a hung CephFS mount cannot block
        # the whole agent run.
        waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" || \
            echo "$MP hanging 0 0 0 0"
    else
        waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" && \
            printf '\n'|| echo "$MP hanging 0 0 0 0"
    fi
done

View File

@ -0,0 +1,33 @@
#!/bin/bash
# Apt and dpkg status check for Check_MK
# Installed by BLSE 2.x ansible
#
# Emits the <<<dpkg>>> section: Debian version, total package count,
# upgradable packages, packages in an inconsistent dpkg state, and
# leftover obsolete configuration files under /etc.

# Full dpkg listing; wide COLUMNS so long package names are not truncated.
TMP_DPKG="$( COLUMNS=200 dpkg --list )"
# awk output: first line is the total package count (listing has a 5-line
# header), remaining lines are "name[state]" for every package whose dpkg
# state is not "ii" (installed ok). Exit code 1 signals bad packages exist.
TMP_AWK="$( awk '
{ if (NR>5) {
    if ($1 != "ii") bad_package[$2]=$1;
}}
END {
    print NR-5;
    bad_package_count=asort(bad_package,junk)
    if (bad_package_count) {
        for (package in bad_package)
            print package "[" bad_package[package] "]"
        exit 1
    }
}
' <<<"$TMP_DPKG" )"
DEBIAN_VERSION="$( cat /etc/debian_version )"
# First line of the awk result: total package count.
TOTAL_PACKAGES=$( head --lines=1 <<<"${TMP_AWK}" )
# "name[new<>old]" for each upgradable package reported by apt.
UPGRADABLE_PACKAGES=( $( apt list --upgradable 2>/dev/null | grep -v '^Listing' | awk '{ gsub(/\]/,"",$NF); print $1 "[" $NF "<>" $2 "]" }' ) )
# Remaining awk lines: packages in a non-"ii" state.
INCONSISTENT_PACKAGES=( $( tail --lines=+2 <<<"${TMP_AWK}" ) )
# Leftover *.dpkg-*/*.ucf-*/*.update-* files under /etc; ionice -c3 keeps
# the find from competing with real I/O.
OLD_CONFIG_FILES=( $( ionice -c3 find /etc -type f -a \( -name '*.dpkg-*' -o -name '*.ucf-*' -o -name '*.update-*' \) 2>/dev/null ) )

echo "<<<dpkg>>>"
echo "debian_version ${DEBIAN_VERSION}"
echo "total_packages ${TOTAL_PACKAGES}"
echo "upgradable_packages ${#UPGRADABLE_PACKAGES[*]} ${UPGRADABLE_PACKAGES[*]}"
echo "inconsistent_packages ${#INCONSISTENT_PACKAGES[*]} ${INCONSISTENT_PACKAGES[*]}"
echo "obsolete_configuration_files ${#OLD_CONFIG_FILES[*]} ${OLD_CONFIG_FILES[*]}"

View File

@ -0,0 +1,16 @@
#!/bin/bash
# Entropy availability check for Check_MK
# Installed by BLSE 2.x ansible
#
# Prints the kernel's available entropy and pool size when the procfs
# interface exists; emits nothing on kernels without it.

ENTROPY_FILE="/proc/sys/kernel/random/entropy_avail"
POOLSIZE_FILE="/proc/sys/kernel/random/poolsize"

if [ -e "${ENTROPY_FILE}" ]; then
    echo '<<<entropy_avail>>>'
    echo "entropy_avail $(cat "${ENTROPY_FILE}")"
    echo "poolsize $(cat "${POOLSIZE_FILE}")"
fi

View File

@ -0,0 +1,103 @@
#!/usr/bin/env python
# Check for freshness of various components using needrestart
#
# Runs "needrestart -b" (batch mode), parses its machine-readable output,
# and prints a JSON summary as the Check_MK section <<<freshness>>>.
# Exit codes: 2 = needrestart timed out, 1 = needrestart failed to run.

import subprocess
import re
import json

try:
    nrout = subprocess.run(["/usr/sbin/needrestart", "-b"], timeout=5, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except subprocess.TimeoutExpired:
    exit(2)
except Exception:
    exit(1)

stdout = nrout.stdout.decode("ascii").split('\n')
# Fixed: stderr was previously (incorrectly) decoded from nrout.stdout.
stderr = nrout.stderr.decode("ascii").split('\n')

# Output data structure after parsing needrestart output
data = {
    'kernel': {
        'current': None,
        'pending': None,
        'state': 0,
    },
    'microcode': {
        'current': None,
        'pending': None,
        'state': 0,
    },
    'services': {
        'count': 0,
        'list': list(),
    },
    'containers': {
        'count': 0,
        'list': list(),
    },
    'sessions': {
        'count': 0,
        'list': list(),
    },
}

# Example needrestart batch output:
# NEEDRESTART-VER: 3.4
# NEEDRESTART-KCUR: 4.19.0-6-amd64
# NEEDRESTART-KEXP: 4.19.0-20-amd64
# NEEDRESTART-KSTA: 3
# NEEDRESTART-UCSTA: 2
# NEEDRESTART-UCCUR: 0xb000038
# NEEDRESTART-UCEXP: 0xb000040
# NEEDRESTART-SVC: acpid
# NEEDRESTART-SVC: cron
# ...
# NEEDRESTART-CONT: LXC web1
# NEEDRESTART-SESS: metabase @ user manager service
# NEEDRESTART-SESS: root @ session #28017
#
# STA (state) values:
#  0: unknown or failed to detect
#  1: no pending upgrade
#  2: ABI compatible upgrade pending
#  3: version upgrade pending
for line in stdout:
    # Kernel version
    if re.match(r'^NEEDRESTART-KSTA', line):
        data['kernel']['state'] = int(line.split(': ')[-1])
    elif re.match(r'^NEEDRESTART-KCUR', line):
        data['kernel']['current'] = line.split(': ')[-1]
    elif re.match(r'^NEEDRESTART-KEXP', line):
        data['kernel']['pending'] = line.split(': ')[-1]
    # Microcode version
    elif re.match(r'^NEEDRESTART-UCSTA', line):
        data['microcode']['state'] = int(line.split(': ')[-1])
    elif re.match(r'^NEEDRESTART-UCCUR', line):
        data['microcode']['current'] = line.split(': ')[-1]
    elif re.match(r'^NEEDRESTART-UCEXP', line):
        data['microcode']['pending'] = line.split(': ')[-1]
    # Services needing restart
    elif re.match(r'^NEEDRESTART-SVC', line):
        data['services']['count'] += 1
        data['services']['list'].append(' '.join(line.split(': ')[1:]))
    # Containers needing restart
    # Fixed: the CONT and SESS patterns used an f-string prefix (f'...')
    # where a raw string (r'...') was intended.
    elif re.match(r'^NEEDRESTART-CONT', line):
        data['containers']['count'] += 1
        data['containers']['list'].append(' '.join(line.split(': ')[1:]))
    # Sessions needing restart
    elif re.match(r'^NEEDRESTART-SESS', line):
        data['sessions']['count'] += 1
        data['sessions']['list'].append(' '.join(line.split(': ')[1:]))

print("<<<freshness>>>")
print(json.dumps(data))
exit(0)

View File

@ -0,0 +1,51 @@
#!/usr/bin/env python3
# Kernel taint check: reads the kernel taint bitmask from procfs and emits
# a one-line Check_MK local status (<<<kernel_taint>>>) describing every
# taint flag that is set.

taints_msg = list()
taints_err = list()

# Taint bit -> human-readable description and whether it warrants a warning.
taint_map = {
    0: {"text": "Proprietary module was loaded", "error": False},
    1: {"text": "Module was force loaded", "error": True},
    2: {"text": "Kernel running on an out of specification system", "error": True},
    3: {"text": "Module was force unloaded", "error": True},
    4: {"text": "Processor reported a Machine Check Exception (MCE)", "error": True},
    5: {"text": "Bad page referenced or some unexpected page flags", "error": True},
    6: {"text": "Taint requested by userspace application", "error": True},
    7: {"text": "Kernel died recently (OOPS or BUG)", "error": True},
    8: {"text": "ACPI table overridden by user", "error": True},
    9: {"text": "Kernel issued warning", "error": True},
    10: {"text": "Staging driver was loaded", "error": False},
    11: {"text": "Workaround for bug in platform firmware applied", "error": True},
    12: {"text": "Externally-built module was loaded", "error": False},
    13: {"text": "Unsigned module was loaded", "error": False},
    14: {"text": "Soft lockup occurred", "error": True},
    15: {"text": "Kernel has been live patched", "error": True},
    16: {"text": "Auxiliary taint", "error": True},
    17: {"text": "Kernel was built with the struct randomization plugin", "error": True},
    18: {"text": "An in-kernel test has been run", "error": True},
}

with open("/proc/sys/kernel/tainted") as tfh:
    taint_val = int(tfh.read().strip())

# Check every bit defined in taint_map (0..18 inclusive).
# Fixed: range(0, 18) previously skipped bit 18; a dead (and incorrect)
# "xor_val = i ** 2" computation was also removed.
for i in range(len(taint_map)):
    if (taint_val >> i) & 1 == 0:
        continue
    taint = taint_map[i]
    taints_msg.append(taint['text'])
    taints_err.append(taint['error'])

if len(taints_msg) < 1:
    taints_err.append(False)
    taints_msg = ["Kernel is untainted"]

# WARN when any set taint flag is classified as an error.
# Fixed: all() previously reported OK whenever a non-error taint (e.g. a
# proprietary module) accompanied an error taint.
if any(taints_err):
    state = "WARN"
else:
    state = "OK"

print("<<<kernel_taint>>>")
print(f'{state} {"; ".join(taints_msg)}')

View File

@ -0,0 +1,564 @@
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Call with -d for debug mode: colored output, no saving of status
import sys, os, re, time, glob
# .--MEI-Cleanup---------------------------------------------------------.
# | __ __ _____ ___ ____ _ |
# | | \/ | ____|_ _| / ___| | ___ __ _ _ __ _ _ _ __ |
# | | |\/| | _| | |_____| | | |/ _ \/ _` | '_ \| | | | '_ \ |
# | | | | | |___ | |_____| |___| | __/ (_| | | | | |_| | |_) | |
# | |_| |_|_____|___| \____|_|\___|\__,_|_| |_|\__,_| .__/ |
# | |_| |
# +----------------------------------------------------------------------+
# In case the program crashes or is killed in a hard way, the frozen binary .exe
# may leave temporary directories named "_MEI..." in the temporary path. Clean them
# up to prevent eating disk space over time.
########################################################################
############## DUPLICATE CODE WARNING ##################################
### This code is also used in the cmk-update-agent frozen binary #######
### Any changes to this class should also be made in cmk-update-agent ##
### In the bright future we will move this code into a library #########
########################################################################
class MEIFolderCleaner(object):
    """Removes stale PyInstaller "_MEI<PID><NR>" temp directories (Windows only)."""

    def pid_running(self, pid):
        # Windows-only probe: OpenProcess succeeds iff the PID is alive.
        import ctypes
        kernel32 = ctypes.windll.kernel32
        SYNCHRONIZE = 0x100000

        process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)

        if process != 0:
            kernel32.CloseHandle(process)
            return True
        else:
            return False

    def find_and_remove_leftover_folders(self, hint_filenames):
        # Only relevant when running as a frozen (PyInstaller) binary.
        if not hasattr(sys, "frozen"):
            return

        import win32file # pylint: disable=import-error
        import tempfile
        base_path = tempfile.gettempdir()
        for f in os.listdir(base_path):
            try:
                path = os.path.join(base_path, f)

                if not os.path.isdir(path):
                    continue

                # Only care about directories related to our program
                invalid_dir = False
                for hint_filename in hint_filenames:
                    if not os.path.exists(os.path.join(path, hint_filename)):
                        invalid_dir = True
                        break
                if invalid_dir:
                    continue

                pyinstaller_tmp_path = win32file.GetLongPathName(sys._MEIPASS).lower() # pylint: disable=no-member
                if pyinstaller_tmp_path == path.lower():
                    continue # Skip our own directory

                # Extract the process id from the directory and check whether or not it is still
                # running. Don't delete directories of running processes!
                # The name of the temporary directories is "_MEI<PID><NR>". We try to extract the PID
                # by stripping of a single digit from the right. In the hope the NR is a single digit
                # in all relevant cases.
                pid = int(f[4:-1])
                if self.pid_running(pid):
                    continue

                # NOTE(review): shutil is only imported conditionally later at
                # module level; if that import did not run, this raises
                # NameError (silently swallowed below) — confirm intent.
                shutil.rmtree(path)
            except Exception, e:
                # TODO: introduce verbose mode for mk_logwatch
                pass
#.

# Detect the host OS family; logfile handling differs on Windows.
os_type = "linux"
try:
    import platform
    os_type = platform.system().lower()
except:
    pass

# Debug mode (-d/--debug): colored output, and the status file is not saved.
if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
    tty_red = '\033[1;31m'
    tty_green = '\033[1;32m'
    tty_yellow = '\033[1;33m'
    tty_blue = '\033[1;34m'
    tty_normal = '\033[0m'
    debug = True
else:
    tty_red = ''
    tty_green = ''
    tty_yellow = ''
    tty_blue = ''
    tty_normal = ''
    debug = False

# The configuration file and status file are searched
# in the directory named by the environment variable
# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
# If that is not set either, the current directory ist
# used.
logwatch_dir = os.getenv("LOGWATCH_DIR")
if logwatch_dir:
    mk_confdir = logwatch_dir
    mk_vardir = logwatch_dir
else:
    mk_confdir = os.getenv("MK_CONFDIR") or "."
    mk_vardir = os.getenv("MK_VARDIR") or os.getenv("MK_STATEDIR") or "."

sys.stdout.write("<<<logwatch>>>\n")

config_filename = mk_confdir + "/logwatch.cfg"
config_dir = mk_confdir + "/logwatch.d/*.cfg"

# Determine the name of the state file
# $REMOTE set                -> logwatch.state.$REMOTE
# $REMOTE not set and a tty  -> logwatch.state.local
# $REMOTE not set and not a tty -> logwatch.state
remote_hostname = os.getenv("REMOTE", "")
remote_hostname = remote_hostname.replace(":", "_")
if remote_hostname != "":
    status_filename = "%s/logwatch.state.%s" % (mk_vardir, remote_hostname)
else:
    if sys.stdout.isatty():
        status_filename = "%s/logwatch.state.local" % mk_vardir
    else:
        status_filename = "%s/logwatch.state" % mk_vardir

# Copy the last known state from the logwatch.state when there is no status_filename yet.
if not os.path.exists(status_filename) and os.path.exists("%s/logwatch.state" % mk_vardir):
    import shutil
    shutil.copy("%s/logwatch.state" % mk_vardir, status_filename)
def is_not_comment(line):
    """Return True when the line carries configuration (not blank, not a comment)."""
    stripped = line.strip()
    if not stripped:
        return False
    return not stripped.startswith('#')
def parse_filenames(line):
    # A filename line is a whitespace-separated list of glob patterns,
    # optionally followed by key=value options (handled later by the caller).
    return line.split()
def parse_pattern(level, pattern, line):
    """Validate a pattern line's level and compile its regex.

    Returns a (level, compiled_regex) tuple. Raises Exception for an
    unknown level or an invalid regular expression.
    """
    if level not in ('C', 'W', 'I', 'O'):
        raise Exception("Invalid pattern line '%s'" % line)

    try:
        return (level, re.compile(pattern))
    except:
        raise Exception("Invalid regular expression in line '%s'" % line)
def read_config():
    """Parse logwatch.cfg plus logwatch.d/*.cfg into a list of
    (filename_list, patterns) tuples, where patterns is a list of
    (level, compiled_regex, cont_list, rewrite_list)."""
    config_lines = []
    try:
        config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ]
    except IOError, e:
        # Missing main config is tolerated (drop-in dir may still exist).
        if debug:
            raise

    # Add config from a logwatch.d folder
    for config_file in glob.glob(config_dir):
        config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ]

    have_filenames = False
    config = []
    cont_list = []
    rewrite_list = []

    for line in config_lines:
        if line[0].isspace(): # pattern line
            if not have_filenames:
                raise Exception("Missing logfile names")
            level, pattern = line.split(None, 1)
            if level == 'A':
                # 'A' adds a continuation pattern to the previous match rule.
                cont_list.append(parse_cont_pattern(pattern))
            elif level == 'R':
                # 'R' adds a rewrite template to the previous match rule.
                rewrite_list.append(pattern)
            else:
                level, compiled = parse_pattern(level, pattern, line)
                # New pattern for line matching => clear continuation and rewrite patterns
                cont_list = []
                rewrite_list = []
                # TODO: Fix the code and remove the pragma below!
                patterns.append((level, compiled, cont_list, rewrite_list)) # pylint: disable=used-before-assignment
        else: # filename line
            patterns = []
            cont_list = [] # Clear list of continuation patterns from last file
            rewrite_list = [] # Same for rewrite patterns
            config.append((parse_filenames(line), patterns))
            have_filenames = True
    return config
def parse_cont_pattern(pattern):
    """Parse a continuation ('A') pattern: an integer line count, or a regex."""
    try:
        return int(pattern)
    except:
        pass

    try:
        return re.compile(pattern)
    except:
        if debug:
            raise
        raise Exception("Invalid regular expression in line '%s'" % pattern)
# structure of statusfile
# # LOGFILE         OFFSET    INODE
# /var/log/messages|7767698|32455445
# /var/test/x12134.log|12345|32444355
def read_status():
    """Load the per-logfile (offset, inode) map from the status file.

    In debug mode the status is ignored so every run re-reads all files.
    """
    if debug:
        return {}

    status = {}
    for line in file(status_filename):
        # TODO: Remove variants with spaces. rsplit is
        # not portable. split fails if logfilename contains
        # spaces
        inode = -1
        try:
            parts = line.split('|')
            filename = parts[0]
            offset = parts[1]
            if len(parts) >= 3:
                inode = parts[2]
        except:
            # Legacy whitespace-separated formats (no inode column).
            try:
                filename, offset = line.rsplit(None, 1)
            except:
                filename, offset = line.split(None, 1)
        status[filename] = int(offset), int(inode)
    return status
def save_status(status):
    """Persist the per-logfile state as "filename|offset|inode" lines.

    Fixed: the file handle was previously never closed; the context
    manager guarantees the buffer is flushed and the handle released.
    """
    with open(status_filename, "w") as f:
        for filename, (offset, inode) in status.items():
            f.write("%s|%d|%d\n" % (filename, offset, inode))
# One-line pushback buffer used by continuation-pattern handling.
pushed_back_line = None

def next_line(file_handle):
    """Return the next complete line from file_handle, honoring the
    one-line pushback buffer; returns None at EOF or on a partial line."""
    global pushed_back_line

    if pushed_back_line != None:
        line = pushed_back_line
        pushed_back_line = None
        return line
    else:
        try:
            line = file_handle.next()
            # Avoid parsing of (yet) incomplete lines (when acutal application
            # is just in the process of writing)
            # Just check if the line ends with a \n. This handles \n and \r\n
            if not line.endswith("\n"):
                # Rewind so the partial line is re-read on the next run.
                begin_of_line_offset = file_handle.tell() - len(line)
                os.lseek(file_handle.fileno(), begin_of_line_offset, 0)
                return None
            return line
        except:
            return None
def is_inode_cabable(path):
    """Whether the filesystem backing `path` exposes usable inode numbers.

    Linux: always True. Windows: True only for NTFS volumes. Any other
    platform: False. (Function name typo preserved — it is the interface.)
    """
    if "linux" in os_type:
        return True
    if "windows" not in os_type:
        return False

    # Windows: inspect the volume's filesystem type.
    volume_name = "%s:\\\\" % path.split(":", 1)[0]
    import win32api # pylint: disable=import-error
    volume_info = win32api.GetVolumeInformation(volume_name)
    volume_type = volume_info[-1]
    return "ntfs" in volume_type.lower()
def process_logfile(logfile, patterns):
    """Scan new lines of `logfile` since the stored offset against
    `patterns`, print matching lines as a [[[logfile]]] section, and
    update the module-level `status` map with the new offset/inode.

    Relies on module globals: status, debug, os_type, the tty_* colors,
    and the opt_* option variables set by the main loop.
    """
    global pushed_back_line

    # Look at which file offset we have finished scanning
    # the logfile last time. If we have never seen this file
    # before, we set the offset to -1
    offset, prev_inode = status.get(logfile, (-1, -1))
    try:
        file_desc = os.open(logfile, os.O_RDONLY)
        if not is_inode_cabable(logfile):
            inode = 1 # Create a dummy inode
        else:
            inode = os.fstat(file_desc)[1] # 1 = st_ino
    except:
        if debug:
            raise
        sys.stdout.write("[[[%s:cannotopen]]]\n" % logfile)
        return

    sys.stdout.write("[[[%s]]]\n" % logfile)

    # Seek to the current end in order to determine file size
    current_end = os.lseek(file_desc, 0, 2) # os.SEEK_END not available in Python 2.4
    status[logfile] = current_end, inode

    # If we have never seen this file before, we just set the
    # current pointer to the file end. We do not want to make
    # a fuss about ancient log messages...
    if offset == -1:
        if not debug:
            return
        else:
            offset = 0

    # If the inode of the logfile has changed it has appearently
    # been started from new (logfile rotation). At least we must
    # assume that. In some rare cases (restore of a backup, etc)
    # we are wrong and resend old log messages
    if prev_inode >= 0 and inode != prev_inode:
        offset = 0

    # Our previously stored offset is the current end ->
    # no new lines in this file
    if offset == current_end:
        return # nothing new

    # If our offset is beyond the current end, the logfile has been
    # truncated or wrapped while keeping the same inode. We assume
    # that it contains all new data in that case and restart from
    # offset 0.
    if offset > current_end:
        offset = 0

    # now seek to offset where interesting data begins
    os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4
    if os_type == "windows":
        import io # Available with python 2.6
        import codecs
        # Some windows files are encoded in utf_16
        # Peak the first two bytes to determine the encoding...
        peak_handle = os.fdopen(file_desc, "rb")
        first_two_bytes = peak_handle.read(2)
        use_encoding = None
        if first_two_bytes == "\xFF\xFE":
            use_encoding = "utf_16"
        elif first_two_bytes == "\xFE\xFF":
            use_encoding = "utf_16_be"

        os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4
        file_handle = io.open(file_desc, encoding = use_encoding)
    else:
        file_handle = os.fdopen(file_desc)

    worst = -1
    outputtxt = ""
    lines_parsed = 0
    start_time = time.time()

    while True:
        line = next_line(file_handle)
        if line == None:
            break # End of file

        # Handle option maxlinesize
        if opt_maxlinesize != None and len(line) > opt_maxlinesize:
            line = line[:opt_maxlinesize] + "[TRUNCATED]\n"

        lines_parsed += 1
        # Check if maximum number of new log messages is exceeded
        if opt_maxlines != None and lines_parsed > opt_maxlines:
            outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
                opt_overflow, opt_maxlines)
            worst = max(worst, opt_overflow_level)
            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
            break

        # Check if maximum processing time (per file) is exceeded. Check only
        # every 100'th line in order to save system calls
        if opt_maxtime != None and lines_parsed % 100 == 10 \
            and time.time() - start_time > opt_maxtime:
            outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
                opt_overflow, opt_maxtime)
            worst = max(worst, opt_overflow_level)
            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
            break

        level = "."
        for lev, pattern, cont_patterns, replacements in patterns:
            matches = pattern.search(line[:-1])
            if matches:
                level = lev
                levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
                worst = max(levelint, worst)

                # Check for continuation lines
                for cont_pattern in cont_patterns:
                    if type(cont_pattern) == int: # add that many lines
                        for _unused_x in range(cont_pattern):
                            cont_line = next_line(file_handle)
                            if cont_line == None: # end of file
                                break
                            line = line[:-1] + "\1" + cont_line

                    else: # pattern is regex
                        while True:
                            cont_line = next_line(file_handle)
                            if cont_line == None: # end of file
                                break
                            elif cont_pattern.search(cont_line[:-1]):
                                line = line[:-1] + "\1" + cont_line
                            else:
                                pushed_back_line = cont_line # sorry for stealing this line
                                break

                # Replacement
                for replace in replacements:
                    line = replace.replace('\\0', line.rstrip()) + "\n"
                    for nr, group in enumerate(matches.groups()):
                        line = line.replace('\\%d' % (nr+1), group)

                break # matching rule found and executed

        color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
        if debug:
            line = line.replace("\1", "\nCONT:")
        if level == "I":
            level = "."
        if opt_nocontext and level == '.':
            continue
        outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal)

    new_offset = os.lseek(file_desc, 0, 1) # os.SEEK_CUR not available in Python 2.4
    status[logfile] = new_offset, inode

    # output all lines if at least one warning, error or ok has been found
    if worst > -1:
        sys.stdout.write(outputtxt)
        sys.stdout.flush()

    # Handle option maxfilesize, regardless of warning or errors that have happened
    if opt_maxfilesize != None and (offset / opt_maxfilesize) < (new_offset / opt_maxfilesize):
        sys.stdout.write("%sW Maximum allowed logfile size (%d bytes) exceeded for the %dth time.%s\n" %
            (tty_yellow, opt_maxfilesize, new_offset / opt_maxfilesize, tty_normal))
try:
    # This removes leftover folders which may be generated by crashing frozen binaries
    folder_cleaner = MEIFolderCleaner()
    folder_cleaner.find_and_remove_leftover_folders(hint_filenames = ["mk_logwatch.exe.manifest"])
except Exception, e:
    sys.stdout.write("ERROR WHILE DOING FOLDER: %s\n" % e)
    sys.exit(1)

try:
    config = read_config()
except Exception, e:
    if debug:
        raise
    sys.stdout.write("CANNOT READ CONFIG FILE: %s\n" % e)
    sys.exit(1)

# Simply ignore errors in the status file. In case of a corrupted status file we simply begin
# with an empty status. That keeps the monitoring up and running - even if we might lose a
# message in the extreme case of a corrupted status file.
try:
    status = read_status()
except Exception, e:
    status = {}

logfile_patterns = {}
# The filename line may contain options like 'maxlines=100' or 'maxtime=10'
# NOTE(review): the opt_* variables are plain module globals read later by
# process_logfile(); when several config entries cover the same logfile,
# the options of the LAST entry processed apply to all of them — confirm
# this is the intended behavior.
for filenames, patterns in config:
    # Initialize options with default values
    opt_maxlines = None
    opt_maxtime = None
    opt_maxlinesize = None
    opt_maxfilesize = None
    opt_regex = None
    opt_overflow = 'C'
    opt_overflow_level = 2
    opt_nocontext = False
    try:
        options = [ o.split('=', 1) for o in filenames if '=' in o ]
        for key, value in options:
            if key == 'maxlines':
                opt_maxlines = int(value)
            elif key == 'maxtime':
                opt_maxtime = float(value)
            elif key == 'maxlinesize':
                opt_maxlinesize = int(value)
            elif key == 'maxfilesize':
                opt_maxfilesize = int(value)
            elif key == 'overflow':
                if value not in [ 'C', 'I', 'W', 'O' ]:
                    raise Exception("Invalid value %s for overflow. Allowed are C, I, O and W" % value)
                opt_overflow = value
                opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value]
            elif key == 'regex':
                opt_regex = re.compile(value)
            elif key == 'iregex':
                opt_regex = re.compile(value, re.I)
            elif key == 'nocontext':
                opt_nocontext = True
            else:
                raise Exception("Invalid option %s" % key)
    except Exception, e:
        if debug:
            raise
        sys.stdout.write("INVALID CONFIGURATION: %s\n" % e)
        sys.exit(1)

    for glob_pattern in filenames:
        if '=' in glob_pattern:
            continue
        logfiles = glob.glob(glob_pattern)
        if opt_regex:
            logfiles = [ f for f in logfiles if opt_regex.search(f) ]
        if len(logfiles) == 0:
            sys.stdout.write('[[[%s:missing]]]\n' % glob_pattern)
        else:
            for logfile in logfiles:
                logfile_patterns[logfile] = logfile_patterns.get(logfile, []) + patterns

for logfile, patterns in logfile_patterns.items():
    process_logfile(logfile, patterns)

if not debug:
    save_status(status)

View File

@ -0,0 +1,71 @@
#!/bin/bash
# File ownership check for Check_MK
# Ensures that no files outside of homedirs are owned by administrative users
# Installed by BLSE 2.x ansible

# UID range treated as "administrative" (see policy table below).
ADMIN_UID_MIN=200
ADMIN_UID_MAX=599

# http://www.debian.org/doc/debian-policy/ch-opersys.html
# 0-99: Globally allocated by the Debian project
# 100-199: (BLSE) Dynamically allocated system users and groups
# 200-299: (BLSE) BLSE service users and groups
# 300-499: (BLSE) reserved
# 500-599: (BLSE) system administrators
# 600-999: (BLSE) Dynamically allocated system users and groups
# 64045: (BLSE) ceph

# Return 0 when $1 equals one of the remaining (word-split) arguments.
# NOTE(review): relies on unquoted expansion and word-splitting, so
# elements containing whitespace cannot be matched reliably.
function is_element_of {
    local TO_FIND=$1
    shift
    for ARRAY_ELEMENT in $*
    do
        if test $TO_FIND = $ARRAY_ELEMENT
        then
            return 0
        fi
    done
    return 1
}

OK=0
WARNING=1

# Filesystem roots to scan; each is scanned with -xdev (no mount crossing).
FILESYSTEMs=(/ /var/log)
# All current mount points, used to skip files that are themselves mounts.
MOUNTs=($(awk '{print $2}' '/proc/mounts'))
FILEs=()

for FILESYSTEM in ${FILESYSTEMs[@]}; do
    while IFS= read -r -d $'\0' FILE
    do
        # Skip entries that are scan roots or mount points themselves.
        if ! is_element_of "$FILE" ${FILESYSTEMs[*]}; then
            if is_element_of $FILE ${MOUNTs[*]}; then
                continue
            fi
        fi
        FILEs+=($FILE)
    done < <( find ${FILESYSTEM} -xdev -uid +${ADMIN_UID_MIN} -uid -${ADMIN_UID_MAX} \
        -not \( -type d -a \( -path /media -o -path /mnt \) \) \
        -not \( -name '.*.swp' -a -mtime -3 \) \
        -not \( -path '*/.git' -o -path '*/.git/*' \) \
        -not \( -path '*.dirtrack.Storable' \) \
        -not \( -path '/home/*' \) \
        -not \( -path '/tmp/*' \) \
        -not \( -path '/var/home/*' \) \
        -not \( -path '/var/log/gitlab/*' \) \
        -not \( -path '/var/spool/cron/crontabs/*' \) \
        -print0 2>/dev/null )
done

echo "<<<file_ownership>>>"
if ! test ${#FILEs[*]} -eq 0; then
    echo -n "${#FILEs[*]} file(s) found with invalid ownership (must be UID outside of ${ADMIN_UID_MIN}-${ADMIN_UID_MAX}): "
    echo "${FILEs[*]}"
    exit $WARNING
else
    echo "All files have valid ownership"
    exit $OK
fi

Binary file not shown.

View File

@ -0,0 +1,42 @@
---
# Handlers for the debian common role.
- name: load sysctl tweaks
  # command (not shell): no pipes, redirection, or globbing are used
  command: "/sbin/sysctl -p /etc/sysctl.d/{{ item }}"
  loop: "{{ sysctl_files }}"

- name: restart zramswap
  service:
    name: zramswap
    state: restarted

- name: restart ntp
  service:
    name: ntp
    state: restarted

- name: restart postfix
  service:
    name: postfix
    state: restarted

- name: restart nftables
  service:
    name: nftables
    state: restarted

- name: restart rsyslog
  service:
    name: rsyslog
    state: restarted

- name: restart ssh
  service:
    name: ssh
    state: restarted

- name: restart fail2ban
  service:
    name: fail2ban
    state: restarted

- name: generate locales
  command: /usr/sbin/locale-gen

View File

@ -0,0 +1,92 @@
---
# Configure apt: sources, keyrings, pins, debconf preferences, and cleanup
# scripts. The update_cache fact accumulates whether any change requires a
# cache refresh at the end.
- name: install apt prerequisite packages
  apt:
    name:
      - gpg
      - gnupg
    state: latest
  when: bootstrap

- name: default to not refreshing the apt cache
  set_fact:
    update_cache: false

- name: install apt configuration files
  template:
    src: "etc/apt/apt.conf.d/{{ item }}.j2"
    dest: "/etc/apt/apt.conf.d/{{ item }}"
    # Quoted: an unquoted 0644 is parsed as the integer 420
    mode: "0644"
  loop: "{{ apt_configurations }}"
  register: configuration

- name: request cache refresh after configuration changes
  set_fact:
    update_cache: true
  when: configuration.changed

- name: install apt pins configuration file
  template:
    src: "etc/apt/preferences.d/pins.j2"
    dest: "/etc/apt/preferences.d/pins"
    mode: "0644"
  register: pins

- name: request cache refresh after pin changes
  set_fact:
    update_cache: true
  when: pins.changed

- name: remove base apt sources files
  file:
    dest: /etc/apt/sources.list
    state: absent

- name: install apt sources files
  template:
    src: "etc/apt/sources.list.d/source.j2"
    dest: "/etc/apt/sources.list.d/{{ item.name }}.list"
    mode: "0644"
  loop: "{{ apt_sources }}"
  register: sources

- name: request cache refresh after source changes
  set_fact:
    update_cache: true
  when: sources.changed

- name: install supplemental apt keyrings
  apt_key:
    url: "{{ item.gpg_url }}"
    id: "{{ item.gpg_id }}"
    keyring: "/etc/apt/trusted.gpg.d/{{ item.name }}.gpg"
    state: present
  when: item.gpg_url is defined and item.gpg_url
  loop: "{{ apt_sources }}"
  register: keyrings

- name: request cache refresh after keyring changes
  set_fact:
    update_cache: true
  when: keyrings.changed

- name: set apt package preferences
  debconf:
    name: "{{ item.name }}"
    question: "{{ item.question }}"
    vtype: "{{ item.vtype }}"
    value: "{{ item.value }}"
  loop: "{{ apt_preferences }}"
  register: preferences

- name: request cache refresh after preference changes
  set_fact:
    update_cache: true
  when: preferences.changed

- name: install cleanup scripts
  template:
    src: "usr/local/sbin/{{ item }}.j2"
    dest: "/usr/local/sbin/{{ item }}"
    mode: "0755"
  loop:
    - dpkg-cleanup.sh
    - kernel-cleanup.sh

- name: update apt cache
  apt:
    update_cache: true
  when: update_cache

View File

@ -0,0 +1,17 @@
---
# Bootstrap-time APT maintenance: flush the package cache, apply all
# pending updates, and make sure dbus is available for later modules.
- name: clean out apt cache
  # NOTE(review): removes the whole archives directory (including the
  # "partial" subdirectory) rather than running "apt-get clean"; apt
  # recreates it on the next operation -- confirm this is intentional.
  file:
    path: /var/cache/apt/archives
    state: absent
- name: install pending updates and autoremove
  apt:
    update_cache: yes
    autoremove: yes
    upgrade: full
- name: install dbus if missing
  apt:
    name:
      - dbus
    state: latest

View File

@ -0,0 +1,24 @@
---
# Install the configured package set, purge unwanted packages, and ensure
# core services are running and enabled at boot.
- name: install new packages
  apt:
    name: "{{ packages_add }}"
    state: latest
- name: remove unneeded packages
  apt:
    name: "{{ packages_remove }}"
    state: absent
    purge: yes
- name: ensure services are started and enabled
  service:
    name: "{{ item }}"
    state: started
    enabled: yes
  loop: "{{ enabled_services }}"
# was "needrestrt": typo in the task name
- name: disable needrestart dpkg integration if present
  # clearing the execute bits stops dpkg from invoking the needrestart hook
  file:
    dest: /usr/lib/needrestart/dpkg-status
    mode: o-x,g-x,u-x
  when: "'needrestart' in packages_add"

View File

@ -0,0 +1,23 @@
---
# Install the role's custom local facts and regather so that subsequent
# tasks can use them (in particular moe_release, consumed by the apt
# sources templates).
- name: create local facts directory
  file:
    dest: /etc/ansible/facts.d
    state: directory
    recurse: yes
- name: install local facts
  template:
    src: "etc/ansible/facts.d/{{ item }}.fact.j2"
    dest: "/etc/ansible/facts.d/{{ item }}.fact"
    # quoted so the octal literal is not read by YAML as the integer 493
    mode: "0755"
  register: installed_facts
  loop: "{{ custom_facts }}"
- name: regather all facts
  setup:
    gather_subset: "all,local"
  when: installed_facts.changed
- name: set moe_release fact
  set_fact:
    moe_release: "{{ ansible_local.moe_release }}"

View File

@ -0,0 +1,117 @@
---
# Entry point for the debian-common role. Ordering matters: local facts are
# gathered first, APT is configured before packages, and bootstrap-only
# steps (apt-bootstrap plus the two restarts) run only on the first ever
# configuration of a host.
# NOTE(review): bare "include:" is deprecated and removed in newer
# ansible-core releases -- confirm the target Ansible version or migrate to
# include_tasks/import_tasks.

# First-run check
# Determines if the system has been bootstrapped previously
- name: ensure moe directory exists
  file:
    dest: /etc/moe
    state: directory
  tags: always
- name: first run bootstrap check
  # creates: makes this a run-once marker; "changed" therefore means this
  # is the very first run, i.e. bootstrap mode
  shell: "date > /etc/moe/bootstrapped"
  register: bootstrap_check
  args:
    creates: "/etc/moe/bootstrapped"
  tags: always
- set_fact:
    bootstrap: no
  tags: always
- set_fact:
    bootstrap: yes
  when: bootstrap_check.changed
  tags: always
# Set system hostname
# Ensures that the system hostname matches the inventory hostname
# NOTE(review): only writes /etc/hostname; presumably the bootstrap reboot
# below applies it -- confirm behaviour for non-bootstrap hostname changes.
- name: set hostname to inventory_hostname
  copy:
    dest: /etc/hostname
    content: "{{ inventory_hostname }}\n"
  tags: always
# Custom facts
# Loads facts.yml which installs and regathers supplemental local facts
- include: facts.yml
  tags: debian-facts
# Configure APT environment
# Loads apt-base.yml which configures base Debian repositories
- include: apt-base.yml
  tags: debian-apt-base
# Bootstrap APT configuration
# Loads apt-bootstrap.yml when bootstrap=yes to ensure system is ready for bootstrap
- include: apt-bootstrap.yml
  when: bootstrap
  tags: debian-apt-bootstrap
# Bootstrap restart (pre-configure)
# Loads restart.yml when bootstrap=yes to ensure system is ready for bootstrap
- include: restart.yml
  when: bootstrap
  tags: always
# Package configuration
# Loads apt-packages.yml to install and remove packages for base system setup
- include: apt-packages.yml
  tags: debian-apt-packages
# General system setup
# Loads system.yml to configure core system items like capabilities, locales, timezones, cron, ntp, etc.
- include: system.yml
  tags: debian-system
# Networking setup
# Loads network.yml to configure core network items like resolv.conf, hosts, firewall, etc.
- include: network.yml
  tags: debian-network
# Syslog setup
# Loads syslog.yml to configure rsyslog
- include: syslog.yml
  tags: debian-syslog
# Shell setup
# Loads shell.yml to configure basic global shell items like sudo, bash, motd, etc.
- include: shell.yml
  tags: debian-shell
# SSH setup
# Loads ssh.yml to configure SSH server for remote management
- include: ssh.yml
  tags: debian-ssh
# Monitoring setup
# Loads monitoring.yml to configure remote monitoring items like check_mk, etc.
- include: monitoring.yml
  tags: debian-monitoring
# Root user setup
# Loads root.yml to configure root user
- include: users/root.yml
  tags: debian-users-root
# Backup user setup
# Loads backup.yml to configure backup user
- include: users/backup.yml
  tags: debian-users-backup
# Deploy (Ansible) user setup
# Loads deploy.yml to configure deploy user
- include: users/deploy.yml
  tags: debian-users-deploy
# Administrative users setup
# Loads admin.yml to configure administrative shell users
# (runs once per entry of admin_users; "item" inside admin.yml is one user)
- include: users/admin.yml
  loop: "{{ admin_users }}"
  tags: debian-users-admin
# Bootstrap restart (post-configure)
# Loads restart.yml when bootstrap=yes to ensure system is finalized after bootstrap
- include: restart.yml
  when: bootstrap
  tags: always

View File

@ -0,0 +1,13 @@
---
# Remote monitoring setup: check_mk agent logwatch config and plugins.
- name: install check_mk logwatch configuration file
  # NOTE(review): assumes /etc/check_mk already exists (presumably created
  # by the check-mk-agent package installed via packages_add) -- confirm.
  template:
    src: etc/check_mk/logwatch.cfg.j2
    dest: /etc/check_mk/logwatch.cfg
    mode: 0644
- name: install check_mk agent check configuration files
  copy:
    src: "usr/lib/check_mk_agent/plugins/{{ item }}"
    dest: "/usr/lib/check_mk_agent/plugins/{{ item }}"
    mode: 0755
  loop: "{{ check_mk_plugins }}"

View File

@ -0,0 +1,33 @@
---
# Core network configuration: cloud-init override, hosts file, resolver
# and nftables firewall.
- name: disable managed /etc/hosts from cloud-init
  lineinfile:
    dest: /etc/cloud/cloud.cfg
    regexp: "^manage_etc_hosts"
    # was " manage_etc_hosts:false": without a space after the colon
    # cloud-init's YAML parser reads the line as a plain scalar instead of
    # a key/value pair, and the leading blank mis-indents the top-level key
    line: "manage_etc_hosts: false"
  # best-effort: cloud.cfg may not exist on non-cloud hosts
  ignore_errors: yes
- name: write hosts configuration file
  template:
    src: etc/hosts.j2
    dest: /etc/hosts
    mode: "0644"
- name: write resolver configuration files
  template:
    src: "{{ item }}.j2"
    dest: "/{{ item }}"
    mode: "0644"
  loop:
    - etc/dhcp/dhclient-enter-hooks.d/noresolv
    - etc/resolv.conf
  # best-effort: the dhclient hooks directory may be absent
  ignore_errors: yes
- name: write firewall rules configuration file
  template:
    src: etc/nftables.conf.j2
    dest: /etc/nftables.conf
  when: nftables_rules is defined and nftables_rules
  notify:
    - restart nftables
- meta: flush_handlers

View File

@ -0,0 +1,10 @@
---
# Reboot the host (invoked from main.yml during bootstrap only) and give
# it a moment to settle before continuing.
- name: restart system
  reboot:
    post_reboot_delay: 15
- name: wait 15 seconds for system to stabilize
  pause:
    seconds: 15
  # run the pause on the controller, unprivileged
  become: no
  connection: local

View File

@ -0,0 +1,49 @@
---
# Global shell environment: sudoers, bash, motd, htop defaults and
# supplemental groups.
- name: install sudo configuration file
  template:
    src: etc/sudoers.j2
    dest: /etc/sudoers
    mode: "0440"
    # refuse to install a syntactically broken sudoers file, which would
    # otherwise lock sudo out on the host
    validate: /usr/sbin/visudo -cf %s
- name: install global bashrc configuration file
  template:
    src: etc/bash.bashrc.j2
    dest: /etc/bash.bashrc
    mode: "0644"
- name: install general profile.d script files
  template:
    src: "{{ item }}.j2"
    dest: "/{{ item }}"
    mode: "0755"
  loop:
    - etc/profile.d/w.sh
- name: remove default motd configuration file
  file:
    dest: /etc/motd
    state: absent
- name: install motd handler script file
  template:
    src: usr/local/sbin/update-motd.sh.j2
    dest: /usr/local/sbin/update-motd.sh
    mode: "0755"
- name: install motd update cron file
  template:
    src: etc/cron.d/update-motd.j2
    dest: /etc/cron.d/update-motd
    mode: "0644"
- name: install global htoprc configuration file
  template:
    src: etc/htoprc.j2
    dest: /etc/htoprc
    mode: "0644"
- name: add additional user groups
  group:
    name: "{{ item.name }}"
    gid: "{{ item.gid }}"
  loop: "{{ add_groups }}"

View File

@ -0,0 +1,56 @@
---
# SSH server hardening: configuration files, host key hygiene and fail2ban.
- name: install ssh configuration files
  template:
    src: "{{ item }}.j2"
    dest: "/{{ item }}"
    mode: "0644"
  notify:
    - restart ssh
  loop:
    - etc/ssh/ssh_config
    - etc/ssh/shosts.equiv
    - etc/ssh/ssh_known_hosts
    - etc/pam.d/sshd
- name: install sshd server configuration file
  # installed separately so the daemon config can be syntax-checked before
  # it replaces the live file; a bad sshd_config would lock out remote access
  template:
    src: etc/ssh/sshd_config.j2
    dest: /etc/ssh/sshd_config
    mode: "0644"
    validate: /usr/sbin/sshd -t -f %s
  notify:
    - restart ssh
- name: clean up unwanted ssh host keys (DSA and ECDSA)
  file:
    name: "{{ item }}"
    state: absent
  notify:
    - restart ssh
  loop:
    - /etc/ssh/ssh_host_dsa_key
    - /etc/ssh/ssh_host_dsa_key.pub
    - /etc/ssh/ssh_host_ecdsa_key
    - /etc/ssh/ssh_host_ecdsa_key.pub
- name: correct permissions on host keys
  file:
    dest: "{{ item.name }}"
    mode: "{{ item.mode }}"
  loop:
    - name: /etc/ssh/ssh_host_rsa_key
      mode: "0600"
    - name: /etc/ssh/ssh_host_rsa_key.pub
      mode: "0644"
    - name: /etc/ssh/ssh_host_ed25519_key
      mode: "0600"
    - name: /etc/ssh/ssh_host_ed25519_key.pub
      mode: "0644"
- name: install fail2ban configuration files
  template:
    src: "{{ item }}.j2"
    dest: "/{{ item }}"
    mode: "0644"
  notify:
    - restart fail2ban
  loop:
    - etc/fail2ban/action.d/route.conf
    - etc/fail2ban/filter.d/sshd.conf
    - etc/fail2ban/jail.d/global.local
    - etc/fail2ban/jail.d/sshd.conf
    - etc/fail2ban/jail.d/sshd.local
- meta: flush_handlers

View File

@ -0,0 +1,25 @@
---
# Logging setup: rsyslog, logrotate policies and journald persistence.
- name: install rsyslog config
  template:
    src: etc/rsyslog.conf.j2
    dest: /etc/rsyslog.conf
    mode: 0644
  notify:
    - restart rsyslog
- name: install logrotate configs
  template:
    src: "{{ item }}.j2"
    dest: "/{{ item }}"
    mode: 0644
  loop:
    - etc/logrotate.d/rsyslog
    - etc/logrotate.d/backup-rsync
- name: set journalctl persistence
  # NOTE(review): no notify here -- journald keeps running with its old
  # settings until systemd-journald is restarted; confirm this is intended.
  template:
    src: etc/systemd/journald.conf.j2
    dest: /etc/systemd/journald.conf
    mode: 0644
- meta: flush_handlers

View File

@ -0,0 +1,77 @@
---
# Core system configuration: zram swap, file capabilities, locales,
# timezone, sysctl, cron, ntp and postfix.
- name: install zramswap configuration
  template:
    src: etc/default/zramswap.j2
    dest: /etc/default/zramswap
  notify: restart zramswap
- name: enable and activate zramswap
  service:
    name: zramswap
    state: started
    enabled: yes
- name: set bin capabilities
  # best-effort: some listed binaries may be absent on a given host
  capabilities:
    path: "{{ item.path }}"
    capability: "{{ item.capability }}"
  ignore_errors: yes
  loop: "{{ set_capabilities }}"
- name: install locale configuration files
  template:
    src: "{{ item }}.j2"
    dest: "/{{ item }}"
    mode: 0644
  notify:
    - generate locales
  loop:
    - etc/default/locale
    - etc/locale.gen
- name: set timezone
  # NOTE(review): mode on a symlink is effectively meaningless, and
  # /etc/timezone is not updated alongside /etc/localtime -- confirm.
  file:
    src: "/usr/share/zoneinfo/{{ timezone }}"
    dest: /etc/localtime
    state: link
    mode: 0644
    force: yes
- name: install sysctl tweaks
  template:
    src: "etc/sysctl.d/{{ item }}.j2"
    dest: "/etc/sysctl.d/{{ item }}"
    mode: 0644
  notify:
    - load sysctl tweaks
  loop: "{{ sysctl_files }}"
- name: install base crontab file
  template:
    src: etc/crontab.j2
    dest: /etc/crontab
    mode: 0644
- name: install ntp configuration file
  template:
    src: etc/ntp.conf.j2
    dest: /etc/ntp.conf
    mode: 0644
  notify:
    - restart ntp
- name: register status of mailhost flag file
  stat:
    path: "{{ postfix_mailhost_flag_file }}"
  register: mailhost_flag
- name: install postfix configuration file (non-mailhost only)
  # skipped when the flag file marks this host as a mailhost carrying its
  # own postfix configuration
  template:
    src: etc/postfix/main.cf.j2
    dest: /etc/postfix/main.cf
    mode: 0644
  when: not mailhost_flag.stat.exists
  notify:
    - restart postfix
- meta: flush_handlers

View File

@ -0,0 +1,77 @@
---
# Per-admin-user setup; looped from main.yml over admin_users, so "item"
# here is one user dict (keys used: name, uid, shell, add_groups).
- name: "ensure {{ item.name }} user exists and is configured properly"
  # NOTE(review): if item.add_groups is empty the groups string ends with a
  # trailing comma -- confirm the user module tolerates this.
  user:
    name: "{{ item.name }}"
    uid: "{{ item.uid }}"
    group: operator
    groups: "adm,sudo,{{ item.add_groups|join(',') }}"
    shell: "{{ item.shell }}"
    home: "/var/home/{{ item.name }}"
    createhome: yes
    move_home: yes
    append: yes
    state: present
- name: "set ownership of {{ item.name }} home directory"
  file:
    dest: "/var/home/{{ item.name }}"
    state: directory
    owner: "{{ item.name }}"
    group: operator
    mode: 0700
- name: "create {{ item.name }} .ssh configuration directory"
  file:
    dest: "/var/home/{{ item.name }}/.ssh"
    state: directory
    owner: "{{ item.name }}"
    group: operator
    mode: 0700
- name: "write {{ item.name }} ssh authorized_keys configuration file"
  template:
    src: var/home/user/ssh/authorized_keys.j2
    dest: "/var/home/{{ item.name }}/.ssh/authorized_keys"
    owner: "{{ item.name }}"
    group: operator
    mode: 0640
- name: "write {{ item.name }} profile configuration file"
  template:
    src: var/home/user/profile.j2
    dest: "/var/home/{{ item.name }}/.profile"
    owner: "{{ item.name }}"
    group: operator
    mode: 0750
- name: "write {{ item.name }} bashrc configuration file"
  template:
    src: var/home/user/bashrc.j2
    dest: "/var/home/{{ item.name }}/.bashrc"
    owner: "{{ item.name }}"
    group: operator
    mode: 0750
- name: "write {{ item.name }} bash_logout configuration file"
  template:
    src: var/home/user/bash_logout.j2
    dest: "/var/home/{{ item.name }}/.bash_logout"
    owner: "{{ item.name }}"
    group: operator
    mode: 0750
- name: "create {{ item.name }} vim state directory"
  file:
    dest: "/var/home/{{ item.name }}/.vim"
    state: directory
    owner: "{{ item.name }}"
    group: operator
    mode: 0700
- name: "write {{ item.name }} vimrc configuration file"
  template:
    src: var/home/user/vimrc.j2
    dest: "/var/home/{{ item.name }}/.vimrc"
    owner: "{{ item.name }}"
    group: operator
    mode: 0600

View File

@ -0,0 +1,40 @@
---
# Backup user setup: shell, ssh key, timestamp script and shares list.
- name: ensure backup user has /bin/sh shell
  user:
    name: backup
    shell: /bin/sh
    state: present
- name: create backup .ssh configuration directory
  file:
    dest: /var/backups/.ssh
    state: directory
    owner: backup
    group: operator
    mode: 0700
- name: write backup ssh authorized_keys configuration file
  template:
    src: var/backups/ssh/authorized_keys.j2
    dest: /var/backups/.ssh/authorized_keys
    owner: backup
    group: operator
    mode: 0640
- name: install post-backup timestamp script
  template:
    src: var/backups/timestamp.sh.j2
    dest: /var/backups/timestamp.sh
    mode: 0755
- name: create backup shares file
  # touch guarded by creates: keeps the task idempotent across runs
  command: touch /var/backups/shares
  args:
    creates: /var/backups/shares
- name: set ownership of backup shares file
  file:
    dest: /var/backups/shares
    owner: backup
    group: operator
    mode: 0644

View File

@ -0,0 +1,35 @@
---
# Deploy (Ansible) user setup: fixed UID 200 so it is stable fleet-wide.
- name: ensure deploy user exists and is configured properly
  user:
    name: deploy
    uid: 200
    group: operator
    shell: /bin/bash
    home: /var/home/deploy
    createhome: yes
    move_home: yes
    state: present
- name: set ownership of deploy home directory
  file:
    dest: /var/home/deploy
    state: directory
    owner: deploy
    group: operator
    mode: 0700
- name: create deploy .ssh configuration directory
  file:
    dest: /var/home/deploy/.ssh
    state: directory
    owner: deploy
    group: operator
    mode: 0700
- name: write deploy ssh authorized_keys configuration file
  template:
    src: var/home/deploy/ssh/authorized_keys.j2
    dest: /var/home/deploy/.ssh/authorized_keys
    owner: deploy
    group: operator
    mode: 0640

View File

@ -0,0 +1,23 @@
---
# Root user setup: password, ssh hygiene, vim configuration.
- name: set root password
  user:
    name: root
    # Use the password itself as the hash salt so the generated hash is
    # stable across runs (idempotence).
    # NOTE(review): sha512-crypt salts are limited to 16 chars from
    # [a-zA-Z0-9./]; a password containing other characters may be
    # rejected as a salt -- confirm.
    password: "{{ root_password | password_hash('sha512', root_password) }}"
    state: present
- name: remove any root known_hosts configuration file
  file:
    dest: /root/.ssh/known_hosts
    state: absent
- name: create root vim state directory
  file:
    dest: /root/.vim
    state: directory
    mode: 0700
- name: write admin user vimrc configuration file to root homedir
  template:
    src: var/home/user/vimrc.j2
    dest: /root/.vimrc
    mode: 0600

View File

@ -0,0 +1,5 @@
#!/bin/bash
# Ansible fact - dhcp_status
# {{ ansible_managed }}
# Emit "dhcp" (JSON-quoted) if any stanza in /etc/network/interfaces is
# DHCP-configured, otherwise an empty string. grep's stderr is suppressed
# so the fact output stays valid JSON on hosts where the file is absent
# (e.g. netplan/NetworkManager-managed systems).
DHCP_STATUS="$( grep -o 'dhcp' /etc/network/interfaces 2>/dev/null | uniq )"
echo "\"${DHCP_STATUS}\""

View File

@ -0,0 +1,5 @@
#!/bin/bash
# Ansible fact - host_group
# {{ ansible_managed }}
# Derive the host's group from its short hostname by stripping all digits
# (e.g. "web12" -> "web"); emitted as a JSON-quoted string.
HOST_GROUP="$( hostname -s | sed 's/[0-9]*//g' )"
echo "\"${HOST_GROUP}\""

View File

@ -0,0 +1,8 @@
#!/bin/bash
# Ansible fact - host_id
# {{ ansible_managed }}
# Extract the numeric suffix of the short hostname (e.g. "web12" -> "12"),
# defaulting to "0" when the hostname carries no digits.
# NOTE(review): a hostname with several separate digit runs makes grep -o
# print one line per run -- assumes at most one run of digits; confirm.
HOST_ID="$( hostname -s | grep -o '[0-9]\+' )"
if [[ -z ${HOST_ID} ]]; then
    HOST_ID="0"
fi
echo "\"${HOST_ID}\""

View File

@ -0,0 +1,8 @@
#!/bin/bash
# Ansible fact - moe_release
# {{ ansible_managed }}
# Emit a JSON object describing the OS release: dpkg architecture, Debian
# codename and version, and the derived MOE version (1.x where
# x = Debian major version - 11, i.e. bookworm/12 -> 1.1).
# NOTE(review): /etc/os-release on Debian testing/sid has no VERSION_ID
# or VERSION_CODENAME, which would break the arithmetic below -- confirm
# only stable releases are targeted.
DPKG_ARCHITECTURE="$( dpkg --print-architecture )"
DEBIAN_CODENAME="$( grep 'VERSION_CODENAME=' /etc/os-release | sed 's/VERSION_CODENAME=//' )"
DEBIAN_VERSION="$( grep 'VERSION_ID=' /etc/os-release | sed -E 's/VERSION_ID="(.*)"/\1/' )"
MOE_VERSION="1.$(( ${DEBIAN_VERSION} - 11 ))"
echo "{ \"dpkg_architecture\": \"${DPKG_ARCHITECTURE}\", \"moe_version\": \"${MOE_VERSION}\", \"debian_version\": \"${DEBIAN_VERSION}\", \"debian_codename\": \"${DEBIAN_CODENAME}\" }"

View File

@ -0,0 +1,5 @@
# apt configuration: disable recommends
# {{ ansible_managed }}
APT::Install-Recommends "0";
APT::Install-Suggests "0";

View File

@ -0,0 +1,5 @@
# apt configuration: enable apt-cacher-ng proxy
# {{ ansible_managed }}
Acquire::http::Proxy::debian.mirror.rafal.ca "http://{{ blsedomains_admindomain }}:3142";
Acquire::http::Proxy::security.debian.org "http://{{ blsedomains_admindomain }}:3142";

View File

@ -0,0 +1,30 @@
# apt configuration: unattended upgrades
# {{ ansible_managed }}
Unattended-Upgrade::Origins-Pattern {
"origin=Debian,codename=${distro_codename},label=Debian";
"origin=Debian,codename=${distro_codename},label=Debian-Security";
};
Unattended-Upgrade::Package-Blacklist {
# "libc6$";
# "libc6-dev$";
# "libc6-i686$";
};
# General configurations
Unattended-Upgrade::AutoFixInterruptedDpkg "true";
Unattended-Upgrade::MinimalSteps "true";
Unattended-Upgrade::InstallOnShutdown "false";
Unattended-Upgrade::Mail "";
Unattended-Upgrade::MailOnlyOnError "true";
Unattended-Upgrade::Remove-Unused-Kernel-Packages "true";
Unattended-Upgrade::Remove-New-Unused-Dependencies "true";
Unattended-Upgrade::Remove-Unused-Dependencies "true";
Unattended-Upgrade::SyslogEnable "true";
Unattended-Upgrade::SyslogFacility "daemon";
Unattended-Upgrade::Verbose "false";
Unattended-Upgrade::Debug "false";
# Reboot configurations - do not reboot automatically
Unattended-Upgrade::Automatic-Reboot "false";

View File

@ -0,0 +1,11 @@
# apt configuration: pinning preferences
# {{ ansible_managed }}
Package: *
Pin: release a={{ moe_release.debian_codename }}
Pin-Priority: 999
# Ensure backports are not installed by default
Package: *
Pin: release a={{ moe_release.debian_codename }}-backports
Pin-Priority: -1

View File

@ -0,0 +1,7 @@
# {{ item.name }} sources.list entry
# {{ ansible_managed }}
deb {% if item.gpg_url is defined and item.gpg_url -%}[signed-by=/etc/apt/trusted.gpg.d/{{ item.name }}.gpg] {% endif -%} {{ item.url }} {{ item.distribution }} {{ item.components|join(' ') }}
{% if item.has_src %}
deb-src {% if item.gpg_url is defined and item.gpg_url -%}[signed-by=/etc/apt/trusted.gpg.d/{{ item.name }}.gpg] {% endif -%} {{ item.url }} {{ item.distribution }} {{ item.components|join(' ') }}
{% endif %}

View File

@ -0,0 +1,126 @@
# System-wide .bashrc file for interactive bash(1) shells.
# {{ ansible_managed }}
# To enable the settings / commands in this file for login shells as well,
# this file has to be sourced in /etc/profile.
# Fix the preceeding space stupidity
export HISTCONTROL=ignorespace
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
#------------------------------------------------------------------------------
# Returncode.
#------------------------------------------------------------------------------
function returncode
{
returncode=$?
if [ $returncode != 0 ]; then
echo "[$returncode]"
else
echo ""
fi
}
alias ll='ls -al'
use_color=false
# Set colorful PS1 only on colorful terminals.
# dircolors --print-database uses its own built-in database
# instead of using /etc/DIR_COLORS. Try to use the external file
# first to take advantage of user additions. Use internal bash
# globbing instead of external grep binary.
safe_term=${TERM//[^[:alnum:]]/?} # sanitize TERM
match_lhs=""
[[ -f ~/.dir_colors ]] && match_lhs="${match_lhs}$(<~/.dir_colors)"
[[ -f /etc/DIR_COLORS ]] && match_lhs="${match_lhs}$(</etc/DIR_COLORS)"
[[ -z ${match_lhs} ]] \
&& type -P dircolors >/dev/null \
&& match_lhs=$(dircolors --print-database)
[[ $'\n'${match_lhs} == *$'\n'"TERM "${safe_term}* ]] && use_color=true
if ${use_color} ; then
# Enable colors for ls, etc. Prefer ~/.dir_colors #64489
if type -P dircolors >/dev/null ; then
if [[ -f ~/.dir_colors ]] ; then
eval $(dircolors -b ~/.dir_colors)
elif [[ -f /etc/DIR_COLORS ]] ; then
eval $(dircolors -b /etc/DIR_COLORS)
else
eval $(dircolors)
fi
fi
if [[ ${EUID} == 0 ]] ; then
PS1='\[\033[0;31m\]$(returncode)\[\033[0;37m\]\[\033[0;35m\]${debian_chroot:+($debian_chroot)}\[\033[01;31m\]\H\[\033[01;34m\] \w \$\[\033[00m\] '
elif [[ ${UID} == 200 ]] ; then
PS1='\[\033[0;31m\]$(returncode)\[\033[0;37m\]\[\033[0;35m\]${debian_chroot:+($debian_chroot)}\[\033[01;31m\]\u@\H\[\033[01;34m\] \w \$\[\033[00m\] '
else
PS1='\[\033[0;31m\]$(returncode)\[\033[0;37m\]\[\033[0;35m\]${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\H\[\033[01;34m\] \w \$\[\033[00m\] '
fi
alias ls='ls --color=auto'
alias grep='grep --colour=auto'
alias fgrep='fgrep --colour=auto'
alias egrep='egrep --colour=auto'
alias ll='ls -lF'
alias la='ls -A'
alias l='ls -CF'
else
if [[ ${EUID} == 0 ]] ; then
# show root@ when we don't have colors
PS1='\[$(returncode)\]\u@\H \w \$ '
else
PS1='\[$(returncode)\]\u@\H \w \$ '
fi
fi
# Try to keep environment pollution down, EPA loves us.
unset use_color safe_term match_lhs
# Commented out, don't overwrite xterm -T "title" -n "icontitle" by default.
# If this is an xterm set the title to user@host:dir
#case "$TERM" in
#xterm*|rxvt*)
# PROMPT_COMMAND='echo -ne "\033]0;${USER}@${HOSTNAME}: ${PWD}\007"'
# ;;
#*)
# ;;
#esac
# enable bash completion in interactive shells
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# if the command-not-found package is installed, use it
if [ -x /usr/lib/command-not-found -o -x /usr/share/command-not-found/command-not-found ]; then
function command_not_found_handle {
# check because c-n-f could've been removed in the meantime
if [ -x /usr/lib/command-not-found ]; then
/usr/bin/python /usr/lib/command-not-found -- "$1"
return $?
elif [ -x /usr/share/command-not-found/command-not-found ]; then
/usr/bin/python /usr/share/command-not-found/command-not-found -- "$1"
return $?
else
printf "%s: command not found\n" "$1" >&2
return 127
fi
}
fi

View File

@ -0,0 +1,58 @@
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# logwatch.cfg
# This file configures mk_logwatch. Define your logfiles
# and patterns to be looked for here.
# Patterns are indented with one space and are prefixed with:
# C: Critical messages
# W: Warning messages
# I: ignore these lines (OK)
# R: Rewrite the output previous match. You can use \1, \2 etc. for refer to groups (.*) of this match
# The first match decides. Lines that do not match any pattern
# are ignored
# Globbing patterns are allowed:
# /sapdata/*/saptrans.log
# C ORA-
/var/log/kern.log
I registered panic notifier
C panic
C Oops
W generic protection rip
W .*Unrecovered read error - auto reallocate failed
/var/log/auth.log
I sshd.*Corrupted MAC on input
/var/log/system.log
C Fail event detected on md device
I mdadm.*: Rebuild.*event detected
W mdadm\[
W ata.*hard resetting link
W ata.*soft reset failed (.*FIS failed)
W device-mapper: thin:.*reached low water mark
C device-mapper: thin:.*no free space

View File

@ -0,0 +1,6 @@
# cron file for motd
# {{ ansible_managed }}
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
@reboot root /usr/local/sbin/update-motd.sh &>/dev/null
*/5 * * * * root /usr/local/sbin/update-motd.sh &>/dev/null

View File

@ -0,0 +1,12 @@
# /etc/crontab: system-wide crontab
# {{ ansible_managed }}
SHELL=/bin/sh
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
# m h dom mon dow user command
00 * * * * root cd / && run-parts --report /etc/cron.hourly
05 0 * * * root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.daily )
15 0 * * 7 root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.weekly )
30 0 1 * * root test -x /usr/sbin/anacron || ( cd / && run-parts --report /etc/cron.monthly )
#

View File

@ -0,0 +1,7 @@
# Default locale settings
# {{ ansible_managed }}
LANGUAGE={{ locale }}
LC_ALL={{ locale }}
LANG={{ locale }}
# was LC_TYPE: the locale category is named LC_CTYPE (see locale(7));
# LC_TYPE is not recognized by libc and was silently ignored
LC_CTYPE={{ locale }}

View File

@ -0,0 +1,25 @@
# Default zramswap settings
# {{ ansible_managed }}
# Compression algorithm selection
# speed: lz4 > zstd > lzo
# compression: zstd > lzo > lz4
# This is not inclusive of all that is available in latest kernels
# See /sys/block/zram0/comp_algorithm (when zram module is loaded) to see
# what is currently set and available for your kernel[1]
# [1] https://github.com/torvalds/linux/blob/master/Documentation/blockdev/zram.txt#L86
ALGO=zstd
# Specifies the amount of RAM that should be used for zram
# based on a percentage the total amount of available memory
# This takes precedence and overrides SIZE below
PERCENT=50
# Specifies a static amount of RAM that should be used for
# the ZRAM devices, this is in MiB
#SIZE=256
# Specifies the priority for the swap devices, see swapon(2)
# for more details. Higher number = higher priority
# This should probably be higher than hdd/ssd swaps.
PRIORITY=1000

View File

@ -0,0 +1,6 @@
#!/bin/sh
# Disable resolv.conf generation from DHCP
# {{ ansible_managed }}
# dhclient invokes make_resolv_conf() from its enter hooks to rewrite
# /etc/resolv.conf; overriding it with a no-op keeps the statically
# managed resolv.conf installed by this role in place.
make_resolv_conf() {
    :
}

View File

@ -0,0 +1,15 @@
# fail2ban action - route
[Definition]
actionban = ip route add <blocktype> <ip>
actionunban = ip route del <blocktype> <ip>
actioncheck =
actionstart =
actionstop =
[Init]
# Option: blocktype
# Note: Type can be blackhole, unreachable and prohibit. Unreachable and prohibit correspond to the ICMP reject messages.
# Values: STRING
blocktype = blackhole

View File

@ -0,0 +1,51 @@
# Fail2Ban filter for openssh
# {{ ansible_managed }}
[INCLUDES]
# Read common prefixes. If any customizations available -- read them from
# common.local
before = common.conf
[Definition]
_daemon = sshd
failregex = ^%(__prefix_line)s(?:error: PAM: )?[aA]uthentication (?:failure|error|failed) for .* from <HOST>( via \S+)?\s*$
^%(__prefix_line)s(?:error: PAM: )?User not known to the underlying authentication module for .* from <HOST>\s*$
^%(__prefix_line)sFailed \S+ for (?P<cond_inv>invalid user )?(?P<user>(?P<cond_user>\S+)|(?(cond_inv)(?:(?! from ).)*?|[^:]+)) from <HOST>(?: port \d+)?(?: ssh\d*)?(?(cond_user):|(?:(?:(?! from ).)*)$)
^%(__prefix_line)sROOT LOGIN REFUSED.* FROM <HOST>\s*$
^%(__prefix_line)s[iI](?:llegal|nvalid) user .*? from <HOST>(?: port \d+)?\s*$
^%(__prefix_line)sUser .+ from <HOST> not allowed because not listed in AllowUsers\s*$
^%(__prefix_line)sUser .+ from <HOST> not allowed because listed in DenyUsers\s*$
^%(__prefix_line)sUser .+ from <HOST> not allowed because not in any group\s*$
^%(__prefix_line)srefused connect from \S+ \(<HOST>\)\s*$
^%(__prefix_line)s(?:error: )?Received disconnect from <HOST>: 3: .*: Auth fail(?: \[preauth\])?$
^%(__prefix_line)sUser .+ from <HOST> not allowed because a group is listed in DenyGroups\s*$
^%(__prefix_line)sUser .+ from <HOST> not allowed because none of user's groups are listed in AllowGroups\s*$
^(?P<__prefix>%(__prefix_line)s)User .+ not allowed because account is locked<SKIPLINES>(?P=__prefix)(?:error: )?Received disconnect from <HOST>: 11: .+ \[preauth\]$
^(?P<__prefix>%(__prefix_line)s)Disconnecting: Too many authentication failures for .+? \[preauth\]<SKIPLINES>(?P=__prefix)(?:error: )?Connection closed by <HOST> \[preauth\]$
^(?P<__prefix>%(__prefix_line)s)Connection from <HOST> port \d+(?: on \S+ port \d+)?<SKIPLINES>(?P=__prefix)Disconnecting: Too many authentication failures for .+? \[preauth\]$
^%(__prefix_line)s(error: )?maximum authentication attempts exceeded for .* from <HOST>(?: port \d*)?(?: ssh\d*)? \[preauth\]$
^%(__prefix_line)spam_unix\(sshd:auth\):\s+authentication failure;\s*logname=\S*\s*uid=\d*\s*euid=\d*\s*tty=\S*\s*ruser=\S*\s*rhost=<HOST>\s.*$
^%(__prefix_line)sUnable to negotiate with <HOST> .*$
^%(__prefix_line)sConnection reset by authenticating user .* <HOST> port .* \[preauth\]$
ignoreregex =
[Init]
# "maxlines" is number of log lines to buffer for multi-line regex searches
maxlines = 10
journalmatch = _SYSTEMD_UNIT=sshd.service + _COMM=sshd
# DEV Notes:
#
# "Failed \S+ for .*? from <HOST>..." failregex uses non-greedy catch-all because
# it is coming before use of <HOST> which is not hard-anchored at the end as well,
# and later catch-all's could contain user-provided input, which need to be greedily
# matched away first.
#
# Author: Cyril Jaquier, Yaroslav Halchenko, Petr Voralek, Daniel Black

View File

@ -0,0 +1,4 @@
[DEFAULT]
maxretry = 3
bantime = 14400
ignoreip = 127.0.0.0/8 10.0.0.0/8 198.55.48.48/28 24.53.125.139

View File

@ -0,0 +1,30 @@
# Fail2Ban configuration file
#
# Author: Wolfgang Karall (based on sshd.conf from Cyril Jaquier)
#
[INCLUDES]
# Read common prefixes. If any customizations available -- read them from
# common.local
before = common.conf
[Definition]
_daemon = sshd
# Option: failregex
# Notes.: regex to match the password failures messages in the logfile. The
# host must be matched by a group named "host". The tag "<HOST>" can
# be used for standard IP/hostname matching and is only an alias for
# (?:::f{4,6}:)?(?P<host>[\w\-.^_]+)
# Values: TEXT
#
failregex = ^%(__prefix_line)sUnable to negotiate with <HOST> .*$
# Option: ignoreregex
# Notes.: regex to ignore. If this regex matches, the line is ignored.
# Values: TEXT
#
ignoreregex = ^%(__prefix_line)sDid not receive identification string from .*$

View File

@ -0,0 +1,5 @@
[ssh]
enabled = true
filter = sshd
action = route
logpath = /var/log/auth.log

View File

@ -0,0 +1,14 @@
# Local system hosts file
# {{ ansible_managed }}
127.0.0.1 localhost
::1 ip6-localhost ip6-loopback
ff02::1 ip6-allmodes
ff02::2 ip6-allrouters
{% if hosts_entries is defined %}
{% for host in hosts_entries %}
{{ host.ip }}{% for name in host.names %} {{ name }}{% endfor %}
{% endfor %}
{% endif %}

View File

@ -0,0 +1,39 @@
# htop configuration file (Debian)
# {{ ansible_managed }}
fields=0 48 17 18 38 39 40 2 46 47 49 1
sort_key=46
sort_direction=0
tree_sort_key=0
tree_sort_direction=1
hide_kernel_threads=0
hide_userland_threads=0
shadow_other_users=0
show_thread_names=1
show_program_path=1
highlight_base_name=1
highlight_megabytes=1
highlight_threads=1
highlight_changes=0
highlight_changes_delay_secs=5
find_comm_in_cmdline=1
strip_exe_from_cmdline=1
show_merged_command=0
tree_view=0
tree_view_always_by_pid=0
header_margin=1
detailed_cpu_time=1
cpu_count_from_one=1
show_cpu_usage=1
show_cpu_frequency=0
show_cpu_temperature=0
degree_fahrenheit=0
update_process_names=1
account_guest_in_cpu_meter=1
color_scheme=0
enable_mouse=1
delay=15
left_meters=Hostname Clock Uptime Blank LeftCPUs2 Blank CPU Blank
left_meter_modes=2 2 2 2 1 2 1 2
right_meters=LoadAverage Tasks Systemd Blank RightCPUs2 Blank Memory Swap
right_meter_modes=2 2 2 2 1 2 1 1
hide_function_bar=0

View File

@ -0,0 +1,4 @@
# Locales configuration file
# {{ ansible_managed }}
en_CA.UTF-8 UTF-8

View File

@ -0,0 +1,10 @@
# Logrotate configuration for backup rsync log
# {{ ansible_managed }}
/var/backups/rsync.log
{
rotate 1
weekly
missingok
notifempty
}

View File

@ -0,0 +1,22 @@
# Logrotate configuration for standard log files
# {{ ansible_managed }}
/var/log/kern.log
/var/log/daemon.log
/var/log/auth.log
/var/log/cron.log
/var/log/mail.log
/var/log/boot.log
/var/log/system.log
{
rotate {{ logrotate_keepcount }}
{{ logrotate_interval }}
missingok
notifempty
compress
delaycompress
sharedscripts
postrotate
/usr/lib/rsyslog/rsyslog-rotate
endscript
}

View File

@ -0,0 +1,25 @@
#!/usr/sbin/nft -f
# {{ ansible_managed }}
flush ruleset
table inet filter {
chain input {
type filter hook input priority 0;
{% for rule in nftables_rules if rule.chain == "input" %}
{{ rule.rule }};
{% endfor %}
}
chain forward {
type filter hook forward priority 0;
{% for rule in nftables_rules if rule.chain == "forward" %}
{{ rule.rule }};
{% endfor %}
}
chain output {
type filter hook output priority 0;
{% for rule in nftables_rules if rule.chain == "output" %}
{{ rule.rule }};
{% endfor %}
}
}

View File

@ -0,0 +1,38 @@
# Main NTP configuration
# {{ ansible_managed }}
driftfile /var/lib/ntp/ntp.drift
statistics loopstats peerstats clockstats
filegen loopstats file loopstats type day enable
filegen peerstats file peerstats type day enable
filegen clockstats file clockstats type day enable
{% if 'remote' in group_names %}
server time.nrc.ca
server time.chu.nrc.ca
restrict -4 default kod notrap nomodify nopeer
restrict -6 default kod notrap nomodify nopeer
{% elif 'role_ceph' in group_names %}
server 10.60.0.251 iburst
server 10.60.0.252 iburst
server ceph1 iburst
server ceph2 iburst
server ceph3 iburst
restrict -4 default notrap nomodify
restrict -6 default notrap nomodify
{% else %}
server 10.100.0.251 burst
server 10.100.0.252 burst
restrict -4 default notrap nomodify
restrict -6 default notrap nomodify
{% endif %}
restrict 127.0.0.1
restrict ::1

View File

@ -0,0 +1,54 @@
# PAM configuration for the Secure Shell service
# {{ ansible_managed }}
# Standard Un*x authentication.
@include common-auth
# Disallow non-root logins when /etc/nologin exists.
account required pam_nologin.so
# Uncomment and edit /etc/security/access.conf if you need to set complex
# access limits that are hard to express in sshd_config.
# account required pam_access.so
# Standard Un*x authorization.
@include common-account
# SELinux needs to be the first session rule. This ensures that any
# lingering context has been cleared. Without this it is possible that a
# module could execute code in the wrong domain.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so close
# Set the loginuid process attribute.
session required pam_loginuid.so
# Create a new session keyring.
session optional pam_keyinit.so force revoke
# Standard Un*x session setup and teardown.
@include common-session
# Print the message of the day upon successful login.
session optional pam_motd.so motd=/run/blse-motd.dynamic
session optional pam_motd.so noupdate
# Print the status of the user's mailbox upon successful login.
#session optional pam_mail.so standard noenv # [1]
# Set up user limits from /etc/security/limits.conf.
session required pam_limits.so
# Read environment variables from /etc/environment and
# /etc/security/pam_env.conf.
session required pam_env.so # [1]
# In Debian 4.0 (etch), locale-related environment variables were moved to
# /etc/default/locale, so read that as well.
session required pam_env.so user_readenv=1 envfile=/etc/default/locale
# SELinux needs to intervene at login time to ensure that the process starts
# in the proper default security context. Only sessions which are intended
# to run in the user's context should be run after this.
session [success=ok ignore=ignore module_unknown=ignore default=bad] pam_selinux.so open
# Standard Un*x password updating.
@include common-password

View File

@ -0,0 +1,22 @@
# Postfix main configuration for non-MTA hosts
# {{ ansible_managed }}
smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU)
biff = no
append_dot_mydomain = no
readme_directory = no
smtpd_use_tls=no
alias_maps = hash:/etc/postfix/aliases
alias_database = hash:/etc/postfix/aliases
mydestination =
relayhost = {{ postfix_relay }}
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_size_limit = 0
recipient_delimiter = +
myorigin = $mydomain
mydomain = {{ postfix_domain }}
#inet_interfaces = 127.0.0.1,::1
inet_interfaces = 127.0.0.1
#inet_protocols = ipv4,ipv6
inet_protocols = ipv4

View File

@ -0,0 +1,7 @@
#!/bin/sh
# Message of the day script to print active users
# {{ ansible_managed }}
export PROCPS_FROMLEN=36 PROCPS_USERLEN=12
w

View File

@ -0,0 +1,19 @@
# DNS resolver configuration
# {{ ansible_managed }}
options timeout:1 attempts:3 rotate
{% if 'remote' in group_names %}
search {{ blsedomains_admindomain }}. {{ blsedomains_rootdomain }}.
nameserver 8.8.8.8
nameserver 8.8.4.4
{% else %}
{% if 'role_env' in group_names %}
search {{ blsedomains_mandomain }}. {{ blsedomains_hostdomain }}. {{ blsedomains_rootdomain }}.
{% elif 'physical' in group_names %}
search {{ blsedomains_mandomain }}. {{ blsedomains_hostdomain }}. {{ blsedomains_admindomain }}. {{ blsedomains_rootdomain }}.
{% else %}
search {{ blsedomains_hostdomain }}. {{ blsedomains_admindomain }}. {{ blsedomains_rootdomain }}.
{% endif %}
nameserver {{ blsecluster_rns1v4 }}
nameserver {{ blsecluster_rns2v4 }}
{% endif %}

View File

@ -0,0 +1,73 @@
# Main rsyslog configuration
# {{ ansible_managed }}
#### ####
#### MODULES ####
#### ####
module(load="imuxsock") # provides support for local system logging (e.g. via logger command)
module(load="imklog") # provides kernel logging support (previously done by rklogd)
{% if 'role_log' in group_names %}
module(load="imtcp" MaxSessions="1024")
{% else %}
$ModLoad imudp
$UDPServerAddress ::1
$UDPServerRun 514
{% endif %}
#### ####
#### GLOBAL DIRECTIVES ####
#### ####
$PreserveFQDN on
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
#### ####
$FileOwner root
$FileGroup adm
$FileCreateMode 0640
$DirCreateMode 0755
$Umask 0022
$WorkDirectory /var/spool/rsyslog
#### RULES ####
#### ####
ruleset(name="local") {
kern.* /var/log/kern.log
auth,authpriv.* /var/log/auth.log
{% if not 'rpi' in group_names %}
cron.* /var/log/cron.log
daemon,user.* /var/log/daemon.log
mail.* /var/log/mail.log
local5.* /var/log/nginx.log
local6.* /var/log/haproxy.log
local7.* /var/log/boot.log
*.info;kern,daemon,user,auth,authpriv,cron,mail,local6.none,local7.none /var/log/system.log
{% endif %}
{% if 'remote' in group_names %}
# Send everything to central logserver (rsyslog)
*.* @@log.{{ blsedomains_admindomain }}:514
{% else %}
# Send everything to central logserver (rsyslog)
*.* @@log.{{ blsedomains_hostdomain }}:514
{% endif %}
}
$DefaultRuleset local
{% if 'role_log' in group_names %}
ruleset(name="remote") {
kern.* /srv/log/kern.log
daemon,user.* /srv/log/daemon.log
auth,authpriv.* /srv/log/auth.log
cron.* /srv/log/cron.log
mail.* /srv/log/mail.log
local5.* /srv/log/nginx.log
local6.* /srv/log/haproxy.log
local7.* /srv/log/boot.log
*.info;kern,daemon,user,auth,authpriv,cron,mail,local6.none,local7.none /srv/log/system.log
}
input(type="imtcp" port="514" ruleset="remote")
{% endif %}

View File

@ -0,0 +1,8 @@
# SSH remote allowed hosts
# {{ ansible_managed }}
{% if hostbased_auth is defined and hostbased_auth %}
{% for entry in hostbased_auth %}
{{ entry }}
{% endfor %}
{% endif %}

View File

@ -0,0 +1,44 @@
# Default SSH client configuration
# {{ ansible_managed }}
Host *
# ForwardAgent no
# ForwardX11 no
# ForwardX11Trusted yes
# RhostsRSAAuthentication no
# RSAAuthentication yes
# PasswordAuthentication yes
# EnableSSHKeysign yes
# HostbasedAuthentication yes
# GSSAPIAuthentication no
# GSSAPIDelegateCredentials no
# GSSAPIKeyExchange no
# GSSAPITrustDNS no
# BatchMode no
# CheckHostIP yes
# AddressFamily any
# ConnectTimeout 0
# StrictHostKeyChecking ask
# IdentityFile ~/.ssh/identity
# IdentityFile ~/.ssh/id_rsa
# IdentityFile ~/.ssh/id_dsa
# Port 22
# Protocol 2,1
# Cipher 3des
# Ciphers aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,aes128-cbc,3des-cbc
# MACs hmac-md5,hmac-sha1,umac-64@openssh.com,hmac-ripemd160
# EscapeChar ~
# Tunnel no
# TunnelDevice any:any
# PermitLocalCommand no
# VisualHostKey no
# ProxyCommand ssh -q -W %h:%p gateway.example.com
# PreferredAuthentications hostbased,pubkey
SendEnv LANG LC_*
HashKnownHosts no
GSSAPIAuthentication yes
GSSAPIDelegateCredentials no
PubkeyAuthentication yes
HostbasedAuthentication yes
EnableSSHKeysign yes
CheckHostIP no

View File

@ -0,0 +1,8 @@
# SSH remote allowed hosts
# {{ ansible_managed }}
{% if hostbased_auth is defined and hostbased_auth %}
{% for entry in hostbased_auth %}
{{ hostvars[entry]['ansible_hostname'] }},{{ hostvars[entry]['ansible_fqdn'] }},{{ hostvars[entry]['inventory_hostname'] }} ssh-ed25519 {{ hostvars[entry]['ansible_ssh_host_key_ed25519_public'] }}
{% endfor %}
{% endif %}

View File

@ -0,0 +1,40 @@
# Main SSH daemon configuration
# {{ ansible_managed }}
Port 22
ListenAddress ::
ListenAddress 0.0.0.0
Protocol 2
HostKey /etc/ssh/ssh_host_ed25519_key
HostKey /etc/ssh/ssh_host_rsa_key
SyslogFacility AUTH
LogLevel INFO
LoginGraceTime 30
UsePAM yes
StrictModes yes
X11Forwarding no
PrintMotd no
PrintLastLog yes
TCPKeepAlive yes
AcceptEnv LANG LC_*
KexAlgorithms curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha1,diffie-hellman-group-exchange-sha1,diffie-hellman-group1-sha1
Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com,hmac-sha1,hmac-sha1-96,hmac-md5,hmac-md5-96
PubkeyAuthentication yes
PermitEmptyPasswords no
ChallengeResponseAuthentication no
PasswordAuthentication no
{% if 'role_hv' in group_names %}
HostbasedAuthentication yes
HostbasedUsesNameFromPacketOnly yes
IgnoreRhosts no
PermitRootLogin yes
{% else %}
HostbasedAuthentication no
IgnoreRhosts yes
PermitRootLogin no
{% endif %}
Subsystem sftp /usr/lib/openssh/sftp-server -f AUTH -l INFO

View File

@ -0,0 +1,19 @@
# sudoers configuration; per-host declarations go in /etc/sudoers.d
# {{ ansible_managed }}
Defaults env_reset
Defaults mail_badpass
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
Cmnd_Alias BACKUPS = /usr/bin/rsync, /var/backups/timestamp.sh
root ALL=(ALL:ALL) NOPASSWD: ALL
backup ALL=(root) NOPASSWD: BACKUPS
deploy ALL=(ALL:ALL) NOPASSWD: /bin/sh
%sudo ALL=(ALL:ALL) NOPASSWD: ALL
{% if ansible_local.moe_release is defined and ansible_local.moe_release.debian_version|int >= 11 %}
@includedir /etc/sudoers.d
{% else %}
#includedir /etc/sudoers.d
{% endif %}

View File

@ -0,0 +1,55 @@
# General sysctl parameters for MOE
# {{ ansible_managed }}
{% if 'virtual' in group_names %}
# Maximize swappiness (NOTE: 100 means swap as aggressively as possible, not "off" -- confirm intended value)
vm.swappiness = 100
{% else %}
# Lower swappiness
vm.swappiness = 80
{% endif %}
# Increase the cache pressure
vm.vfs_cache_pressure = 200
# enable Spoof protection (reverse-path filter)
# Turn on Source Address Verification in all interfaces to
# prevent some spoofing attacks
net.ipv4.conf.default.rp_filter = 1
net.ipv4.conf.all.rp_filter = 1
# Ignore ICMP broadcasts
net.ipv4.icmp_echo_ignore_broadcasts = 1
# Ignore bogus ICMP errors
net.ipv4.icmp_ignore_bogus_error_responses = 1
# Do not accept ICMP redirects (prevent MITM attacks)
net.ipv4.conf.all.accept_redirects = 0
{% if not 'rpi' in group_names %}
net.ipv6.conf.all.accept_redirects = 0
{% endif %}
# Do not send ICMP redirects (we are not a router)
net.ipv4.conf.all.send_redirects = 0
# Do not accept IP source route packets (we are not a router)
net.ipv4.conf.all.accept_source_route = 0
{% if not 'rpi' in group_names %}
net.ipv6.conf.all.accept_source_route = 0
{% endif %}
# Don't log Martian Packets
net.ipv4.conf.all.log_martians = 0
# Explicit Congestion Notification (ECN)
net.ipv4.tcp_ecn = 1
# number of seconds the kernel waits before rebooting on a panic
kernel.panic = 60
# Panic on an OOPS
kernel.panic_on_oops = 1
# Restrict dmesg
kernel.dmesg_restrict = 1

View File

@ -0,0 +1,7 @@
# Journald configuration
# {{ ansible_managed }}
[Journal]
Storage=persistent
ForwardToSyslog=yes
SystemMaxFiles=8
RuntimeMaxFiles=8

View File

@ -0,0 +1,18 @@
#!/bin/bash
# dpkg-cleanup.sh - Remove obsolete packages and config files
# {{ ansible_managed }}

# Phase 1 - purge `rc`/`ri` state packages (removed but config remains) so
# their leftover configuration files are deleted as well.
PACKAGE_LIST=( $( dpkg --list | awk '/^rc/{ print $2 } /^ri/{ print $2 }' ) )
# Quote the expansion so each package name is passed as a single word, and
# skip the apt call entirely when there is nothing to purge.
if [[ ${#PACKAGE_LIST[@]} -gt 0 ]]; then
    apt purge -y "${PACKAGE_LIST[@]}"
fi
# Phase 2 - autoremove packages
apt autoremove --purge -y
# Phase 3 - clean archives
apt clean
# Phase 4 - find and remove obsolete config files left behind by dpkg,
# ucf, and update-* tools. Quoted expansion prevents the shell from
# re-globbing paths that contain wildcards or whitespace.
OLD_FILES_LIST=( $( find /etc -type f -a \( -name '*.dpkg-*' -o -name '*.ucf-*' -o -name '*.update-*' \) 2>/dev/null ) )
if [[ ${#OLD_FILES_LIST[@]} -gt 0 ]]; then
    rm -f "${OLD_FILES_LIST[@]}"
fi

View File

@ -0,0 +1,55 @@
#!/bin/bash -x
# kernel-cleanup.sh - Remove obsolete packages and config files
# {{ ansible_managed }}
#
# Purges every installed kernel image/header package except the newest one.
# If the newest installed kernel is not the one currently running, removal
# is forced with --allow-remove-essential so the (older) running kernel's
# packages can still be purged.

# Determine the active running kernel
# NOTE(review): assumes field 4 of `uname -v` holds the kernel package
# version (Debian banner format) -- confirm on all deployed platforms.
RUNNING_KERNEL="$( uname -v | awk '{ print $4 }' )"
# Determine the list of installed kernels (latest is always last)
INSTALLED_KERNELS=( $( dpkg -l | grep 'linux-image-[0-9]' | awk '{ print $3 }' | sort -n ) )
# NOTE(review): echoing an array without [@] prints only its first element;
# this is debug output only, so it is harmless.
echo ${INSTALLED_KERNELS}
# After Jinja templating, the next line becomes the bash array-length
# expression (the literal brace-hash sequence must be escaped in the
# template so Jinja does not treat it as a template comment).
NUM_INSTALLED=${{ '{#' }}INSTALLED_KERNELS[@]}
# Nothing to clean up when only one kernel is present.
if [[ ${NUM_INSTALLED} -le 1 ]]; then
echo "A single kernel is installed, aborting cleanly."
exit 0
fi
# The newest kernel (last element after the numeric sort) is the keeper.
LATEST_KERNEL="${INSTALLED_KERNELS[-1]}"
# Only force essential-package removal when the kernel being kept is not
# the one currently running.
if [[ ${LATEST_KERNEL} == ${RUNNING_KERNEL} ]]; then
force=""
else
force="--allow-remove-essential"
fi
# Remove the latest kernel from the array
NUM_REMOVABLE=$(( ${NUM_INSTALLED} - 1 ))
REMOVABLE_KERNELS=( ${INSTALLED_KERNELS[@]:0:${NUM_REMOVABLE}} )
# Collect every package matching each removable kernel version, excluding
# the amd64 image/headers meta-packages which must remain installed.
PURGE_PACKAGES=()
for KERNEL in ${REMOVABLE_KERNELS[@]}; do
PURGE_PACKAGES+=( $( dpkg -l | grep ${KERNEL} | grep -v 'linux-image-amd64\|linux-headers-amd64' | awk '{ print $2 }' ) )
done
# Override the "linux-check-removal" script
# (replaced with a stub that exits 0 so the purge can run unattended)
mv /usr/bin/linux-check-removal /usr/bin/linux-check-removal.orig
echo -e '#!/bin/sh\necho "Overriding default linux-check-removal script!"\nexit 0' > /usr/bin/linux-check-removal
chmod +x /usr/bin/linux-check-removal
# Remove the packages
echo "Removing: ${PURGE_PACKAGES[@]}"
apt-get purge --yes ${force} ${PURGE_PACKAGES[@]}
# Restore the "linux-check-removal" script
mv /usr/bin/linux-check-removal.orig /usr/bin/linux-check-removal
# Make sure there is still a valid kernel installed (just in case something broke)
if [[ $( dpkg -l | grep 'linux-image-[0-9]' | wc -l ) -lt 1 ]]; then
echo "WARNING: NO KERNEL IS INSTALLED. THROWING ERROR AND ABORTING."
exit 1
fi
update-grub
exit 0

View File

@ -0,0 +1,75 @@
#!/bin/sh
# Update dynamic MOTD file
# {{ ansible_managed }}
#
# Builds a coloured MOTD describing this host (BLSE release, hostname and
# description, hardware/VM classification, kernel banner) into a temp file,
# then moves it into place at /run/blse-motd.dynamic for pam_motd.
set -o errexit
TMPFILE=$(mktemp)
TGTFILE=/run/blse-motd.dynamic
# The BLSE version banner is resolved at template time from the target
# host's Debian release.
{% if ansible_distribution_release == "jessie" %}
BLSEVER="BLSE 2.x (Debian Jessie)"
{% elif ansible_distribution_release == "stretch" %}
BLSEVER="BLSE 2.1 (Debian Stretch)"
{% elif ansible_distribution_release == "buster" %}
BLSEVER="BLSE 2.2 (Debian Buster)"
{% elif ansible_distribution_release == "bullseye" %}
BLSEVER="BLSE 2.3 (Debian Bullseye)"
{% elif ansible_distribution_release == "bookworm" %}
BLSEVER="BLSE 2.4 (Debian Bookworm)"
{% endif %}
echo >> $TMPFILE
echo "\033[01;34mBoniface Labs Server Environment \033[01;36m${BLSEVER}\033[0m" >> $TMPFILE
echo -n "> \033[01;32m$(hostname)\033[0m" >> $TMPFILE
# Append the free-form host description when one has been set.
if test -f /etc/hostdesc; then
echo " - $( cat /etc/hostdesc )" >> $TMPFILE
else
echo >> $TMPFILE
fi
echo -n "> " >> $TMPFILE
# Get virtual machine info from vhostmd if it exists
VENDOR="$(/usr/sbin/dmidecode | grep Vendor | tr -d ' \t\n\r')"
if [ "$VENDOR" = "Vendor:Bochs" ] || [ "$VENDOR" = "Vendor:SeaBIOS" ]; then
# Bochs/SeaBIOS vendor strings indicate a QEMU/KVM guest; pull the
# hypervisor hostname and product info out of the vm-dump-metrics XML.
hvhostname=$(/usr/sbin/vm-dump-metrics | grep -A1 HostName | awk -F'>' '{ if ($1 == " <value") print $2 }')
hvvirtproductinfo=$(/usr/sbin/vm-dump-metrics | grep -A1 VirtProductInfo | awk -F'>' '{ if ($1 == " <value") print $2 }')
if [ "$hvhostname" ]; then
echo "\033[1;37mKVM virtual machine\033[0m on node \033[1;31m${hvhostname}\033[0m (${hvvirtproductinfo})" >> $TMPFILE
else
echo "\033[1;37mRemote KVM virtual machine\033[0m" >> $TMPFILE
fi
elif [ "$VENDOR" = 'Vendor:DigitalOcean' ]; then
echo "\033[1;37mRemote KVM virtual machine\033[0m on \033[1;31mDigitalOcean\033[0m" >> $TMPFILE
else
# Physical host: classify by hostname substring.
# Are we a KVM hypervisor?
if [ "$(hostname | grep dcrhv)" ]; then
echo "\033[1;37mRouter Hypervisor\033[0m on \033[1;31m$(/usr/sbin/dmidecode | grep -A1 'Base Board Information' | tail -1 | awk -F':' '{print $2}' | tr -s ' ' | sed 's/^ //' )\033[0m hardware" >> $TMPFILE
# Are we a Ceph node?
elif [ "$(hostname | grep ceph)" ]; then
echo "\033[1;37mCeph Storage Node\033[0m on \033[1;31m$(/usr/sbin/dmidecode | grep -A1 'Base Board Information' | tail -1 | awk -F':' '{print $2}' | tr -s ' ' | sed 's/^ //' )\033[0m hardware" >> $TMPFILE
# Are we a GPU node?
elif [ "$(hostname | grep gpu)" ]; then
echo "\033[1;37mGPU Processing Host\033[0m on \033[1;31m$(/usr/sbin/dmidecode | grep -A1 'Base Board Information' | tail -1 | awk -F':' '{print $2}' | tr -s ' ' | sed 's/^ //' )\033[0m hardware" >> $TMPFILE
# Are we Base?
elif [ "$(hostname | grep base)" ]; then
echo "\033[1;37mHome Base\033[0m on \033[1;31m$(/usr/sbin/dmidecode | grep -A1 'Base Board Information' | tail -1 | awk -F':' '{print $2}' | tr -s ' ' | sed 's/^ //' )\033[0m hardware" >> $TMPFILE
# Are we Env?
elif [ "$(hostname | grep env)" ]; then
echo "\033[1;37mEnvironmental Monitor\033[0m on \033[1;31mRaspberry Pi\033[0m hardware" >> $TMPFILE
# Are we Kal?
elif [ "$(hostname | grep kal)" ]; then
echo "\033[1;37mVoice Control Node\033[0m on \033[1;31mRaspberry Pi\033[0m hardware" >> $TMPFILE
# Are we IR?
# NOTE(review): "Infared" in the message below is a typo for "Infrared";
# left as-is here since fixing it changes the displayed MOTD text.
elif [ "$(hostname | grep ir)" ]; then
echo "\033[1;37mInfared Control Node\033[0m on \033[1;31mRaspberry Pi\033[0m hardware" >> $TMPFILE
# Otherwise, we're generic
else
echo "\033[1;37mGeneric server\033[0m on \033[1;31m$(/usr/sbin/dmidecode | grep -A1 'Base Board Information' | tail -1 | awk -F':' '{print $2}' | tr -s ' ' | sed 's/^ //' )\033[0m hardware" >> $TMPFILE
fi
fi
echo "> $(/bin/uname -srvmo)" >> $TMPFILE
# Publish the new MOTD; on failure remove the temp file instead of
# leaving it behind in /tmp.
mv $TMPFILE $TGTFILE || rm $TMPFILE
chmod 644 $TGTFILE

View File

@ -0,0 +1,6 @@
# backup user authorized_keys
# {{ ansible_managed }}
{% for entry in backup_ssh_keys %}
{{ entry.type }} {{ entry.key }} {{ entry.name }} {{ entry.date }}
{% endfor %}

View File

@ -0,0 +1,11 @@
#!/bin/bash
# Writes timestamps on successful BackupPC completion and updates dynamic share inventory for this host
# {{ ansible_managed }}
# $1 - completion status from BackupPC (1 on success)
# $2 - the share (path) that was backed up
OK="$1"
SHARE="$2"
# Record the share in the inventory if it is not already listed. -q keeps
# the matched line off stdout; -x forces a whole-line match so a share
# path that is a prefix of another entry (e.g. /srv vs /srv/data) is not
# mistaken for an existing one.
grep -qxF "${SHARE}" /var/backups/shares 2>/dev/null || echo "${SHARE}" >> /var/backups/shares
# On success, stamp the share with the current epoch time (read by the
# backup-age monitoring check).
if [[ ${OK} -eq 1 ]]; then
    /bin/date +%s > "${SHARE}/.backup"
fi

View File

@ -0,0 +1,8 @@
# deploy user authorized_keys
# {{ ansible_managed }}
{% for user in admin_users %}
{% for entry in user.ssh_keys %}
{{ entry.type }} {{ entry.key }} {{ entry.name }} {{ entry.date }}
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,7 @@
# BLSE 2.x bash_logout file
# {{ ansible_managed }}
# when leaving the console clear the screen to increase privacy
if [ "$SHLVL" = 1 ]; then
[ -x /usr/bin/clear_console ] && /usr/bin/clear_console -q
fi

View File

@ -0,0 +1,149 @@
#!/bin/bash
# BLSE 2.x bashrc file
# {{ ansible_managed }}
#
# GENERAL SETTINGS
#
# Before anything, see if we're running interactively. If not, skip everything here.
[[ $- == *i* ]] || return
# Ensure bash completion is enabled if installed
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# Some other tweaks
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# Set history limits and values
# (cdspell/dirspell auto-correct minor typos in `cd` paths; histappend plus
# the `history -a` in PROMPT_COMMAND below writes each command to the
# history file as it is run)
shopt -s cdspell
shopt -s dirspell
shopt -s dotglob
shopt -s histreedit
shopt -s histverify
shopt -s histappend
PROMPT_COMMAND="history -a;$PROMPT_COMMAND"
HISTCONTROL=ignoreboth
HISTSIZE=25000
HISTFILESIZE=25000
#
# BASH SETTINGS
#
# Set a shiny Linux Mint-style PS1 with spaces for easy double-click-select
# git_branch prints "git:<branch> " when inside a repository, else nothing.
git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/git:\1 /'
}
export PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\[\033[01;32m\]\H\[\033[01;34m\] \[\e[35m\]$(git_branch)\[\033[01;34m\]\w \$\[\033[00m\] '
# Sensible PATH (find things in *sbin* as non-root user)
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/games:/usr/games:/usr/lib/check_mk_agent/plugins"
# Set PATH to include ~/Scripts if it exists
if [ -d ~/Scripts ]; then
export PATH=~/Scripts:$PATH
fi
# Set editor to vim
export EDITOR=/usr/bin/vim
# Force SCREEN to xterm due to Debian weirdness
export SCREEN="xterm"
#
# ALIASES
#
# Coloured command aliases
# NOTE(review): --color=always embeds ANSI codes even when output is piped;
# the `less -r` alias below renders them, but beware when post-processing
# grep output in scripts sourced from interactive shells.
alias ls='ls --color=always'
alias dir='dir --color=always'
alias vdir='vdir --color=always'
alias grep='grep --color=always'
alias fgrep='fgrep --color=always'
alias egrep='egrep --color=always'
alias xzgrep='xzgrep --color=always'
alias less='less -r'
# Convenient ls aliases
alias ll='ls -alh'
alias la='ls -A'
alias l='ls -lh'
# Always-sudo commands, because fuck typing sudo all the time
alias service='sudo service'
alias systemctl='sudo systemctl'
alias journalctl='sudo journalctl'
alias dmesg='sudo dmesg'
alias apt='sudo apt'
alias dpkg='sudo dpkg'
alias find='sudo find'
alias htop='sudo htop'
alias powertop='sudo powertop'
alias jnettop='sudo jnettop'
alias wavemon='sudo wavemon'
alias parted='sudo parted'
alias fdisk='sudo fdisk'
alias gdisk='sudo gdisk'
alias chroot='sudo chroot'
alias mount='sudo mount'
alias umount='sudo umount'
alias virsh='sudo virsh -c qemu:///system'
alias ceph='sudo ceph'
alias rbd='sudo rbd'
alias mysql='sudo mysql'
alias zpool='sudo zpool'
alias zfs='sudo zfs'
alias crm='sudo crm'
# Cool aliases
alias cccp='sudo rsync -auv --progress'
alias untmp='sudo umount /tmp/tmp.*{/*/*,/*,} 2>/dev/null'
alias txz='tar -p --same-owner -I "xz -T4"'
alias stxz='sudo tar -p --same-owner -I "xz -T4"'
alias hatop='sudo hatop -s /var/lib/haproxy/admin.sock'
alias zkcli='sudo /usr/share/zookeeper/bin/zkCli.sh -server $(hostname -s):2181'
alias patronictl='sudo patronictl -c /etc/patroni/config.yml -d zookeeper://$(hostname -s):2181'
alias repo='sudo reprepro -b /srv/debrepo'
alias beet='sudo -u debian-deluged beet --config=/srv/deluged/config.beets/config.yaml'
alias glances='sudo glances -t 5'
{% if 'role_mon' in group_names %}
# Monitoring hosts only: icli alias plus a live status summary at login.
alias icli='sudo -u monitor icli --status-file /omd/sites/monitor/tmp/nagios/status.dat --config /omd/sites/monitor/var/nagios/objects.cache -z \!o'
#
# Show monitoring stats
#
icli
{% endif %}
#
# SOURCE OTHER SCRIPTS
#
# Per-user drop-in directory for additional shell configuration.
if [[ -d ~/.bashrc.d ]]; then
for script in ~/.bashrc.d/*; do
. "$script"
done
fi
#
# NICE AND CLEAN
#
echo
#
# END OF FILE
#

View File

@ -0,0 +1,25 @@
# htop config file
# {{ ansible_managed }}
fields=0 48 17 18 38 39 40 2 46 47 49 1
sort_key=46
sort_direction=0
hide_threads=0
hide_kernel_threads=0
hide_userland_threads=0
shadow_other_users=0
show_thread_names=1
highlight_base_name=1
highlight_megabytes=1
highlight_threads=1
tree_view=0
header_margin=1
detailed_cpu_time=1
cpu_count_from_zero=0
update_process_names=1
account_guest_in_cpu_meter=1
color_scheme=0
delay=15
left_meters=LeftCPUs2 Blank CPU Blank Blank Memory Swap
left_meter_modes=1 2 1 2 2 1 1
right_meters=RightCPUs2 Blank LoadAverage Tasks Blank Hostname Clock Uptime Blank
right_meter_modes=1 2 2 2 2 2 2 2 2

View File

@ -0,0 +1,16 @@
# {{ ansible_managed }}
EDITOR=/usr/bin/vim
# if running bash
if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
fi
# set PATH so it includes user's private bin if it exists
if [ -d "$HOME/bin" ] ; then
PATH="$HOME/bin:$PATH"
fi

View File

@ -0,0 +1,6 @@
# administrative shell user authorized_keys ({{ item.name }})
# {{ ansible_managed }}
{% for entry in item.ssh_keys %}
{{ entry.type }} {{ entry.key }} {{ entry.name }} {{ entry.date }}
{% endfor %}

View File

@ -0,0 +1,13 @@
" Vim configuration
" NOTE(review): unlike the sibling templates, this file carries no
" ansible_managed header -- consider adding one for consistency.
set showcmd
set number
set cursorline
set autoindent
set expandtab
set tabstop=4
" viminfo: remember marks for 100 files ('100), up to 1000 lines per
" register (<1000), skip registers over 1000 KiB (s1000), and start with
" search highlighting disabled (h).
set viminfo='100,<1000,s1000,h
hi CursorLine term=bold cterm=bold guibg=Grey40
syntax on
set ruler
" Keep swap files in ~/.vim instead of alongside the edited files.
set directory=~/.vim
" Disable mouse capture so normal terminal selection works.
set mouse=
" Spellcheck and wrap at 72 columns when writing git commit messages.
autocmd Filetype gitcommit setlocal spell textwidth=72