#!/usr/bin/env python3

# CephInstance.py - Class implementing a PVC node Ceph instance
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2021 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import time
import json
import psutil

import daemon_lib.common as common

from distutils.util import strtobool
from re import search


class CephOSDInstance(object):
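    """
    Class implementing and managing a PVC node Ceph OSD instance.

    Attaches Zookeeper watchers to the OSD's "osd.node" and "osd.stats" keys
    and mirrors their contents into this instance; the static methods perform
    the actual OSD lifecycle operations (add, remove, DB volume management).
    """
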
    def __init__(self, zkhandler, this_node, osd_id):
        self.zkhandler = zkhandler
        self.this_node = this_node
        self.osd_id = osd_id
        self.node = None
        self.size = None
        self.stats = dict()

        @self.zkhandler.zk_conn.DataWatch(
            self.zkhandler.schema.path("osd.node", self.osd_id)
        )
        def watch_osd_node(data, stat, event=""):
            if event and event.type == "DELETED":
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode("ascii")
            except AttributeError:
                data = ""

            if data and data != self.node:
                self.node = data

        @self.zkhandler.zk_conn.DataWatch(
            self.zkhandler.schema.path("osd.stats", self.osd_id)
        )
        def watch_osd_stats(data, stat, event=""):
            if event and event.type == "DELETED":
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode("ascii")
            except AttributeError:
                data = ""

            if data and data != self.stats:
                self.stats = json.loads(data)

    @staticmethod
    def add_osd(
        zkhandler, logger, node, device, weight, ext_db_flag=False, ext_db_ratio=0.05
    ):
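        """
        Create a new OSD on block device "device" on "node", weighted "weight"
        in the CRUSH map, and register it in Zookeeper.

        If ext_db_flag is set, a database logical volume sized at ext_db_ratio
        of the OSD device is created in the "osd-db" VG and attached as the
        OSD's block.db device.

        Returns True on success and False on any failure.
        """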
        # We are ready to create a new OSD on this node
        logger.out("Creating new OSD disk on block device {}".format(device), state="i")
        try:
            # 1. Create an OSD; we do this so we know what ID will be gen'd
            retcode, stdout, stderr = common.run_os_command("ceph osd create")
            if retcode:
                print("ceph osd create")
                print(stdout)
                print(stderr)
                raise Exception
            osd_id = stdout.rstrip()

            # 2. Remove that newly-created OSD
            retcode, stdout, stderr = common.run_os_command(
                "ceph osd rm {}".format(osd_id)
            )
            if retcode:
                print("ceph osd rm")
                print(stdout)
                print(stderr)
                raise Exception

            # 3a. Zap the disk to ensure it is ready to go
            logger.out("Zapping disk {}".format(device), state="i")
            retcode, stdout, stderr = common.run_os_command(
                "ceph-volume lvm zap --destroy {}".format(device)
            )
            if retcode:
                print("ceph-volume lvm zap")
                print(stdout)
                print(stderr)
                raise Exception

            dev_flags = "--data {}".format(device)

            # 3b. Prepare the logical volume if ext_db_flag
            if ext_db_flag:
                _, osd_size_bytes, _ = common.run_os_command(
                    "blockdev --getsize64 {}".format(device)
                )
                osd_size_bytes = int(osd_size_bytes)
                result = CephOSDInstance.create_osd_db_lv(
                    zkhandler, logger, osd_id, ext_db_ratio, osd_size_bytes
                )
                if not result:
                    raise Exception
                db_device = "osd-db/osd-{}".format(osd_id)
                dev_flags += " --block.db {}".format(db_device)
            else:
                db_device = ""

            # 3c. Create the OSD for real
            logger.out(
                "Preparing LVM for new OSD disk with ID {} on {}".format(
                    osd_id, device
                ),
                state="i",
            )
            retcode, stdout, stderr = common.run_os_command(
                "ceph-volume lvm prepare --bluestore {devices}".format(
                    devices=dev_flags
                )
            )
            if retcode:
                print("ceph-volume lvm prepare")
                print(stdout)
                print(stderr)
                raise Exception

            # 4a. Get OSD FSID
            logger.out(
                "Getting OSD FSID for ID {} on {}".format(osd_id, device), state="i"
            )
            retcode, stdout, stderr = common.run_os_command(
                "ceph-volume lvm list {device}".format(device=device)
            )
            # Initialize so the check below fails cleanly if no FSID is found
            osd_fsid = None
            for line in stdout.split("\n"):
                if "osd fsid" in line:
                    osd_fsid = line.split()[-1]

            if not osd_fsid:
                print("ceph-volume lvm list")
                print("Could not find OSD fsid in data:")
                print(stdout)
                print(stderr)
                raise Exception

            # 4b. Activate the OSD
            logger.out(
                "Activating new OSD disk with ID {}".format(osd_id), state="i"
            )
            retcode, stdout, stderr = common.run_os_command(
                "ceph-volume lvm activate --bluestore {osdid} {osdfsid}".format(
                    osdid=osd_id, osdfsid=osd_fsid
                )
            )
            if retcode:
                print("ceph-volume lvm activate")
                print(stdout)
                print(stderr)
                raise Exception

            # 5. Add it to the CRUSH map
            logger.out(
                "Adding new OSD disk with ID {} to CRUSH map".format(osd_id), state="i"
            )
            retcode, stdout, stderr = common.run_os_command(
                "ceph osd crush add osd.{osdid} {weight} root=default host={node}".format(
                    osdid=osd_id, weight=weight, node=node
                )
            )
            if retcode:
                print("ceph osd crush add")
                print(stdout)
                print(stderr)
                raise Exception
            time.sleep(0.5)

            # 6. Verify it started
            retcode, stdout, stderr = common.run_os_command(
                "systemctl status ceph-osd@{osdid}".format(osdid=osd_id)
            )
            if retcode:
                print("systemctl status")
                print(stdout)
                print(stderr)
                raise Exception

            # 7. Add the new OSD to the list
            logger.out(
                "Adding new OSD disk with ID {} to Zookeeper".format(osd_id), state="i"
            )
            zkhandler.write(
                [
                    (("osd", osd_id), ""),
                    (("osd.node", osd_id), node),
                    (("osd.device", osd_id), device),
                    (("osd.db_device", osd_id), db_device),
                    (("osd.stats", osd_id), "{}"),
                ]
            )

            # Log it
            logger.out("Created new OSD disk with ID {}".format(osd_id), state="o")
            return True
        except Exception as e:
            # Log it
            logger.out("Failed to create new OSD disk: {}".format(e), state="e")
            return False

    @staticmethod
    def remove_osd(zkhandler, logger, osd_id, osd_obj):
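        """
        Remove OSD "osd_id" from the cluster: set it out, wait for all PGs to
        flush off of it, stop the OSD process, zap the backing volumes, purge
        the OSD from Ceph, remove any DB logical volume, and delete it from
        Zookeeper.

        Returns True on success (or if the OSD is already absent) and False
        on any failure.
        """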
        logger.out("Removing OSD disk {}".format(osd_id), state="i")
        try:
            # 1. Verify the OSD is present
            retcode, stdout, stderr = common.run_os_command("ceph osd ls")
            osd_list = stdout.split("\n")
            if osd_id not in osd_list:
                logger.out(
                    "Could not find OSD {} in the cluster".format(osd_id), state="e"
                )
                # The OSD is already gone; treat this as a successful removal
                return True

            # 2. Set the OSD out so it will flush
            logger.out("Setting out OSD disk with ID {}".format(osd_id), state="i")
            retcode, stdout, stderr = common.run_os_command(
                "ceph osd out {}".format(osd_id)
            )
            if retcode:
                print("ceph osd out")
                print(stdout)
                print(stderr)
                raise Exception

            # 3. Wait for the OSD to flush
            logger.out("Flushing OSD disk with ID {}".format(osd_id), state="i")
            osd_string = str()
            while True:
                try:
                    retcode, stdout, stderr = common.run_os_command(
                        "ceph pg dump osds --format json"
                    )
                    dump_string = json.loads(stdout)
                    for osd in dump_string:
                        if str(osd["osd"]) == osd_id:
                            osd_string = osd
                    num_pgs = osd_string["num_pgs"]
                    if num_pgs > 0:
                        time.sleep(5)
                    else:
                        raise Exception
                except Exception:
                    break

            # 4. Stop the OSD process and wait for it to be terminated
            logger.out("Stopping OSD disk with ID {}".format(osd_id), state="i")
            retcode, stdout, stderr = common.run_os_command(
                "systemctl stop ceph-osd@{}".format(osd_id)
            )
            if retcode:
                print("systemctl stop")
                print(stdout)
                print(stderr)
                raise Exception

            # FIXME: There has to be a better way to do this /shrug
            while True:
                is_osd_up = False
                # Find if there is a process named ceph-osd with arg '--id {id}'
                for p in psutil.process_iter(attrs=["name", "cmdline"]):
                    if "ceph-osd" == p.info["name"] and "--id {}".format(
                        osd_id
                    ) in " ".join(p.info["cmdline"]):
                        is_osd_up = True
                # If there isn't, continue
                if not is_osd_up:
                    break

            # 5. Determine the block devices
            retcode, stdout, stderr = common.run_os_command(
                "readlink /var/lib/ceph/osd/ceph-{}/block".format(osd_id)
            )
            vg_name = stdout.split("/")[-2]  # e.g. /dev/ceph-<uuid>/osd-block-<uuid>
            retcode, stdout, stderr = common.run_os_command(
                "vgs --separator , --noheadings -o pv_name {}".format(vg_name)
            )
            pv_block = stdout.strip()

            # 6. Zap the volumes
            logger.out(
                "Zapping OSD disk with ID {} on {}".format(osd_id, pv_block), state="i"
            )
            retcode, stdout, stderr = common.run_os_command(
                "ceph-volume lvm zap --destroy {}".format(pv_block)
            )
            if retcode:
                print("ceph-volume lvm zap")
                print(stdout)
                print(stderr)
                raise Exception

            # 7. Purge the OSD from Ceph
            logger.out("Purging OSD disk with ID {}".format(osd_id), state="i")
            retcode, stdout, stderr = common.run_os_command(
                "ceph osd purge {} --yes-i-really-mean-it".format(osd_id)
            )
            if retcode:
                print("ceph osd purge")
                print(stdout)
                print(stderr)
                raise Exception

            # 8. Remove the DB device
            if zkhandler.exists(("osd.db_device", osd_id)):
                db_device = zkhandler.read(("osd.db_device", osd_id))
                logger.out(
                    'Removing OSD DB logical volume "{}"'.format(db_device), state="i"
                )
                retcode, stdout, stderr = common.run_os_command(
                    "lvremove --yes --force {}".format(db_device)
                )

            # 9. Delete OSD from ZK
            logger.out(
                "Deleting OSD disk with ID {} from Zookeeper".format(osd_id), state="i"
            )
            zkhandler.delete(("osd", osd_id), recursive=True)

            # Log it
            logger.out("Removed OSD disk with ID {}".format(osd_id), state="o")
            return True
        except Exception as e:
            # Log it
            logger.out(
                "Failed to purge OSD disk with ID {}: {}".format(osd_id, e), state="e"
            )
            return False

    @staticmethod
    def add_db_vg(zkhandler, logger, device):
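        """
        Create the "osd-db" volume group, used to hold external OSD database
        logical volumes, on block device "device".

        Returns True on success and False on any failure, including when the
        VG already exists.
        """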
        logger.out(
            "Creating new OSD database volume group on block device {}".format(device),
            state="i",
        )
        try:
            # 0. Check if an existing volume group exists
            retcode, stdout, stderr = common.run_os_command("vgdisplay osd-db")
            # vgdisplay exits 5 when the VG does not exist; anything else means it does
            if retcode != 5:
                logger.out('Ceph OSD database VG "osd-db" already exists', state="e")
                return False

            # 1. Create an empty partition table
            logger.out(
                "Creating partitions on block device {}".format(device), state="i"
            )
            retcode, stdout, stderr = common.run_os_command(
                "sgdisk --clear {}".format(device)
            )
            if retcode:
                print("sgdisk create partition table")
                print(stdout)
                print(stderr)
                raise Exception

            retcode, stdout, stderr = common.run_os_command(
                "sgdisk --new 1:: --typecode 1:8e00 {}".format(device)
            )
            if retcode:
                print("sgdisk create pv partition")
                print(stdout)
                print(stderr)
                raise Exception

            # Handle the partition ID portion
            if search(r"by-path", device) or search(r"by-id", device):
                # /dev/disk/by-path/pci-0000:03:00.0-scsi-0:1:0:0 -> pci-0000:03:00.0-scsi-0:1:0:0-part1
                partition = "{}-part1".format(device)
            elif search(r"nvme", device):
                # /dev/nvme0n1 -> nvme0n1p1
                partition = "{}p1".format(device)
            else:
                # /dev/sda -> sda1
                # No other '/dev/disk/by-*' types are valid for raw block devices anyways
                partition = "{}1".format(device)

            # 2. Create the PV
            logger.out("Creating PV on block device {}".format(partition), state="i")
            retcode, stdout, stderr = common.run_os_command(
                "pvcreate --force {}".format(partition)
            )
            if retcode:
                print("pv creation")
                print(stdout)
                print(stderr)
                raise Exception

            # 3. Create the VG (named 'osd-db')
            logger.out(
                'Creating VG "osd-db" on block device {}'.format(partition), state="i"
            )
            retcode, stdout, stderr = common.run_os_command(
                "vgcreate --force osd-db {}".format(partition)
            )
            if retcode:
                print("vg creation")
                print(stdout)
                print(stderr)
                raise Exception

            # Log it
            logger.out(
                "Created new OSD database volume group on block device {}".format(
                    device
                ),
                state="o",
            )
            return True
        except Exception as e:
            # Log it
            logger.out(
                "Failed to create OSD database volume group: {}".format(e), state="e"
            )
            return False

    @staticmethod
    def create_osd_db_lv(zkhandler, logger, osd_id, ext_db_ratio, osd_size_bytes):
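        """
        Create the logical volume "osd-db/osd-<osd_id>", sized at
        ext_db_ratio * osd_size_bytes, to hold the OSD's external database.

        Returns True on success and False on any failure, including when the
        LV already exists.
        """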
        logger.out(
            "Creating new OSD database logical volume for OSD ID {}".format(osd_id),
            state="i",
        )
        try:
            # 0. Check if an existing logical volume exists
            retcode, stdout, stderr = common.run_os_command(
                "lvdisplay osd-db/osd-{}".format(osd_id)
            )
            # lvdisplay exits 5 when the LV does not exist; anything else means it does
            if retcode != 5:
                logger.out(
                    'Ceph OSD database LV "osd-db/osd-{}" already exists'.format(
                        osd_id
                    ),
                    state="e",
                )
                return False

            # 1. Determine LV sizing (in MB)
            osd_db_size = int(osd_size_bytes * ext_db_ratio / 1024 / 1024)

            # 2. Create the LV
            logger.out(
                'Creating DB LV "osd-db/osd-{}" of {}M ({} * {})'.format(
                    osd_id, osd_db_size, osd_size_bytes, ext_db_ratio
                ),
                state="i",
            )
            retcode, stdout, stderr = common.run_os_command(
                "lvcreate --yes --name osd-{} --size {}M osd-db".format(
                    osd_id, osd_db_size
                )
            )
            if retcode:
                print("db lv creation")
                print(stdout)
                print(stderr)
                raise Exception

            # Log it
            logger.out(
                'Created new OSD database logical volume "osd-db/osd-{}"'.format(
                    osd_id
                ),
                state="o",
            )
            return True
        except Exception as e:
            # Log it
            logger.out(
                "Failed to create OSD database logical volume: {}".format(e), state="e"
            )
            return False


class CephPoolInstance(object):
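    """
    Class implementing and managing a PVC node Ceph pool instance.

    Attaches Zookeeper watchers to the pool's "pool.pgs" and "pool.stats"
    keys and mirrors their contents into this instance.
    """
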
    def __init__(self, zkhandler, this_node, name):
        self.zkhandler = zkhandler
        self.this_node = this_node
        self.name = name
        self.pgs = ""
        self.stats = dict()

        @self.zkhandler.zk_conn.DataWatch(
            self.zkhandler.schema.path("pool.pgs", self.name)
        )
        def watch_pool_node(data, stat, event=""):
            if event and event.type == "DELETED":
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode("ascii")
            except AttributeError:
                data = ""

            if data and data != self.pgs:
                self.pgs = data

        @self.zkhandler.zk_conn.DataWatch(
            self.zkhandler.schema.path("pool.stats", self.name)
        )
        def watch_pool_stats(data, stat, event=""):
            if event and event.type == "DELETED":
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode("ascii")
            except AttributeError:
                data = ""

            if data and data != self.stats:
                self.stats = json.loads(data)


class CephVolumeInstance(object):
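    """
    Class implementing and managing a PVC node Ceph volume instance.

    Attaches a Zookeeper watcher to the volume's "volume.stats" key and
    mirrors its contents into this instance.
    """
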
    def __init__(self, zkhandler, this_node, pool, name):
        self.zkhandler = zkhandler
        self.this_node = this_node
        self.pool = pool
        self.name = name
        self.stats = dict()

        @self.zkhandler.zk_conn.DataWatch(
            self.zkhandler.schema.path("volume.stats", f"{self.pool}/{self.name}")
        )
        def watch_volume_stats(data, stat, event=""):
            if event and event.type == "DELETED":
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode("ascii")
            except AttributeError:
                data = ""

            if data and data != self.stats:
                self.stats = json.loads(data)


class CephSnapshotInstance(object):
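    """
    Class implementing and managing a PVC node Ceph snapshot instance.

    Attaches a Zookeeper watcher to the snapshot's "snapshot.stats" key and
    mirrors its contents into this instance.
    """
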
    def __init__(self, zkhandler, this_node, pool, volume, name):
        self.zkhandler = zkhandler
        self.this_node = this_node
        self.pool = pool
        self.volume = volume
        self.name = name
        self.stats = dict()

        @self.zkhandler.zk_conn.DataWatch(
            self.zkhandler.schema.path(
                "snapshot.stats", f"{self.pool}/{self.volume}/{self.name}"
            )
        )
        def watch_snapshot_stats(data, stat, event=""):
            if event and event.type == "DELETED":
                # The key has been deleted after existing before; terminate this watcher
                # because this class instance is about to be reaped in Daemon.py
                return False

            try:
                data = data.decode("ascii")
            except AttributeError:
                data = ""

            if data and data != self.stats:
                self.stats = json.loads(data)


# Primary command function
# This command pipe is only used for OSD adds and removes
def ceph_command(zkhandler, logger, this_node, data, d_osd):
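    """
    Parse and dispatch a Ceph command from the "base.cmd.ceph" pipe.

    "data" is a string of the form "<command> <args>", where args is a
    comma-separated argument list; for example:

        osd_add <node>,<device>,<weight>,<ext_db_flag>,<ext_db_ratio>
        osd_remove <osd_id>
        db_vg_add <node>,<device>

    The node named in the command takes the "base.cmd.ceph" write lock,
    performs the operation, and writes back "success-<data>" or
    "failure-<data>" for the client to read.
    """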
    # Get the command and args
    command, args = data.split()

    # Adding a new OSD
    if command == "osd_add":
        node, device, weight, ext_db_flag, ext_db_ratio = args.split(",")
        ext_db_flag = bool(strtobool(ext_db_flag))
        ext_db_ratio = float(ext_db_ratio)
        if node == this_node.name:
            # Lock the command queue
            zk_lock = zkhandler.writelock("base.cmd.ceph")
            with zk_lock:
                # Add the OSD
                result = CephOSDInstance.add_osd(
                    zkhandler, logger, node, device, weight, ext_db_flag, ext_db_ratio
                )
                # Command succeeded
                if result:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "success-{}".format(data))])
                # Command failed
                else:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "failure-{}".format(data))])
                # Wait 1 second before freeing the lock, to ensure the client hits the lock
                time.sleep(1)

    # Removing an OSD
    elif command == "osd_remove":
        osd_id = args

        # Verify osd_id is in the list
        if osd_id in d_osd and d_osd[osd_id].node == this_node.name:
            # Lock the command queue
            zk_lock = zkhandler.writelock("base.cmd.ceph")
            with zk_lock:
                # Remove the OSD
                result = CephOSDInstance.remove_osd(
                    zkhandler, logger, osd_id, d_osd[osd_id]
                )
                # Command succeeded
                if result:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "success-{}".format(data))])
                # Command failed
                else:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "failure-{}".format(data))])
                # Wait 1 second before freeing the lock, to ensure the client hits the lock
                time.sleep(1)

    # Adding a new DB VG
    elif command == "db_vg_add":
        node, device = args.split(",")
        if node == this_node.name:
            # Lock the command queue
            zk_lock = zkhandler.writelock("base.cmd.ceph")
            with zk_lock:
                # Add the VG
                result = CephOSDInstance.add_db_vg(zkhandler, logger, device)
                # Command succeeded
                if result:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "success-{}".format(data))])
                # Command failed
                else:
                    # Update the command queue
                    zkhandler.write([("base.cmd.ceph", "failure-{}".format(data))])
                # Wait 1 second before freeing the lock, to ensure the client hits the lock
                time.sleep(1)