Rename some entries for consistency

Joshua Boniface 2018-10-30 09:17:41 -04:00
parent 9a271bda0a
commit 89a3e0c7ee
2 changed files with 13 additions and 28 deletions


@@ -42,7 +42,7 @@ class CephOSDInstance(object):
self.stats = dict()
@self.zk_conn.DataWatch('/ceph/osds/{}/node'.format(self.osd_id))
- def watch_osd_host(data, stat, event=''):
+ def watch_osd_node(data, stat, event=''):
if event and event.type == 'DELETED':
# The key has been deleted after existing before; terminate this watcher
# because this class instance is about to be reaped in Daemon.py
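
For context, kazoo's DataWatch decorator re-invokes the wrapped callback on every change to the watched znode, and returning False from the callback deregisters the watch; that is why watchers like the one above bail out on a DELETED event. A minimal standalone sketch of the pattern, using an illustrative Zookeeper address and znode path rather than PVC's own:

import kazoo.client

# Connect to a Zookeeper instance (address is illustrative).
zk_conn = kazoo.client.KazooClient(hosts='127.0.0.1:2181')
zk_conn.start()

@zk_conn.DataWatch('/ceph/osds/0/node')
def watch_osd_node(data, stat, event=None):
    if event and event.type == 'DELETED':
        # Returning False removes this watch entirely.
        return False
    # data arrives as bytes (or None); decode defensively, as the daemon does.
    try:
        data = data.decode('ascii')
    except AttributeError:
        data = ''
    print('OSD 0 is now on node: {}'.format(data))
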
@@ -56,23 +56,8 @@ class CephOSDInstance(object):
if data != self.node:
self.node = data
- @self.zk_conn.DataWatch('/ceph/osds/{}/size'.format(self.osd_id))
- def watch_osd_host(data, stat, event=''):
- if event and event.type == 'DELETED':
- # The key has been deleted after existing before; terminate this watcher
- # because this class instance is about to be reaped in Daemon.py
- return False
- try:
- data = data.decode('ascii')
- except AttributeError:
- data = ''
- if data != self.size:
- self.size = data
@self.zk_conn.DataWatch('/ceph/osds/{}/stats'.format(self.osd_id))
- def watch_osd_host(data, stat, event=''):
+ def watch_osd_stats(data, stat, event=''):
if event and event.type == 'DELETED':
# The key has been deleted after existing before; terminate this watcher
# because this class instance is about to be reaped in Daemon.py
@@ -84,10 +69,10 @@ class CephOSDInstance(object):
data = ''
if data != self.stats:
- self.stats.update(ast.literal_eval(data))
+ self.stats = dict(ast.literal_eval(data))
def add_osd(zk_conn, logger, node, device):
- # We are ready to create a new OSD on this host
+ # We are ready to create a new OSD on this node
logger.out('Creating new OSD disk', state='i')
try:
# 1. Create an OSD; we do this so we know what ID will be gen'd
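
A note on the stats change above: the stats value is stored in Zookeeper as the str() of a Python dict and read back with ast.literal_eval, so assigning a fresh dict (rather than merging with .update()) means keys that disappear from the stored copy no longer linger between refreshes. A rough illustration of the round trip, with made-up sample values:

import ast

# What the daemon writes: a plain dict serialized with str() (sample values are invented).
osd_stats = {'node': 'hv1', 'used': '112G', 'avail': '788G'}
serialized = str(osd_stats)

# What the watcher reads back: the same dict, reconstructed from the literal.
restored = dict(ast.literal_eval(serialized))
assert restored == osd_stats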


@@ -259,9 +259,9 @@ common.run_os_command('sysctl net.ipv6.conf.{}.rp_filter=0'.format(config['vni_d
###############################################################################
# What is the list of coordinator hosts
- coordinator_hosts = config['coordinators'].split(',')
+ coordinator_nodes = config['coordinators'].split(',')
- if myhostname in coordinator_hosts:
+ if myhostname in coordinator_nodes:
# We are indeed a coordinator host
config['daemon_mode'] = 'coordinator'
# Start the zookeeper service using systemctl
@@ -278,7 +278,7 @@ else:
# Start the connection to the coordinators
zk_conn = kazoo.client.KazooClient(hosts=config['coordinators'])
try:
- logger.out('Connecting to Zookeeper cluster hosts {}'.format(config['coordinators']), state='i')
+ logger.out('Connecting to Zookeeper cluster nodes {}'.format(config['coordinators']), state='i')
# Start connection
zk_conn.start()
except Exception as e:
@@ -775,7 +775,7 @@ def update_zookeeper():
if len(line) > 1 and line[1].isdigit():
# This is an OSD line so parse it
osd_id = line[1]
- host = line[3].split('.')[0]
+ node = line[3].split('.')[0]
used = line[5]
avail = line[7]
wr_ops = line[9]
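
The indices above line up with a whitespace-split row of Ceph's tabular status output, where the '|' column separators land on the even indices; the exact command the daemon runs is not visible in this hunk. A hedged sketch of the same parsing against a single invented row:

# One data row in the `ceph osd status` table style (values are invented).
raw = '| 2  | hv1.example.com |  112G |  788G |    4   |  21.5k  |    1   |    90   | exists,up |'

line = raw.split()
if len(line) > 1 and line[1].isdigit():
    osd_id = line[1]                  # '2'
    node = line[3].split('.')[0]      # 'hv1', with any domain suffix stripped
    used, avail = line[5], line[7]    # '112G', '788G'
    wr_ops = line[9]                  # '4'
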
@@ -786,7 +786,7 @@ def update_zookeeper():
osd_status.update({
# osd_stats.update({
str(osd_id): {
- 'host': host,
+ 'node': node,
'used': used,
'avail': avail,
'wr_ops': wr_ops,
@@ -804,13 +804,13 @@ def update_zookeeper():
osd_stats[osd] = this_dump
# Trigger updates for each OSD on this node
- osds_this_host = 0
+ osds_this_node = 0
for osd in osd_list:
if d_osd[osd].node == myhostname:
zkhandler.writedata(zk_conn, {
'/ceph/osds/{}/stats'.format(osd): str(osd_stats[osd])
})
- osds_this_host += 1
+ osds_this_node += 1
# Toggle state management of dead VMs to restart them
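
zkhandler.writedata is PVC's own helper; its exact semantics are not shown here, but the write above amounts to setting each path in the map to the given value. A rough equivalent using raw kazoo calls, for orientation only:

# Approximation of a per-key Zookeeper write using plain kazoo
# (PVC's zkhandler.writedata may batch these differently, e.g. in a transaction).
def write_keys(zk_conn, key_value_map):
    for path, value in key_value_map.items():
        zk_conn.ensure_path(path)                      # create the znode if it does not exist
        zk_conn.set(path, str(value).encode('ascii'))  # znode data must be bytes

# Usage, mirroring the loop above:
# write_keys(zk_conn, {'/ceph/osds/2/stats': str(osd_stats['2'])})
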
@@ -924,13 +924,13 @@ def update_zookeeper():
logger.out(
'{bold}Ceph cluster status:{nofmt} {health_colour}{health}{nofmt} '
'{bold}Total OSDs:{nofmt} {total_osds} '
- '{bold}Host OSDs:{nofmt} {host_osds}'.format(
+ '{bold}Node OSDs:{nofmt} {node_osds}'.format(
bold=logger.fmt_bold,
health_colour=ceph_health_colour,
nofmt=logger.fmt_end,
health=ceph_health,
total_osds=len(osd_list),
- host_osds=osds_this_host
+ node_osds=osds_this_node
),
)
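
The status line above is assembled with str.format() and named placeholders; fmt_bold, fmt_end, and the health colour on the logger are presumably terminal formatting codes. A toy rendering with assumed ANSI escape values:

# Assumed ANSI codes; the real values live on the daemon's logger object.
fmt_bold, fmt_end, fmt_green = '\033[1m', '\033[0m', '\033[32m'

message = (
    '{bold}Ceph cluster status:{nofmt} {health_colour}{health}{nofmt} '
    '{bold}Total OSDs:{nofmt} {total_osds} '
    '{bold}Node OSDs:{nofmt} {node_osds}'.format(
        bold=fmt_bold,
        health_colour=fmt_green,
        nofmt=fmt_end,
        health='HEALTH_OK',
        total_osds=3,
        node_osds=1
    )
)
print(message)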