Fix some bugs and add RBD volume stats

Joshua Boniface 2019-06-19 10:25:22 -04:00
parent db0b382b3d
commit a940d03959
3 changed files with 31 additions and 27 deletions

View File

@@ -1445,7 +1445,7 @@ def ceph_volume_snapshot_remove(pool, volume, name):
    choice = input('Are you sure you want to do this? (y/N) ')
    if choice == 'y' or choice == 'Y':
        zk_conn = pvc_common.startZKConnection(zk_host)
        retcode, retmsg = pvc_ceph.remove_snapshot(zk_conn, pool, name)
        retcode, retmsg = pvc_ceph.remove_snapshot(zk_conn, pool, volume, name)
        cleanup(retcode, retmsg, zk_conn)
    else:
        click.echo('Aborting.')

View File

@@ -504,11 +504,15 @@ def add_volume(zk_conn, logger, pool, name, size):
            print(stderr)
            raise

        # Get volume stats
        retcode, stdout, stderr = common.run_os_command('rbd info {}/{}'.format(pool, name))
        volstats = stdout

        # Add the new volume to ZK
        zkhandler.writedata(zk_conn, {
            '/ceph/volumes/{}/{}'.format(pool, name): '',
            '/ceph/volumes/{}/{}/size'.format(pool, name): size,
            '/ceph/volumes/{}/{}/stats'.format(pool, name): '{}',
            '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
            '/ceph/snapshots/{}/{}'.format(pool, name): '',
        })
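For reference (not part of this commit): the added lines capture the plain-text report printed by `rbd info {pool}/{name}` and store it verbatim under the volume's `stats` key in ZooKeeper, replacing the old `'{}'` placeholder. If a consumer later needs structured values rather than the text report, the rbd CLI can also emit JSON; a minimal standalone sketch using only the standard library, with a hypothetical helper name:

```python
import json
import subprocess

def get_volume_stats(pool, name):
    """Hypothetical helper (not in the repository): return parsed RBD volume info."""
    # '--format json' asks the rbd CLI for machine-readable output instead of
    # the plain-text report that add_volume() above stores in ZooKeeper.
    result = subprocess.run(
        ['rbd', 'info', '--format', 'json', '{}/{}'.format(pool, name)],
        capture_output=True, text=True, check=True,
    )
    info = json.loads(result.stdout)
    return info  # typically includes keys such as 'size', 'objects' and 'order'
```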
@@ -857,7 +861,7 @@ def run_command(zk_conn, logger, this_node, data, d_osd):
    # Removing a snapshot
    elif command == 'snapshot_remove':
        pool, name, name = args.split(',')
        pool, volume, name = args.split(',')
        if this_node.router_state == 'primary':
            # Lock the command queue
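The other half of the snapshot fix is in the daemon's command dispatcher: the old line unpacked the comma-separated argument string as `pool, name, name`, so the volume was never captured and `name` was silently overwritten by the second assignment. A small illustration of why the corrected unpacking matters (the payload value is a hypothetical example):

```python
# The snapshot_remove command carries its arguments as 'pool,volume,name'.
args = 'vmpool,vm1_disk0,backup-20190619'  # hypothetical payload

# Old (buggy) unpacking: 'name' is bound twice, so the volume name is lost
# and 'name' ends up holding the snapshot name.
pool, name, name = args.split(',')

# Fixed unpacking: each field lands in its own variable.
pool, volume, name = args.split(',')
print(pool, volume, name)  # vmpool vm1_disk0 backup-20190619
```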

View File

@@ -657,15 +657,13 @@ d_network = dict()
d_domain = dict()
d_osd = dict()
d_pool = dict()
d_volume = dict()
d_snapshot = dict()
d_volume = dict() # Dict of Dicts
node_list = []
network_list = []
domain_list = []
osd_list = []
pool_list = []
volume_list = []
snapshot_list = []
volume_list = dict() # Dict of Lists

if enable_networking:
    # Create an instance of the DNS Aggregator if we're a coordinator
@@ -829,6 +827,8 @@ if enable_storage:
        for pool in new_pool_list:
            if not pool in pool_list:
                d_pool[pool] = CephInstance.CephPoolInstance(zk_conn, this_node, pool)
                d_volume[pool] = dict()
                volume_list[pool] = []

        # Remove any deleted Pools from the list
        for pool in pool_list:
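Because RBD volume names are only unique within a pool, the daemon now tracks volumes per pool: `d_volume` becomes a dict of dicts and `volume_list` a dict of lists, and every newly seen pool gets an empty slot alongside its CephPoolInstance. Roughly, the resulting shape looks like this (pool and volume names are invented for illustration):

```python
# Illustrative shape of the per-pool tracking structures after this change;
# the '<CephVolumeInstance>' strings stand in for the actual objects.
d_volume = {
    'vmpool':  {'vm1_disk0': '<CephVolumeInstance>', 'vm2_disk0': '<CephVolumeInstance>'},
    'isopool': {'debian-10.iso': '<CephVolumeInstance>'},
}
volume_list = {
    'vmpool':  ['vm1_disk0', 'vm2_disk0'],
    'isopool': ['debian-10.iso'],
}

# Lookups therefore always go through the pool first:
instance = d_volume['vmpool']['vm1_disk0']
```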
@@ -840,26 +840,26 @@ if enable_storage:
        pool_list = new_pool_list
        logger.out('{}Pool list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(pool_list)), state='i')

    # Volume objects
    for pool in pool_list:
        @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
        def update_volumes(new_volume_list):
            global volume_list, d_volume
    # Volume objects in each pool
    for pool in pool_list:
        @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
        def update_volumes(new_volume_list):
            global volume_list, d_volume

            # Add any missing Volumes to the list
            for volume in new_volume_list:
                if not volume in volume_list:
                    d_volume[volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)
            # Add any missing Volumes to the list
            for volume in new_volume_list:
                if not volume in volume_list[pool]:
                    d_volume[pool][volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)

            # Remove any deleted Volumes from the list
            for volume in volume_list:
                if not volume in new_volume_list:
                    # Delete the object
                    del(d_volume[volume])
            # Remove any deleted Volumes from the list
            for volume in volume_list[pool]:
                if not volume in new_volume_list:
                    # Delete the object
                    del(d_volume[pool][volume])

            # Update and print new list
            volume_list = new_volume_list
            logger.out('{}Volume list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(volume_list)), state='i')
            # Update and print new list
            volume_list[pool] = new_volume_list
            logger.out('{}Volume list [{pool}]:{} {plist}'.format(logger.fmt_blue, logger.fmt_end, pool=pool, plist=' '.join(volume_list[pool])), state='i')
###############################################################################
# PHASE 9 - Run the daemon
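The last hunk registers the volume watcher once per pool: each `/ceph/volumes/{pool}` path gets its own kazoo ChildrenWatch, and the callback updates only that pool's entries in `volume_list` and `d_volume`. A self-contained sketch of the same pattern (the ZooKeeper host, paths, and pool names are invented; binding `pool` as a default argument is one common way to pin the loop value for each callback, and is not something this commit adds):

```python
from kazoo.client import KazooClient

# Standalone illustration of per-pool children watches (hypothetical values).
zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()

pool_list = ['vmpool', 'isopool']
volume_list = {pool: [] for pool in pool_list}

for pool in pool_list:
    zk.ensure_path('/ceph/volumes/{}'.format(pool))

    # The decorated function is called with the current child list whenever
    # the children of this path change; pool=pool pins this iteration's pool.
    @zk.ChildrenWatch('/ceph/volumes/{}'.format(pool))
    def update_volumes(new_volume_list, pool=pool):
        volume_list[pool] = new_volume_list
        print('Volume list [{}]: {}'.format(pool, ' '.join(new_volume_list)))
```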