Fix some bugs and add RBD volume stats
commit a940d03959 (parent db0b382b3d)

@@ -1445,7 +1445,7 @@ def ceph_volume_snapshot_remove(pool, volume, name):
     choice = input('Are you sure you want to do this? (y/N) ')
     if choice == 'y' or choice == 'Y':
         zk_conn = pvc_common.startZKConnection(zk_host)
-        retcode, retmsg = pvc_ceph.remove_snapshot(zk_conn, pool, name)
+        retcode, retmsg = pvc_ceph.remove_snapshot(zk_conn, pool, volume, name)
         cleanup(retcode, retmsg, zk_conn)
     else:
         click.echo('Aborting.')

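The fix here is the added volume argument: RBD addresses snapshots as pool/volume@snapshot, so the old three-argument call could not identify which snapshot to delete. A minimal sketch of the shape the corrected pvc_ceph.remove_snapshot presumably has; its body is not part of this diff, so the command and return convention below are assumptions modeled on the surrounding code:

    def remove_snapshot(zk_conn, pool, volume, name):
        # rbd addresses snapshots as pool/volume@snapshot, so all three
        # identifiers are needed to build the target.
        retcode, stdout, stderr = common.run_os_command(
            'rbd snap rm {}/{}@{}'.format(pool, volume, name)
        )
        if retcode:
            return False, 'ERROR: Failed to remove RBD snapshot: {}'.format(stderr)
        # (ZooKeeper bookkeeping for the snapshot key elided here.)
        return True, 'Removed RBD snapshot {}/{}@{}'.format(pool, volume, name)
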
@@ -504,11 +504,15 @@ def add_volume(zk_conn, logger, pool, name, size):
         print(stderr)
         raise
 
+    # Get volume stats
+    retcode, stdout, stderr = common.run_os_command('rbd info {}/{}'.format(pool, name))
+    volstats = stdout
+
     # Add the new volume to ZK
     zkhandler.writedata(zk_conn, {
         '/ceph/volumes/{}/{}'.format(pool, name): '',
         '/ceph/volumes/{}/{}/size'.format(pool, name): size,
-        '/ceph/volumes/{}/{}/stats'.format(pool, name): '{}',
+        '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
         '/ceph/snapshots/{}/{}'.format(pool, name): '',
     })
 
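Note that rbd info without a format flag prints human-readable console text, so the raw stdout stored under the stats key is free-form text rather than structured data. If structured stats were wanted, rbd can emit JSON directly; a hedged sketch follows (--format json is standard rbd behavior, and the key names in the comment are what recent Ceph releases emit, but nothing in this diff relies on either):

    import json

    # Same helper as in the hunk above, with JSON output requested.
    retcode, stdout, stderr = common.run_os_command(
        'rbd info --format json {}/{}'.format(pool, name)
    )
    if not retcode:
        volstats = stdout              # already a JSON document
        parsed = json.loads(volstats)  # e.g. parsed['size'], parsed['objects']
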
@@ -857,7 +861,7 @@ def run_command(zk_conn, logger, this_node, data, d_osd):
 
     # Removing a snapshot
     elif command == 'snapshot_remove':
-        pool, name, name = args.split(',')
+        pool, volume, name = args.split(',')
 
         if this_node.router_state == 'primary':
            # Lock the command queue

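The replaced line is the actual bug: Python allows the same name to appear twice in tuple unpacking and assigns left to right, so the second binding silently wins and the volume name was thrown away:

    # Left-to-right unpacking; the second assignment to `name` wins:
    pool, name, name = 'vms,disk0,snap1'.split(',')
    print(pool, name)   # vms snap1 -- the volume 'disk0' is lost
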
@@ -657,15 +657,13 @@ d_network = dict()
 d_domain = dict()
 d_osd = dict()
 d_pool = dict()
-d_volume = dict()
-d_snapshot = dict()
+d_volume = dict() # Dict of Dicts
 node_list = []
 network_list = []
 domain_list = []
 osd_list = []
 pool_list = []
-volume_list = []
-snapshot_list = []
+volume_list = dict() # Dict of Lists
 
 if enable_networking:
     # Create an instance of the DNS Aggregator if we're a coordinator

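With these two containers reshaped, volume state is tracked per pool instead of in flat globals. A sketch of the resulting structure (pool and volume names invented for illustration):

    # d_volume: pool name -> {volume name -> CephVolumeInstance}
    # volume_list: pool name -> list of volume names
    d_volume = {'vms': {'disk0': Ellipsis, 'disk1': Ellipsis}}
    volume_list = {'vms': ['disk0', 'disk1']}
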
@@ -829,6 +827,8 @@ if enable_storage:
         for pool in new_pool_list:
             if not pool in pool_list:
                 d_pool[pool] = CephInstance.CephPoolInstance(zk_conn, this_node, pool)
+                d_volume[pool] = dict()
+                volume_list[pool] = []
 
         # Remove any deleted Pools from the list
         for pool in pool_list:

@@ -840,7 +840,7 @@ if enable_storage:
         pool_list = new_pool_list
         logger.out('{}Pool list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(pool_list)), state='i')
 
-    # Volume objects
+    # Volume objects in each pool
     for pool in pool_list:
         @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
         def update_volumes(new_volume_list):

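One ChildrenWatch is registered per pool here. Worth flagging, though it is not something this commit changes: the decorator bakes the pool name into the watched path at definition time, but the pool referenced inside the callback body is looked up when the watch later fires, so every callback can observe the value from the final loop iteration. A sketch of the usual remedy, freezing the per-iteration value with a default argument:

    for pool in pool_list:
        @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
        def update_volumes(new_volume_list, pool=pool):
            # `pool` is now fixed per iteration instead of being read from
            # the enclosing scope when the watch fires.
            volume_list[pool] = new_volume_list
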
@@ -848,18 +848,18 @@ if enable_storage:
 
             # Add any missing Volumes to the list
             for volume in new_volume_list:
-                if not volume in volume_list:
-                    d_volume[volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)
+                if not volume in volume_list[pool]:
+                    d_volume[pool][volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)
 
             # Remove any deleted Volumes from the list
-            for volume in volume_list:
+            for volume in volume_list[pool]:
                 if not volume in new_volume_list:
                     # Delete the object
-                    del(d_volume[volume])
+                    del(d_volume[pool][volume])
 
             # Update and print new list
-            volume_list = new_volume_list
-            logger.out('{}Volume list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(volume_list)), state='i')
+            volume_list[pool] = new_volume_list
+            logger.out('{}Volume list [{pool}]:{} {plist}'.format(logger.fmt_blue, logger.fmt_end, pool=pool, plist=' '.join(volume_list[pool])), state='i')
 
 ###############################################################################
 # PHASE 9 - Run the daemon

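A closing note on the volume_list change in this hunk, which looks like one of the bugs the commit title refers to: the old volume_list = new_volume_list inside update_volumes rebound the name, and under Python's scoping rules an assignment anywhere in a function makes that name local to the whole function, so the earlier for volume in volume_list: would raise UnboundLocalError the first time the watch fired. The new volume_list[pool] = new_volume_list is a subscript store into the module-level dict, which needs no global declaration:

    volume_list = {'vms': []}

    def update_volumes_old(new_volume_list):
        for volume in volume_list:        # UnboundLocalError when called:
            pass                          # the assignment below makes the
        volume_list = new_volume_list     # name local to the whole function

    def update_volumes_new(new_volume_list):
        volume_list['vms'] = new_volume_list  # mutates the global dict; safe
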