Implement volume and snapshot handling by daemon

This seems like a super-gross way to do this, but at the moment
I don't have a better way. Maybe just remove this component since
none of the volume/snapshot stuff is dynamic; will see as this
progresses.
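
A note on the loop-registered watches in the diff below: the callbacks close over the loop variables, so ZooKeeper events that arrive after the loops have finished all run with whatever values pool and volume ended on, and only paths that exist at daemon startup get a watch at all — presumably part of what the message above calls "super-gross". A minimal sketch of one common way to pin the loop variable at registration time, assuming a reachable ZooKeeper and the same /ceph/volumes/<pool> layout; the make_volume_watcher() factory and its print handler are illustrative only, not part of this commit:

```python
from kazoo.client import KazooClient

# Sketch only: a standalone illustration of binding the loop variable when
# registering per-pool watches. The ZooKeeper address is a placeholder.
zk_conn = KazooClient(hosts='127.0.0.1:2181')
zk_conn.start()

def make_volume_watcher(pool):
    # Bind the current pool into the callback so each watch keeps acting on
    # its own pool even after the loop has moved on to the next one.
    def update_volumes(new_volume_list):
        # Hypothetical handler; the daemon itself updates volume_list and
        # d_volume and creates CephVolumeInstance objects at this point.
        print('Pool {}: volumes {}'.format(pool, ' '.join(new_volume_list)))
    return update_volumes

for pool in zk_conn.get_children('/ceph/volumes'):
    # ChildrenWatch also accepts the callback as a plain argument instead of
    # being used as a decorator, which makes the per-pool binding explicit.
    zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool), make_volume_watcher(pool))
```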
Joshua Boniface 2019-06-19 09:40:20 -04:00
parent 784b428ed0
commit 1c9f606480
1 changed file with 40 additions and 36 deletions

@@ -657,6 +657,8 @@ d_network = dict()
 d_domain = dict()
 d_osd = dict()
 d_pool = dict()
+d_volume = dict()
+d_snapshot = dict()
 node_list = []
 network_list = []
 domain_list = []
@@ -839,44 +841,46 @@ if enable_storage:
     logger.out('{}Pool list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(pool_list)), state='i')

     # Volume objects
-    @zk_conn.ChildrenWatch('/ceph/volumes')
-    def update_volumes(new_volume_list):
-        global volume_list, d_volume
+    for pool in pool_list:
+        @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
+        def update_volumes(new_volume_list):
+            global volume_list, d_volume

-        # Add any missing Volumes to the list
-        for volume in new_volume_list:
-            if not volume in volume_list:
-                d_volume[volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, volume)
+            # Add any missing Volumes to the list
+            for volume in new_volume_list:
+                if not volume in volume_list:
+                    d_volume[volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)

-        # Remove any deleted Volumes from the list
-        for volume in volume_list:
-            if not volume in new_volume_list:
-                # Delete the object
-                del(d_volume[volume])
+            # Remove any deleted Volumes from the list
+            for volume in volume_list:
+                if not volume in new_volume_list:
+                    # Delete the object
+                    del(d_volume[volume])

-        # Update and print new list
-        volume_list = new_volume_list
-        logger.out('{}Volume list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(volume_list)), state='i')
+            # Update and print new list
+            volume_list = new_volume_list
+            logger.out('{}Volume list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(volume_list)), state='i')

-    # Snapshot objects
-    @zk_conn.ChildrenWatch('/ceph/snapshots')
-    def update_snapshots(new_snapshot_list):
-        global snapshot_list, d_snapshot
+        # Snapshot objects
+        for volume in volume_list:
+            @zk_conn.ChildrenWatch('/ceph/snapshots/{}/{}'.format(pool, volume))
+            def update_snapshots(new_snapshot_list):
+                global snapshot_list, d_snapshot

-        # Add any missing Snapshots to the list
-        for snapshot in new_snapshot_list:
-            if not snapshot in snapshot_list:
-                d_snapshot[snapshot] = CephInstance.CephSnapshotInstance(zk_conn, this_node, snapshot)
+                # Add any missing Snapshots to the list
+                for snapshot in new_snapshot_list:
+                    if not snapshot in snapshot_list:
+                        d_snapshot[snapshot] = CephInstance.CephSnapshotInstance(zk_conn, this_node, pool, volume, snapshot)

-        # Remove any deleted Snapshots from the list
-        for snapshot in snapshot_list:
-            if not snapshot in new_snapshot_list:
-                # Delete the object
-                del(d_snapshot[snapshot])
+                # Remove any deleted Snapshots from the list
+                for snapshot in snapshot_list:
+                    if not snapshot in new_snapshot_list:
+                        # Delete the object
+                        del(d_snapshot[snapshot])

-        # Update and print new list
-        snapshot_list = new_snapshot_list
-        logger.out('{}Snapshot list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(snapshot_list)), state='i')
+                # Update and print new list
+                snapshot_list = new_snapshot_list
+                logger.out('{}Snapshot list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(snapshot_list)), state='i')

 ###############################################################################
 # PHASE 9 - Run the daemon