Implement volume and snapshot handling by daemon
This seems like a super-gross way to do this, but at the moment I don't have a better one. It may make sense to just remove this component, since none of the volume/snapshot stuff is dynamic; we'll see as this progresses.
parent 784b428ed0
commit 1c9f606480
@@ -657,6 +657,8 @@ d_network = dict()
 d_domain = dict()
 d_osd = dict()
 d_pool = dict()
+d_volume = dict()
+d_snapshot = dict()
 node_list = []
 network_list = []
 domain_list = []
@@ -839,14 +841,15 @@ if enable_storage:
     logger.out('{}Pool list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(pool_list)), state='i')
 
     # Volume objects
-    @zk_conn.ChildrenWatch('/ceph/volumes')
+    for pool in pool_list:
+        @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
         def update_volumes(new_volume_list):
             global volume_list, d_volume
 
             # Add any missing Volumes to the list
             for volume in new_volume_list:
                 if not volume in volume_list:
-                d_volume[volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, volume)
+                    d_volume[volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, pool, volume)
 
             # Remove any deleted Volumes from the list
             for volume in volume_list:
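For context on why the loop-plus-decorator pattern above feels "super-gross": assuming this is kazoo (the usual library behind a zk_conn with a ChildrenWatch decorator), each pass through the loop registers one callback for one pool's path. The catch is Python's late binding: a callback body that reads the loop variable pool sees whatever value pool holds when the watch fires, not when it was registered. A minimal standalone sketch of the pattern, with hypothetical ZK address and pool names; the pool=pool default-argument bind is the common workaround, not something this commit does:

from kazoo.client import KazooClient

zk_conn = KazooClient(hosts='127.0.0.1:2181')  # hypothetical ZK address
zk_conn.start()

pool_list = ['vms', 'images']  # hypothetical pool names

for pool in pool_list:
    # The decorator form is equivalent to calling
    # zk_conn.ChildrenWatch(path)(update_volumes) immediately,
    # so one watch is registered per pool.
    @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
    def update_volumes(new_volume_list, pool=pool):
        # pool=pool freezes the loop value at registration time; without
        # it, every callback would resolve the loop variable late and see
        # only the last pool in pool_list.
        print('pool {}: volumes {}'.format(pool, new_volume_list))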
@@ -859,14 +862,15 @@ if enable_storage:
     logger.out('{}Volume list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(volume_list)), state='i')
 
     # Snapshot objects
-    @zk_conn.ChildrenWatch('/ceph/snapshots')
+    for volume in volume_list:
+        @zk_conn.ChildrenWatch('/ceph/snapshots/{}/{}'.format(pool, volume))
         def update_snapshots(new_snapshot_list):
             global snapshot_list, d_snapshot
 
             # Add any missing Snapshots to the list
             for snapshot in new_snapshot_list:
                 if not snapshot in snapshot_list:
-                d_snapshot[snapshot] = CephInstance.CephSnapshotInstance(zk_conn, this_node, snapshot)
+                    d_snapshot[snapshot] = CephInstance.CephSnapshotInstance(zk_conn, this_node, pool, volume, snapshot)
 
             # Remove any deleted Snapshots from the list
             for snapshot in snapshot_list:
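On the design itself: ZooKeeper watches are not recursive, and a ChildrenWatch fires only for direct children of its path, which is why this commit replaces the single watches on /ceph/volumes and /ceph/snapshots with one watch per pool and one per pool/volume pair. As written, the registration loops run once at startup, so pools or volumes created afterwards never get watches; that matches the commit message's observation that the volume/snapshot set is effectively static. A hypothetical, more dynamic shape (not what this commit does) would nest the registrations so the parent watch creates child watches as entries appear; real code would also need the de-duplication shown below, since each firing of the outer callback would otherwise stack another watch on the same path:

from kazoo.client import KazooClient

zk_conn = KazooClient(hosts='127.0.0.1:2181')  # hypothetical ZK address
zk_conn.start()

watched_pools = set()  # hypothetical bookkeeping, not in the commit

@zk_conn.ChildrenWatch('/ceph/volumes')
def update_pools(new_pool_list):
    # Fires whenever pools appear or disappear under /ceph/volumes.
    for pool in new_pool_list:
        if pool in watched_pools:
            continue  # avoid registering duplicate watches on re-fire
        watched_pools.add(pool)

        @zk_conn.ChildrenWatch('/ceph/volumes/{}'.format(pool))
        def update_volumes(new_volume_list, pool=pool):
            print('pool {}: volumes {}'.format(pool, new_volume_list))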