Add creation of volume and snapshot lists
@@ -662,6 +662,8 @@ network_list = []
 domain_list = []
 osd_list = []
 pool_list = []
+volume_list = []
+snapshot_list = []
 
 if enable_networking:
     # Create an instance of the DNS Aggregator if we're a coordinator
@@ -836,6 +838,46 @@ if enable_storage:
         pool_list = new_pool_list
         logger.out('{}Pool list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(pool_list)), state='i')
 
+    # Volume objects
+    @zk_conn.ChildrenWatch('/ceph/volumes')
+    def update_volumes(new_volume_list):
+        global volume_list, d_volume
+
+        # Add any missing Volumes to the list
+        for volume in new_volume_list:
+            if not volume in volume_list:
+                d_volume[volume] = CephInstance.CephVolumeInstance(zk_conn, this_node, volume)
+
+        # Remove any deleted Volumes from the list
+        for volume in volume_list:
+            if not volume in new_volume_list:
+                # Delete the object
+                del(d_volume[volume])
+
+        # Update and print new list
+        volume_list = new_volume_list
+        logger.out('{}Volume list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(volume_list)), state='i')
+
+    # Snapshot objects
+    @zk_conn.ChildrenWatch('/ceph/snapshots')
+    def update_snapshots(new_snapshot_list):
+        global snapshot_list, d_snapshot
+
+        # Add any missing Snapshots to the list
+        for snapshot in new_snapshot_list:
+            if not snapshot in snapshot_list:
+                d_snapshot[snapshot] = CephInstance.CephSnapshotInstance(zk_conn, this_node, snapshot)
+
+        # Remove any deleted Snapshots from the list
+        for snapshot in snapshot_list:
+            if not snapshot in new_snapshot_list:
+                # Delete the object
+                del(d_snapshot[snapshot])
+
+        # Update and print new list
+        snapshot_list = new_snapshot_list
+        logger.out('{}Snapshot list:{} {}'.format(logger.fmt_blue, logger.fmt_end, ' '.join(snapshot_list)), state='i')
+
 ###############################################################################
 # PHASE 9 - Run the daemon
 ###############################################################################
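For reference, the added handlers use kazoo's ChildrenWatch: the decorated function is invoked with the current child list of the watched ZooKeeper path, once at registration and again whenever the children change, and the function body reconciles a local dict of per-object instances against that list. The sketch below is a minimal, self-contained illustration of the same pattern, not the daemon's actual code; the ZooKeeper address and the TrackedObject class standing in for CephInstance.CephVolumeInstance are assumptions made for the example.

    # Minimal sketch of the kazoo ChildrenWatch reconcile pattern used above.
    # Assumptions (not from the commit): ZooKeeper at localhost:2181 and a
    # hypothetical TrackedObject class in place of CephInstance.CephVolumeInstance.
    from kazoo.client import KazooClient

    class TrackedObject:
        # Hypothetical per-child instance; the real CephVolumeInstance lives elsewhere.
        def __init__(self, zk_conn, name):
            self.zk_conn = zk_conn
            self.name = name

    zk_conn = KazooClient(hosts='localhost:2181')
    zk_conn.start()
    zk_conn.ensure_path('/ceph/volumes')

    volume_list = []
    d_volume = dict()

    @zk_conn.ChildrenWatch('/ceph/volumes')
    def update_volumes(new_volume_list):
        global volume_list

        # Create an instance for each child znode that appeared
        for volume in new_volume_list:
            if volume not in volume_list:
                d_volume[volume] = TrackedObject(zk_conn, volume)

        # Drop instances whose child znode disappeared
        for volume in volume_list:
            if volume not in new_volume_list:
                del d_volume[volume]

        # Replace the cached list and report the new state
        volume_list = new_volume_list
        print('Volume list: {}'.format(' '.join(volume_list)))

One detail of the kazoo API worth noting: returning False from a ChildrenWatch callback cancels the watch, while the handlers in the diff return None, so they keep firing for the life of the ZooKeeper connection.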