From 688d1a6ae20396660ad74d3c34319cf17f827b08 Mon Sep 17 00:00:00 2001
From: "Joshua M. Boniface"
Date: Sat, 29 May 2021 20:29:51 -0400
Subject: [PATCH] Rework ceph library for new zkhandler

---
 daemon-common/ceph.py | 316 ++++++++++++++++++++----------------------
 1 file changed, 151 insertions(+), 165 deletions(-)

diff --git a/daemon-common/ceph.py b/daemon-common/ceph.py
index a62b837c..c1ce33c3 100644
--- a/daemon-common/ceph.py
+++ b/daemon-common/ceph.py
@@ -26,7 +26,6 @@ import time
 import math
 
 import daemon_lib.vm as vm
-import daemon_lib.zkhandler as zkhandler
 import daemon_lib.common as common
 
@@ -35,42 +34,30 @@ import daemon_lib.common as common
 #
 
 # Verify OSD is valid in cluster
-def verifyOSD(zk_conn, osd_id):
-    if zkhandler.exists(zk_conn, '/ceph/osds/{}'.format(osd_id)):
-        return True
-    else:
-        return False
+def verifyOSD(zkhandler, osd_id):
+    return zkhandler.exists('/ceph/osds/{}'.format(osd_id))
 
 
 # Verify Pool is valid in cluster
-def verifyPool(zk_conn, name):
-    if zkhandler.exists(zk_conn, '/ceph/pools/{}'.format(name)):
-        return True
-    else:
-        return False
+def verifyPool(zkhandler, name):
+    return zkhandler.exists('/ceph/pools/{}'.format(name))
 
 
 # Verify Volume is valid in cluster
-def verifyVolume(zk_conn, pool, name):
-    if zkhandler.exists(zk_conn, '/ceph/volumes/{}/{}'.format(pool, name)):
-        return True
-    else:
-        return False
+def verifyVolume(zkhandler, pool, name):
+    return zkhandler.exists('/ceph/volumes/{}/{}'.format(pool, name))
 
 
 # Verify Snapshot is valid in cluster
-def verifySnapshot(zk_conn, pool, volume, name):
-    if zkhandler.exists(zk_conn, '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name)):
-        return True
-    else:
-        return False
+def verifySnapshot(zkhandler, pool, volume, name):
+    return zkhandler.exists('/ceph/snapshots/{}/{}/{}'.format(pool, volume, name))
 
 
 # Verify OSD path is valid in cluster
-def verifyOSDBlock(zk_conn, node, device):
-    for osd in zkhandler.listchildren(zk_conn, '/ceph/osds'):
-        osd_node = zkhandler.readdata(zk_conn, '/ceph/osds/{}/node'.format(osd))
-        osd_device = zkhandler.readdata(zk_conn, '/ceph/osds/{}/device'.format(osd))
+def verifyOSDBlock(zkhandler, node, device):
+    for osd in zkhandler.children('/ceph/osds'):
+        osd_node = zkhandler.read('/ceph/osds/{}/node'.format(osd))
+        osd_device = zkhandler.read('/ceph/osds/{}/device'.format(osd))
         if node == osd_node and device == osd_device:
             return osd
     return None
@@ -156,9 +143,9 @@ def format_pct_tohuman(datapct):
 #
 # Status functions
 #
-def get_status(zk_conn):
-    primary_node = zkhandler.readdata(zk_conn, '/primary_node')
-    ceph_status = zkhandler.readdata(zk_conn, '/ceph').rstrip()
+def get_status(zkhandler):
+    primary_node = zkhandler.read('/primary_node')
+    ceph_status = zkhandler.read('/ceph').rstrip()
 
     # Create a data structure for the information
     status_data = {
@@ -169,9 +156,9 @@
     return True, status_data
 
 
-def get_util(zk_conn):
-    primary_node = zkhandler.readdata(zk_conn, '/primary_node')
-    ceph_df = zkhandler.readdata(zk_conn, '/ceph/util').rstrip()
+def get_util(zkhandler):
+    primary_node = zkhandler.read('/primary_node')
+    ceph_df = zkhandler.read('/ceph/util').rstrip()
 
     # Create a data structure for the information
    status_data = {
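The heart of the rework is visible in the hunks above: every function that used to take a raw zk_conn plus the zkhandler module now takes a single ZKHandler object whose methods carry the connection implicitly, and the call names shorten (readdata becomes read, listchildren becomes children). A minimal in-memory stand-in, inferred only from the calls made in this patch and useful for following the remaining hunks; nothing below is part of the actual PVC codebase:

import_note = """Illustrative stub only."""

class FakeZKHandler:
    def __init__(self):
        self._tree = {}

    def exists(self, key):
        return key in self._tree

    def read(self, key):
        return self._tree[key]

    def write(self, kvpairs):
        # New-style write: a list of (key, value) tuples, not a dict
        for key, value in kvpairs:
            self._tree[key] = value
        return True

    def children(self, key):
        # Immediate child names beneath a key, like listchildren() before it
        prefix = key.rstrip('/') + '/'
        return sorted({k[len(prefix):].split('/')[0]
                       for k in self._tree if k.startswith(prefix)})

    def delete(self, key):
        # Remove a key and everything beneath it
        for k in [k for k in self._tree if k == key or k.startswith(key + '/')]:
            del self._tree[k]


zkhandler = FakeZKHandler()
zkhandler.write([('/ceph/osds/0', ''), ('/ceph/osds/0/node', 'hv1')])
assert zkhandler.exists('/ceph/osds/0')
assert zkhandler.children('/ceph/osds') == ['0']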
@@ -185,15 +172,14 @@
 #
 # OSD functions
 #
-def getClusterOSDList(zk_conn):
+def getClusterOSDList(zkhandler):
     # Get a list of VNIs by listing the children of /networks
-    osd_list = zkhandler.listchildren(zk_conn, '/ceph/osds')
-    return osd_list
+    return zkhandler.children('/ceph/osds')
 
 
-def getOSDInformation(zk_conn, osd_id):
+def getOSDInformation(zkhandler, osd_id):
     # Parse the stats data
-    osd_stats_raw = zkhandler.readdata(zk_conn, '/ceph/osds/{}/stats'.format(osd_id))
+    osd_stats_raw = zkhandler.read('/ceph/osds/{}/stats'.format(osd_id))
     osd_stats = dict(json.loads(osd_stats_raw))
 
     osd_information = {
@@ -205,26 +191,26 @@
 
 # OSD addition and removal uses the /cmd/ceph pipe
 # These actions must occur on the specific node they reference
-def add_osd(zk_conn, node, device, weight):
+def add_osd(zkhandler, node, device, weight):
     # Verify the target node exists
-    if not common.verifyNode(zk_conn, node):
+    if not common.verifyNode(zkhandler, node):
         return False, 'ERROR: No node named "{}" is present in the cluster.'.format(node)
 
     # Verify target block device isn't in use
-    block_osd = verifyOSDBlock(zk_conn, node, device)
+    block_osd = verifyOSDBlock(zkhandler, node, device)
     if block_osd:
         return False, 'ERROR: Block device "{}" on node "{}" is used by OSD "{}"'.format(device, node, block_osd)
 
     # Tell the cluster to create a new OSD for the host
     add_osd_string = 'osd_add {},{},{}'.format(node, device, weight)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': add_osd_string})
+    zkhandler.write([('/cmd/ceph', add_osd_string)])
     # Wait 1/2 second for the cluster to get the message and start working
     time.sleep(0.5)
     # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
+    lock = zkhandler.readlock('/cmd/ceph')
     with lock:
         try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
+            result = zkhandler.read('/cmd/ceph').split()[0]
             if result == 'success-osd_add':
                 message = 'Created new OSD with block device "{}" on node "{}".'.format(device, node)
                 success = True
@@ -236,28 +222,28 @@
             success = False
 
     # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
+    lock = zkhandler.writelock('/cmd/ceph')
     with lock:
         time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+        zkhandler.write([('/cmd/ceph', '')])
 
     return success, message
 
 
-def remove_osd(zk_conn, osd_id):
-    if not verifyOSD(zk_conn, osd_id):
+def remove_osd(zkhandler, osd_id):
+    if not verifyOSD(zkhandler, osd_id):
         return False, 'ERROR: No OSD with ID "{}" is present in the cluster.'.format(osd_id)
 
     # Tell the cluster to remove an OSD
     remove_osd_string = 'osd_remove {}'.format(osd_id)
-    zkhandler.writedata(zk_conn, {'/cmd/ceph': remove_osd_string})
+    zkhandler.write([('/cmd/ceph', remove_osd_string)])
     # Wait 1/2 second for the cluster to get the message and start working
     time.sleep(0.5)
     # Acquire a read lock, so we get the return exclusively
-    lock = zkhandler.readlock(zk_conn, '/cmd/ceph')
+    lock = zkhandler.readlock('/cmd/ceph')
     with lock:
         try:
-            result = zkhandler.readdata(zk_conn, '/cmd/ceph').split()[0]
+            result = zkhandler.read('/cmd/ceph').split()[0]
             if result == 'success-osd_remove':
                 message = 'Removed OSD "{}" from the cluster.'.format(osd_id)
                 success = True
             else:
                 message = 'ERROR: Failed to remove OSD: {}'.format(result)
                 success = False
         except Exception:
             success = False
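The add_osd() and remove_osd() hunks above share a command-pipe idiom around the /cmd/ceph key: write the command, give the target node time to pick it up, read the result under a read lock, then clear the pipe under a write lock. A condensed sketch of that sequence, assuming readlock()/writelock() return context managers as their use under `with` in the patch suggests; run_ceph_command is a hypothetical name, not part of the patch:

import time

def run_ceph_command(zkhandler, command_string, success_result):
    # Post the command onto the pipe
    zkhandler.write([('/cmd/ceph', command_string)])
    time.sleep(0.5)  # wait for the responsible node to see the command

    # Read the outcome exclusively
    with zkhandler.readlock('/cmd/ceph'):
        result = zkhandler.read('/cmd/ceph').split()[0]
        success = (result == success_result)

    # Clear the pipe so the next command starts from a clean state
    with zkhandler.writelock('/cmd/ceph'):
        time.sleep(0.5)
        zkhandler.write([('/cmd/ceph', '')])

    return success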
@@ -269,16 +255,16 @@
             message = 'ERROR Command ignored by node.'
 
     # Acquire a write lock to ensure things go smoothly
-    lock = zkhandler.writelock(zk_conn, '/cmd/ceph')
+    lock = zkhandler.writelock('/cmd/ceph')
     with lock:
         time.sleep(0.5)
-        zkhandler.writedata(zk_conn, {'/cmd/ceph': ''})
+        zkhandler.write([('/cmd/ceph', '')])
 
     return success, message
 
 
-def in_osd(zk_conn, osd_id):
-    if not verifyOSD(zk_conn, osd_id):
+def in_osd(zkhandler, osd_id):
+    if not verifyOSD(zkhandler, osd_id):
         return False, 'ERROR: No OSD with ID "{}" is present in the cluster.'.format(osd_id)
 
     retcode, stdout, stderr = common.run_os_command('ceph osd in {}'.format(osd_id))
@@ -288,8 +274,8 @@
     return True, 'Set OSD {} online.'.format(osd_id)
 
 
-def out_osd(zk_conn, osd_id):
-    if not verifyOSD(zk_conn, osd_id):
+def out_osd(zkhandler, osd_id):
+    if not verifyOSD(zkhandler, osd_id):
         return False, 'ERROR: No OSD with ID "{}" is present in the cluster.'.format(osd_id)
 
     retcode, stdout, stderr = common.run_os_command('ceph osd out {}'.format(osd_id))
@@ -299,7 +285,7 @@
     return True, 'Set OSD {} offline.'.format(osd_id)
 
 
-def set_osd(zk_conn, option):
+def set_osd(zkhandler, option):
     retcode, stdout, stderr = common.run_os_command('ceph osd set {}'.format(option))
     if retcode:
         return False, 'ERROR: Failed to set property "{}": {}'.format(option, stderr)
@@ -307,7 +293,7 @@
     return True, 'Set OSD property "{}".'.format(option)
 
 
-def unset_osd(zk_conn, option):
+def unset_osd(zkhandler, option):
     retcode, stdout, stderr = common.run_os_command('ceph osd unset {}'.format(option))
     if retcode:
         return False, 'ERROR: Failed to unset property "{}": {}'.format(option, stderr)
@@ -315,9 +301,9 @@
     return True, 'Unset OSD property "{}".'.format(option)
 
 
-def get_list_osd(zk_conn, limit, is_fuzzy=True):
+def get_list_osd(zkhandler, limit, is_fuzzy=True):
     osd_list = []
-    full_osd_list = zkhandler.listchildren(zk_conn, '/ceph/osds')
+    full_osd_list = zkhandler.children('/ceph/osds')
 
     if is_fuzzy and limit:
         # Implicitly assume fuzzy limits
@@ -330,11 +316,11 @@
         if limit:
             try:
                 if re.match(limit, osd):
-                    osd_list.append(getOSDInformation(zk_conn, osd))
+                    osd_list.append(getOSDInformation(zkhandler, osd))
             except Exception as e:
                 return False, 'Regex Error: {}'.format(e)
         else:
-            osd_list.append(getOSDInformation(zk_conn, osd))
+            osd_list.append(getOSDInformation(zkhandler, osd))
 
     return True, sorted(osd_list, key=lambda x: int(x['id']))
 
@@ -342,11 +328,11 @@
 #
 # Pool functions
 #
-def getPoolInformation(zk_conn, pool):
+def getPoolInformation(zkhandler, pool):
     # Parse the stats data
-    pool_stats_raw = zkhandler.readdata(zk_conn, '/ceph/pools/{}/stats'.format(pool))
+    pool_stats_raw = zkhandler.read('/ceph/pools/{}/stats'.format(pool))
     pool_stats = dict(json.loads(pool_stats_raw))
-    volume_count = len(getCephVolumes(zk_conn, pool))
+    volume_count = len(getCephVolumes(zkhandler, pool))
 
     pool_information = {
         'name': pool,
@@ -356,7 +342,7 @@
     return pool_information
 
 
-def add_pool(zk_conn, name, pgs, replcfg):
+def add_pool(zkhandler, name, pgs, replcfg):
     # Prepare the copies/mincopies variables
     try:
         copies, mincopies = replcfg.split(',')
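The pool hunks below show the recurring write-call conversion most clearly: the old module-level writedata() took a connection plus a dict, while the new write() method takes a list of (key, value) tuples. Side by side, reusing the FakeZKHandler stub from the first sketch; the paths and values are illustrative only:

name = 'testpool'

# Old style (removed by this patch):
#     zkhandler.writedata(zk_conn, {
#         '/ceph/pools/{}'.format(name): '',
#     })

# New style:
zkhandler.write([
    ('/ceph/pools/{}'.format(name), ''),
    ('/ceph/pools/{}/pgs'.format(name), '128'),
    ('/ceph/pools/{}/stats'.format(name), '{}'),
])
assert zkhandler.read('/ceph/pools/testpool/pgs') == '128'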
@@ -388,24 +374,24 @@
         return False, 'ERROR: Failed to enable RBD application on pool "{}" : {}'.format(name, stderr)
 
     # 4. Add the new pool to Zookeeper
-    zkhandler.writedata(zk_conn, {
-        '/ceph/pools/{}'.format(name): '',
-        '/ceph/pools/{}/pgs'.format(name): pgs,
-        '/ceph/pools/{}/stats'.format(name): '{}',
-        '/ceph/volumes/{}'.format(name): '',
-        '/ceph/snapshots/{}'.format(name): '',
-    })
+    zkhandler.write([
+        ('/ceph/pools/{}'.format(name), ''),
+        ('/ceph/pools/{}/pgs'.format(name), pgs),
+        ('/ceph/pools/{}/stats'.format(name), '{}'),
+        ('/ceph/volumes/{}'.format(name), ''),
+        ('/ceph/snapshots/{}'.format(name), ''),
+    ])
 
     return True, 'Created RBD pool "{}" with {} PGs'.format(name, pgs)
 
 
-def remove_pool(zk_conn, name):
-    if not verifyPool(zk_conn, name):
+def remove_pool(zkhandler, name):
+    if not verifyPool(zkhandler, name):
         return False, 'ERROR: No pool with name "{}" is present in the cluster.'.format(name)
 
     # 1. Remove pool volumes
-    for volume in zkhandler.listchildren(zk_conn, '/ceph/volumes/{}'.format(name)):
-        remove_volume(zk_conn, name, volume)
+    for volume in zkhandler.children('/ceph/volumes/{}'.format(name)):
+        remove_volume(zkhandler, name, volume)
 
     # 2. Remove the pool
     retcode, stdout, stderr = common.run_os_command('ceph osd pool rm {pool} {pool} --yes-i-really-really-mean-it'.format(pool=name))
@@ -413,16 +399,16 @@
         return False, 'ERROR: Failed to remove pool "{}": {}'.format(name, stderr)
 
     # 3. Delete pool from Zookeeper
-    zkhandler.deletekey(zk_conn, '/ceph/pools/{}'.format(name))
-    zkhandler.deletekey(zk_conn, '/ceph/volumes/{}'.format(name))
-    zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}'.format(name))
+    zkhandler.delete('/ceph/pools/{}'.format(name))
+    zkhandler.delete('/ceph/volumes/{}'.format(name))
+    zkhandler.delete('/ceph/snapshots/{}'.format(name))
 
     return True, 'Removed RBD pool "{}" and all volumes.'.format(name)
 
 
-def get_list_pool(zk_conn, limit, is_fuzzy=True):
+def get_list_pool(zkhandler, limit, is_fuzzy=True):
     pool_list = []
-    full_pool_list = zkhandler.listchildren(zk_conn, '/ceph/pools')
+    full_pool_list = zkhandler.children('/ceph/pools')
 
     if limit:
         if not is_fuzzy:
@@ -432,11 +418,11 @@
         if limit:
             try:
                 if re.match(limit, pool):
-                    pool_list.append(getPoolInformation(zk_conn, pool))
+                    pool_list.append(getPoolInformation(zkhandler, pool))
             except Exception as e:
                 return False, 'Regex Error: {}'.format(e)
         else:
-            pool_list.append(getPoolInformation(zk_conn, pool))
+            pool_list.append(getPoolInformation(zkhandler, pool))
 
     return True, sorted(pool_list, key=lambda x: int(x['stats']['id']))
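get_list_osd() and get_list_pool() above, and get_list_volume() further down, share the same limit-filtering idiom. A sketch of it as one hypothetical helper, assuming non-fuzzy limits are anchored so they must match the whole name (the anchoring lines themselves sit in elided context in these hunks):

import re

def filter_by_limit(names, limit, is_fuzzy=True):
    if limit and not is_fuzzy:
        limit = '^' + limit + '$'  # assumed anchoring for exact matches
    matched = []
    for name in names:
        try:
            if not limit or re.match(limit, name):
                matched.append(name)
        except re.error as e:
            return False, 'Regex Error: {}'.format(e)
    return True, matched

assert filter_by_limit(['pool1', 'pool10'], 'pool1', is_fuzzy=False)[1] == ['pool1']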
@@ -444,23 +430,23 @@
 #
 # Volume functions
 #
-def getCephVolumes(zk_conn, pool):
+def getCephVolumes(zkhandler, pool):
     volume_list = list()
     if not pool:
-        pool_list = zkhandler.listchildren(zk_conn, '/ceph/pools')
+        pool_list = zkhandler.children('/ceph/pools')
     else:
         pool_list = [pool]
 
     for pool_name in pool_list:
-        for volume_name in zkhandler.listchildren(zk_conn, '/ceph/volumes/{}'.format(pool_name)):
+        for volume_name in zkhandler.children('/ceph/volumes/{}'.format(pool_name)):
             volume_list.append('{}/{}'.format(pool_name, volume_name))
 
     return volume_list
 
 
-def getVolumeInformation(zk_conn, pool, volume):
+def getVolumeInformation(zkhandler, pool, volume):
     # Parse the stats data
-    volume_stats_raw = zkhandler.readdata(zk_conn, '/ceph/volumes/{}/{}/stats'.format(pool, volume))
+    volume_stats_raw = zkhandler.read('/ceph/volumes/{}/{}/stats'.format(pool, volume))
     volume_stats = dict(json.loads(volume_stats_raw))
     # Format the size to something nicer
     volume_stats['size'] = format_bytes_tohuman(volume_stats['size'])
@@ -473,9 +459,9 @@
     return volume_information
 
 
-def add_volume(zk_conn, pool, name, size):
+def add_volume(zkhandler, pool, name, size):
     # 1. Verify the size of the volume
-    pool_information = getPoolInformation(zk_conn, pool)
+    pool_information = getPoolInformation(zkhandler, pool)
     size_bytes = format_bytes_fromhuman(size)
     if size_bytes >= int(pool_information['stats']['free_bytes']):
         return False, 'ERROR: Requested volume size is greater than the available free space in the pool'
@@ -494,17 +480,17 @@
         volstats = stdout
 
     # 3. Add the new volume to Zookeeper
-    zkhandler.writedata(zk_conn, {
-        '/ceph/volumes/{}/{}'.format(pool, name): '',
-        '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
-        '/ceph/snapshots/{}/{}'.format(pool, name): '',
-    })
+    zkhandler.write([
+        ('/ceph/volumes/{}/{}'.format(pool, name), ''),
+        ('/ceph/volumes/{}/{}/stats'.format(pool, name), volstats),
+        ('/ceph/snapshots/{}/{}'.format(pool, name), ''),
+    ])
 
     return True, 'Created RBD volume "{}/{}" ({}).'.format(pool, name, size)
 
 
-def clone_volume(zk_conn, pool, name_src, name_new):
-    if not verifyVolume(zk_conn, pool, name_src):
+def clone_volume(zkhandler, pool, name_src, name_new):
+    if not verifyVolume(zkhandler, pool, name_src):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name_src, pool)
 
     # 1. Clone the volume
@@ -517,17 +503,17 @@
         volstats = stdout
 
     # 3. Add the new volume to Zookeeper
-    zkhandler.writedata(zk_conn, {
-        '/ceph/volumes/{}/{}'.format(pool, name_new): '',
-        '/ceph/volumes/{}/{}/stats'.format(pool, name_new): volstats,
-        '/ceph/snapshots/{}/{}'.format(pool, name_new): '',
-    })
+    zkhandler.write([
+        ('/ceph/volumes/{}/{}'.format(pool, name_new), ''),
+        ('/ceph/volumes/{}/{}/stats'.format(pool, name_new), volstats),
+        ('/ceph/snapshots/{}/{}'.format(pool, name_new), ''),
+    ])
 
     return True, 'Cloned RBD volume "{}" to "{}" in pool "{}"'.format(name_src, name_new, pool)
 
 
-def resize_volume(zk_conn, pool, name, size):
-    if not verifyVolume(zk_conn, pool, name):
+def resize_volume(zkhandler, pool, name, size):
+    if not verifyVolume(zkhandler, pool, name):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
 
     # 1. Resize the volume
@@ -538,7 +524,7 @@
     # 2a. Determine the node running this VM if applicable
     active_node = None
     volume_vm_name = name.split('_')[0]
-    retcode, vm_info = vm.get_info(zk_conn, volume_vm_name)
+    retcode, vm_info = vm.get_info(zkhandler, volume_vm_name)
     if retcode:
         for disk in vm_info['disks']:
             # This block device is present in this VM so we can continue
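add_volume() and resize_volume() accept human-readable sizes and convert them with format_bytes_fromhuman(), whose body is outside these hunks. A plausible minimal sketch, assuming 1024-based suffixes; the real implementation may differ:

import re

def format_bytes_fromhuman_sketch(datahuman):
    # Accepts forms like '8G', '8GB', or a bare byte count
    match = re.match(r'^(\d+)\s*([KMGTP]?)B?$', str(datahuman).upper())
    if match is None:
        raise ValueError('Unparseable size: {}'.format(datahuman))
    value, suffix = int(match.group(1)), match.group(2)
    powers = {'': 0, 'K': 1, 'M': 2, 'G': 3, 'T': 4, 'P': 5}
    return value * (1024 ** powers[suffix])

assert format_bytes_fromhuman_sketch('8G') == 8 * 1024 ** 3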
@@ -564,17 +550,17 @@
         volstats = stdout
 
     # 3. Add the new volume to Zookeeper
-    zkhandler.writedata(zk_conn, {
-        '/ceph/volumes/{}/{}'.format(pool, name): '',
-        '/ceph/volumes/{}/{}/stats'.format(pool, name): volstats,
-        '/ceph/snapshots/{}/{}'.format(pool, name): '',
-    })
+    zkhandler.write([
+        ('/ceph/volumes/{}/{}'.format(pool, name), ''),
+        ('/ceph/volumes/{}/{}/stats'.format(pool, name), volstats),
+        ('/ceph/snapshots/{}/{}'.format(pool, name), ''),
+    ])
 
     return True, 'Resized RBD volume "{}" to size "{}" in pool "{}".'.format(name, size, pool)
 
 
-def rename_volume(zk_conn, pool, name, new_name):
-    if not verifyVolume(zk_conn, pool, name):
+def rename_volume(zkhandler, pool, name, new_name):
+    if not verifyVolume(zkhandler, pool, name):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
 
     # 1. Rename the volume
@@ -583,30 +569,30 @@
         return False, 'ERROR: Failed to rename volume "{}" to "{}" in pool "{}": {}'.format(name, new_name, pool, stderr)
 
     # 2. Rename the volume in Zookeeper
-    zkhandler.renamekey(zk_conn, {
-        '/ceph/volumes/{}/{}'.format(pool, name): '/ceph/volumes/{}/{}'.format(pool, new_name),
-        '/ceph/snapshots/{}/{}'.format(pool, name): '/ceph/snapshots/{}/{}'.format(pool, new_name)
-    })
+    zkhandler.rename([
+        ('/ceph/volumes/{}/{}'.format(pool, name), '/ceph/volumes/{}/{}'.format(pool, new_name)),
+        ('/ceph/snapshots/{}/{}'.format(pool, name), '/ceph/snapshots/{}/{}'.format(pool, new_name))
+    ])
 
     # 3. Get volume stats
     retcode, stdout, stderr = common.run_os_command('rbd info --format json {}/{}'.format(pool, new_name))
     volstats = stdout
 
     # 4. Update the volume stats in Zookeeper
-    zkhandler.writedata(zk_conn, {
-        '/ceph/volumes/{}/{}/stats'.format(pool, new_name): volstats,
-    })
+    zkhandler.write([
+        ('/ceph/volumes/{}/{}/stats'.format(pool, new_name), volstats)
+    ])
 
     return True, 'Renamed RBD volume "{}" to "{}" in pool "{}".'.format(name, new_name, pool)
 
 
-def remove_volume(zk_conn, pool, name):
-    if not verifyVolume(zk_conn, pool, name):
+def remove_volume(zkhandler, pool, name):
+    if not verifyVolume(zkhandler, pool, name):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
 
     # 1. Remove volume snapshots
-    for snapshot in zkhandler.listchildren(zk_conn, '/ceph/snapshots/{}/{}'.format(pool, name)):
-        remove_snapshot(zk_conn, pool, name, snapshot)
+    for snapshot in zkhandler.children('/ceph/snapshots/{}/{}'.format(pool, name)):
+        remove_snapshot(zkhandler, pool, name, snapshot)
 
     # 2. Remove the volume
     retcode, stdout, stderr = common.run_os_command('rbd rm {}/{}'.format(pool, name))
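rename_volume() above also swaps renamekey() for rename(), which likewise moves from a dict to a list of (source, destination) tuples. A sketch of the intended semantics, modeled on a plain dict standing in for the ZK tree; the real implementation operates on live znodes:

def rename_keys(tree, pairs):
    for source, destination in pairs:
        # Move the key itself plus everything beneath it
        moved = [k for k in list(tree) if k == source or k.startswith(source + '/')]
        for key in moved:
            tree[destination + key[len(source):]] = tree.pop(key)

tree = {'/ceph/volumes/pool1/volA': '', '/ceph/volumes/pool1/volA/stats': '{}'}
rename_keys(tree, [('/ceph/volumes/pool1/volA', '/ceph/volumes/pool1/volB')])
assert '/ceph/volumes/pool1/volB/stats' in tree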
@@ -614,14 +600,14 @@
         return False, 'ERROR: Failed to remove RBD volume "{}" in pool "{}": {}'.format(name, pool, stderr)
 
     # 3. Delete volume from Zookeeper
-    zkhandler.deletekey(zk_conn, '/ceph/volumes/{}/{}'.format(pool, name))
-    zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}'.format(pool, name))
+    zkhandler.delete('/ceph/volumes/{}/{}'.format(pool, name))
+    zkhandler.delete('/ceph/snapshots/{}/{}'.format(pool, name))
 
     return True, 'Removed RBD volume "{}" in pool "{}".'.format(name, pool)
 
 
-def map_volume(zk_conn, pool, name):
-    if not verifyVolume(zk_conn, pool, name):
+def map_volume(zkhandler, pool, name):
+    if not verifyVolume(zkhandler, pool, name):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
 
     # 1. Map the volume onto the local system
@@ -639,8 +625,8 @@
     return True, mapped_volume
 
 
-def unmap_volume(zk_conn, pool, name):
-    if not verifyVolume(zk_conn, pool, name):
+def unmap_volume(zkhandler, pool, name):
+    if not verifyVolume(zkhandler, pool, name):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(name, pool)
 
     mapped_volume = '/dev/rbd/{}/{}'.format(pool, name)
@@ -657,12 +643,12 @@
     return True, 'Unmapped RBD volume at "{}".'.format(mapped_volume)
 
 
-def get_list_volume(zk_conn, pool, limit, is_fuzzy=True):
+def get_list_volume(zkhandler, pool, limit, is_fuzzy=True):
     volume_list = []
-    if pool and not verifyPool(zk_conn, pool):
+    if pool and not verifyPool(zkhandler, pool):
         return False, 'ERROR: No pool with name "{}" is present in the cluster.'.format(pool)
 
-    full_volume_list = getCephVolumes(zk_conn, pool)
+    full_volume_list = getCephVolumes(zkhandler, pool)
 
     if limit:
         if not is_fuzzy:
@@ -679,11 +665,11 @@
         if limit:
             try:
                 if re.match(limit, volume_name):
-                    volume_list.append(getVolumeInformation(zk_conn, pool_name, volume_name))
+                    volume_list.append(getVolumeInformation(zkhandler, pool_name, volume_name))
             except Exception as e:
                 return False, 'Regex Error: {}'.format(e)
         else:
-            volume_list.append(getVolumeInformation(zk_conn, pool_name, volume_name))
+            volume_list.append(getVolumeInformation(zkhandler, pool_name, volume_name))
 
     return True, sorted(volume_list, key=lambda x: str(x['name']))
 
@@ -691,11 +677,11 @@
 #
 # Snapshot functions
 #
-def getCephSnapshots(zk_conn, pool, volume):
+def getCephSnapshots(zkhandler, pool, volume):
     snapshot_list = list()
     volume_list = list()
 
-    volume_list = getCephVolumes(zk_conn, pool)
+    volume_list = getCephVolumes(zkhandler, pool)
     if volume:
         for volume_entry in volume_list:
             volume_pool, volume_name = volume_entry.split('/')
@@ -703,14 +689,14 @@
                 volume_list = ['{}/{}'.format(volume_pool, volume_name)]
 
     for volume_entry in volume_list:
-        for snapshot_name in zkhandler.listchildren(zk_conn, '/ceph/snapshots/{}'.format(volume_entry)):
+        for snapshot_name in zkhandler.children('/ceph/snapshots/{}'.format(volume_entry)):
             snapshot_list.append('{}@{}'.format(volume_entry, snapshot_name))
 
     return snapshot_list
 
 
-def add_snapshot(zk_conn, pool, volume, name):
-    if not verifyVolume(zk_conn, pool, volume):
+def add_snapshot(zkhandler, pool, volume, name):
+    if not verifyVolume(zkhandler, pool, volume):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)
 
     # 1. Create the snapshot
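Every Ceph and RBD action in this file shells out through common.run_os_command(), whose body is also outside this patch. A minimal sketch matching its (retcode, stdout, stderr) return shape, under a hypothetical name; the real helper may differ:

import shlex
import subprocess

def run_os_command_sketch(command_string, timeout=None):
    command = shlex.split(command_string)
    try:
        proc = subprocess.run(command, capture_output=True, timeout=timeout)
    except subprocess.TimeoutExpired:
        return 124, '', 'Command timed out'
    return proc.returncode, proc.stdout.decode(), proc.stderr.decode()

retcode, stdout, stderr = run_os_command_sketch('echo test')
assert retcode == 0 and stdout.strip() == 'test'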
@@ -719,28 +705,28 @@
         return False, 'ERROR: Failed to create RBD snapshot "{}" of volume "{}" in pool "{}": {}'.format(name, volume, pool, stderr)
 
     # 2. Add the snapshot to Zookeeper
-    zkhandler.writedata(zk_conn, {
-        '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '',
-        '/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, name): '{}'
-    })
+    zkhandler.write([
+        ('/ceph/snapshots/{}/{}/{}'.format(pool, volume, name), ''),
+        ('/ceph/snapshots/{}/{}/{}/stats'.format(pool, volume, name), '{}')
+    ])
 
     # 3. Update the count of snapshots on this volume
-    volume_stats_raw = zkhandler.readdata(zk_conn, '/ceph/volumes/{}/{}/stats'.format(pool, volume))
+    volume_stats_raw = zkhandler.read('/ceph/volumes/{}/{}/stats'.format(pool, volume))
     volume_stats = dict(json.loads(volume_stats_raw))
     # Format the size to something nicer
     volume_stats['snapshot_count'] = volume_stats['snapshot_count'] + 1
     volume_stats_raw = json.dumps(volume_stats)
-    zkhandler.writedata(zk_conn, {
-        '/ceph/volumes/{}/{}/stats'.format(pool, volume): volume_stats_raw
-    })
+    zkhandler.write([
+        ('/ceph/volumes/{}/{}/stats'.format(pool, volume), volume_stats_raw)
+    ])
 
     return True, 'Created RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
 
 
-def rename_snapshot(zk_conn, pool, volume, name, new_name):
-    if not verifyVolume(zk_conn, pool, volume):
+def rename_snapshot(zkhandler, pool, volume, name, new_name):
+    if not verifyVolume(zkhandler, pool, volume):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)
-    if not verifySnapshot(zk_conn, pool, volume, name):
+    if not verifySnapshot(zkhandler, pool, volume, name):
         return False, 'ERROR: No snapshot with name "{}" is present for volume "{}" in pool "{}".'.format(name, volume, pool)
 
     # 1. Rename the snapshot
@@ -749,17 +735,17 @@
         return False, 'ERROR: Failed to rename RBD snapshot "{}" to "{}" for volume "{}" in pool "{}": {}'.format(name, new_name, volume, pool, stderr)
 
     # 2. Rename the snapshot in ZK
-    zkhandler.renamekey(zk_conn, {
-        '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name): '/ceph/snapshots/{}/{}/{}'.format(pool, volume, new_name)
-    })
+    zkhandler.rename([
+        ('/ceph/snapshots/{}/{}/{}'.format(pool, volume, name), '/ceph/snapshots/{}/{}/{}'.format(pool, volume, new_name))
+    ])
 
     return True, 'Renamed RBD snapshot "{}" to "{}" for volume "{}" in pool "{}".'.format(name, new_name, volume, pool)
 
 
-def remove_snapshot(zk_conn, pool, volume, name):
-    if not verifyVolume(zk_conn, pool, volume):
+def remove_snapshot(zkhandler, pool, volume, name):
+    if not verifyVolume(zkhandler, pool, volume):
         return False, 'ERROR: No volume with name "{}" is present in pool "{}".'.format(volume, pool)
-    if not verifySnapshot(zk_conn, pool, volume, name):
+    if not verifySnapshot(zkhandler, pool, volume, name):
         return False, 'ERROR: No snapshot with name "{}" is present of volume {} in pool {}.'.format(name, volume, pool)
 
     # 1. Remove the snapshot
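The per-volume snapshot counter lives inside the stats JSON blob, so add_snapshot() above and remove_snapshot() below perform the same read-modify-write against a single key. Condensed into one hypothetical helper (not part of the patch), using the stub handler from the first sketch:

import json

def adjust_snapshot_count(zkhandler, pool, volume, delta):
    stats_key = '/ceph/volumes/{}/{}/stats'.format(pool, volume)
    volume_stats = dict(json.loads(zkhandler.read(stats_key)))
    volume_stats['snapshot_count'] = volume_stats['snapshot_count'] + delta
    zkhandler.write([(stats_key, json.dumps(volume_stats))])

zkhandler.write([('/ceph/volumes/pool1/volA/stats', '{"snapshot_count": 1}')])
adjust_snapshot_count(zkhandler, 'pool1', 'volA', +1)
assert json.loads(zkhandler.read('/ceph/volumes/pool1/volA/stats'))['snapshot_count'] == 2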
@@ -768,30 +754,30 @@
         return False, 'Failed to remove RBD snapshot "{}" of volume "{}" in pool "{}": {}'.format(name, volume, pool, stderr)
 
     # 2. Delete snapshot from Zookeeper
-    zkhandler.deletekey(zk_conn, '/ceph/snapshots/{}/{}/{}'.format(pool, volume, name))
+    zkhandler.delete('/ceph/snapshots/{}/{}/{}'.format(pool, volume, name))
 
     # 3. Update the count of snapshots on this volume
-    volume_stats_raw = zkhandler.readdata(zk_conn, '/ceph/volumes/{}/{}/stats'.format(pool, volume))
+    volume_stats_raw = zkhandler.read('/ceph/volumes/{}/{}/stats'.format(pool, volume))
     volume_stats = dict(json.loads(volume_stats_raw))
     # Format the size to something nicer
     volume_stats['snapshot_count'] = volume_stats['snapshot_count'] - 1
     volume_stats_raw = json.dumps(volume_stats)
-    zkhandler.writedata(zk_conn, {
-        '/ceph/volumes/{}/{}/stats'.format(pool, volume): volume_stats_raw
-    })
+    zkhandler.write([
+        ('/ceph/volumes/{}/{}/stats'.format(pool, volume), volume_stats_raw)
+    ])
 
     return True, 'Removed RBD snapshot "{}" of volume "{}" in pool "{}".'.format(name, volume, pool)
 
 
-def get_list_snapshot(zk_conn, pool, volume, limit, is_fuzzy=True):
+def get_list_snapshot(zkhandler, pool, volume, limit, is_fuzzy=True):
     snapshot_list = []
-    if pool and not verifyPool(zk_conn, pool):
+    if pool and not verifyPool(zkhandler, pool):
         return False, 'ERROR: No pool with name "{}" is present in the cluster.'.format(pool)
 
-    if volume and not verifyPool(zk_conn, volume):
+    if volume and not verifyPool(zkhandler, volume):
         return False, 'ERROR: No volume with name "{}" is present in the cluster.'.format(volume)
 
-    full_snapshot_list = getCephSnapshots(zk_conn, pool, volume)
+    full_snapshot_list = getCephSnapshots(zkhandler, pool, volume)
 
     if is_fuzzy and limit:
         # Implicitly assume fuzzy limits
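After the rework, a caller constructs one handler and passes it wherever the old code threaded a raw zk_conn alongside the zkhandler module. A hypothetical caller sketch; the real ZKHandler constructor lives in zkhandler.py, outside this patch:

import daemon_lib.ceph as ceph

def list_pool_names(zkhandler, limit=None):
    # Every reworked function keeps the (success_flag, data_or_message) shape
    retflag, retdata = ceph.get_list_pool(zkhandler, limit, is_fuzzy=False)
    if not retflag:
        raise RuntimeError(retdata)
    return [pool['name'] for pool in retdata]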