From 84a905b7f08d458088570069603dd942109a933b Mon Sep 17 00:00:00 2001 From: "Joshua M. Boniface" Date: Fri, 5 Jul 2019 00:29:47 -0400 Subject: [PATCH] Refactor ceph client for API --- client-cli/pvc.py | 35 +- client-common/ceph.py | 1471 +++++++++++++++++++---------------------- 2 files changed, 714 insertions(+), 792 deletions(-) diff --git a/client-cli/pvc.py b/client-cli/pvc.py index c5ed7e4d..042a1a19 100755 --- a/client-cli/pvc.py +++ b/client-cli/pvc.py @@ -1143,8 +1143,11 @@ def ceph_status(): """ zk_conn = pvc_common.startZKConnection(zk_host) - retcode, retmsg = pvc_ceph.get_status(zk_conn) - cleanup(retcode, retmsg, zk_conn) + retcode, retdata = pvc_ceph.get_status(zk_conn) + if retdata: + pvc_ceph.format_status(retdata) + retdata = '' + cleanup(retcode, retdata, zk_conn) ############################################################################### # pvc ceph osd @@ -1303,8 +1306,11 @@ def ceph_osd_list(limit): """ zk_conn = pvc_common.startZKConnection(zk_host) - retcode, retmsg = pvc_ceph.get_list_osd(zk_conn, limit) - cleanup(retcode, retmsg, zk_conn) + retcode, retdata = pvc_ceph.get_list_osd(zk_conn, limit) + if retcode: + pvc_ceph.format_list_osd(retdata) + retdata = '' + cleanup(retcode, retdata, zk_conn) ############################################################################### # pvc ceph pool @@ -1377,8 +1383,11 @@ def ceph_pool_list(limit): """ zk_conn = pvc_common.startZKConnection(zk_host) - retcode, retmsg = pvc_ceph.get_list_pool(zk_conn, limit) - cleanup(retcode, retmsg, zk_conn) + retcode, retdata = pvc_ceph.get_list_pool(zk_conn, limit) + if retcode: + pvc_ceph.format_list_pool(retdata) + retdata = '' + cleanup(retcode, retdata, zk_conn) ############################################################################### # pvc ceph volume @@ -1460,8 +1469,11 @@ def ceph_volume_list(limit, pool): """ zk_conn = pvc_common.startZKConnection(zk_host) - retcode, retmsg = pvc_ceph.get_list_volume(zk_conn, pool, limit) - cleanup(retcode, 
retmsg, zk_conn) + retcode, retdata = pvc_ceph.get_list_volume(zk_conn, pool, limit) + if retcode: + pvc_ceph.format_list_volume(retdata) + retdata = '' + cleanup(retcode, retdata, zk_conn) ############################################################################### # pvc ceph volume snapshot @@ -1551,8 +1563,11 @@ def ceph_volume_snapshot_list(pool, volume, limit): """ zk_conn = pvc_common.startZKConnection(zk_host) - retcode, retmsg = pvc_ceph.get_list_snapshot(zk_conn, pool, volume, limit) - cleanup(retcode, retmsg, zk_conn) + retcode, retdata = pvc_ceph.get_list_snapshot(zk_conn, pool, volume, limit) + if retcode: + pvc_ceph.format_list_snapshot(retdata) + retdata = '' + cleanup(retcode, retdata, zk_conn) ############################################################################### diff --git a/client-common/ceph.py b/client-common/ceph.py index 6d64a71c..e4315cd4 100644 --- a/client-common/ceph.py +++ b/client-common/ceph.py @@ -103,17 +103,21 @@ def format_bytes_fromhuman(datahuman): # # Status functions # -def getCephStatus(zk_conn): - status_data = zkhandler.readdata(zk_conn, '/ceph').rstrip() - primary_node = zkhandler.readdata(zk_conn, '/primary_node') - return status_data, primary_node - def get_status(zk_conn): - status_data, primary_node = getCephStatus(zk_conn) - click.echo('{bold}Ceph cluster status (primary node {end}{blue}{primary}{end}{bold}){end}\n'.format(bold=ansiprint.bold(), end=ansiprint.end(), blue=ansiprint.blue(), primary=primary_node)) - click.echo(status_data) + primary_node = zkhandler.readdata(zk_conn, '/primary_node') + ceph_status = zkhandler.readdata(zk_conn, '/ceph').rstrip() + + # Create a data structure for the information + status_data = { + 'primary_node': primary_node, + 'ceph_status': ceph_status + } + return status_data + +def format_status(status_data): + click.echo('{bold}Ceph cluster status (primary node {end}{blue}{primary}{end}{bold}){end}\n'.format(bold=ansiprint.bold(), end=ansiprint.end(), 
blue=ansiprint.blue(), primary=status_data['primary_node'])) + click.echo(status_data['ceph_status']) click.echo('') - return True, '' # @@ -132,298 +136,31 @@ def getOSDInformation(zk_conn, osd_id): databytes = osd_stats['kb'] * 1024 databytes_formatted = format_bytes_tohuman(databytes) osd_stats['size'] = databytes_formatted - return osd_stats -def getCephOSDs(zk_conn): - osd_list = zkhandler.listchildren(zk_conn, '/ceph/osds') - return osd_list + osd_information = { + 'id': osd_id, + 'stats': osd_stats + } + return osd_information -def formatOSDList(zk_conn, osd_list): - osd_list_output = [] +def getOutputColoursOSD(osd_information): + # Set the UP status + if osd_information['stats']['up'] == 1: + osd_up_flag = 'Yes' + osd_up_colour = ansiprint.green() + else: + osd_up_flag = 'No' + osd_up_colour = ansiprint.red() - osd_uuid = dict() - osd_up = dict() - osd_up_colour = dict() - osd_in = dict() - osd_in_colour = dict() - osd_size = dict() - osd_weight = dict() - osd_reweight = dict() - osd_pgs = dict() - osd_node = dict() - osd_used = dict() - osd_free = dict() - osd_util = dict() - osd_var= dict() - osd_wrops = dict() - osd_wrdata = dict() - osd_rdops = dict() - osd_rddata = dict() + # Set the IN status + if osd_information['stats']['in'] == 1: + osd_in = 'Yes' + osd_in_colour = ansiprint.green() + else: + osd_in = 'No' + osd_in_colour = ansiprint.red() - osd_id_length = 3 - osd_up_length = 4 - osd_in_length = 4 - osd_size_length = 5 - osd_weight_length = 3 - osd_reweight_length = 5 - osd_pgs_length = 4 - osd_node_length = 5 - osd_used_length = 5 - osd_free_length = 6 - osd_util_length = 6 - osd_var_length = 6 - osd_wrops_length = 4 - osd_wrdata_length = 5 - osd_rdops_length = 4 - osd_rddata_length = 5 - - for osd in osd_list: - # Set the OSD ID length - _osd_id_length = len(osd) + 1 - if _osd_id_length > osd_id_length: - osd_id_length = _osd_id_length - - # Get stats - osd_stats = getOSDInformation(zk_conn, osd) - - # Set the parent node and length - try: - 
osd_node[osd] = osd_stats['node'] - # If this happens, the node hasn't checked in fully yet, so just ignore it - if osd_node[osd] == '|': - continue - except KeyError: - continue - - _osd_node_length = len(osd_node[osd]) + 1 - if _osd_node_length > osd_node_length: - osd_node_length = _osd_node_length - - # Set the UP status - if osd_stats['up'] == 1: - osd_up[osd] = 'Yes' - osd_up_colour[osd] = ansiprint.green() - else: - osd_up[osd] = 'No' - osd_up_colour[osd] = ansiprint.red() - - # Set the IN status - if osd_stats['in'] == 1: - osd_in[osd] = 'Yes' - osd_in_colour[osd] = ansiprint.green() - else: - osd_in[osd] = 'No' - osd_in_colour[osd] = ansiprint.red() - - # Set the size and length - osd_size[osd] = osd_stats['size'] - _osd_size_length = len(str(osd_size[osd])) + 1 - if _osd_size_length > osd_size_length: - osd_size_length = _osd_size_length - - # Set the weight and length - osd_weight[osd] = osd_stats['weight'] - _osd_weight_length = len(str(osd_weight[osd])) + 1 - if _osd_weight_length > osd_weight_length: - osd_weight_length = _osd_weight_length - - # Set the reweight and length - osd_reweight[osd] = osd_stats['reweight'] - _osd_reweight_length = len(str(osd_reweight[osd])) + 1 - if _osd_reweight_length > osd_reweight_length: - osd_reweight_length = _osd_reweight_length - - # Set the pgs and length - osd_pgs[osd] = osd_stats['pgs'] - _osd_pgs_length = len(str(osd_pgs[osd])) + 1 - if _osd_pgs_length > osd_pgs_length: - osd_pgs_length = _osd_pgs_length - - # Set the used/available/utlization%/variance and lengths - osd_used[osd] = osd_stats['used'] - _osd_used_length = len(osd_used[osd]) + 1 - if _osd_used_length > osd_used_length: - osd_used_length = _osd_used_length - osd_free[osd] = osd_stats['avail'] - _osd_free_length = len(osd_free[osd]) + 1 - if _osd_free_length > osd_free_length: - osd_free_length = _osd_free_length - osd_util[osd] = round(osd_stats['utilization'], 2) - _osd_util_length = len(str(osd_util[osd])) + 1 - if _osd_util_length > 
osd_util_length: - osd_util_length = _osd_util_length - osd_var[osd] = round(osd_stats['var'], 2) - _osd_var_length = len(str(osd_var[osd])) + 1 - if _osd_var_length > osd_var_length: - osd_var_length = _osd_var_length - - # Set the write IOPS/data and length - osd_wrops[osd] = osd_stats['wr_ops'] - _osd_wrops_length = len(osd_wrops[osd]) + 1 - if _osd_wrops_length > osd_wrops_length: - osd_wrops_length = _osd_wrops_length - osd_wrdata[osd] = osd_stats['wr_data'] - _osd_wrdata_length = len(osd_wrdata[osd]) + 1 - if _osd_wrdata_length > osd_wrdata_length: - osd_wrdata_length = _osd_wrdata_length - - # Set the read IOPS/data and length - osd_rdops[osd] = osd_stats['rd_ops'] - _osd_rdops_length = len(osd_rdops[osd]) + 1 - if _osd_rdops_length > osd_rdops_length: - osd_rdops_length = _osd_rdops_length - osd_rddata[osd] = osd_stats['rd_data'] - _osd_rddata_length = len(osd_rddata[osd]) + 1 - if _osd_rddata_length > osd_rddata_length: - osd_rddata_length = _osd_rddata_length - - # Format the output header - osd_list_output_header = '{bold}\ -{osd_id: <{osd_id_length}} \ -{osd_node: <{osd_node_length}} \ -{osd_up: <{osd_up_length}} \ -{osd_in: <{osd_in_length}} \ -{osd_size: <{osd_size_length}} \ -{osd_pgs: <{osd_pgs_length}} \ -{osd_weight: <{osd_weight_length}} \ -{osd_reweight: <{osd_reweight_length}} \ -Sp: {osd_used: <{osd_used_length}} \ -{osd_free: <{osd_free_length}} \ -{osd_util: <{osd_util_length}} \ -{osd_var: <{osd_var_length}} \ -Rd: {osd_rdops: <{osd_rdops_length}} \ -{osd_rddata: <{osd_rddata_length}} \ -Wr: {osd_wrops: <{osd_wrops_length}} \ -{osd_wrdata: <{osd_wrdata_length}} \ -{end_bold}'.format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - osd_id_length=osd_id_length, - osd_node_length=osd_node_length, - osd_up_length=osd_up_length, - osd_in_length=osd_in_length, - osd_size_length=osd_size_length, - osd_pgs_length=osd_pgs_length, - osd_weight_length=osd_weight_length, - osd_reweight_length=osd_reweight_length, - 
osd_used_length=osd_used_length, - osd_free_length=osd_free_length, - osd_util_length=osd_util_length, - osd_var_length=osd_var_length, - osd_wrops_length=osd_wrops_length, - osd_wrdata_length=osd_wrdata_length, - osd_rdops_length=osd_rdops_length, - osd_rddata_length=osd_rddata_length, - osd_id='ID', - osd_node='Node', - osd_up='Up', - osd_in='In', - osd_size='Size', - osd_pgs='PGs', - osd_weight='Wt', - osd_reweight='ReWt', - osd_used='Used', - osd_free='Free', - osd_util='Util%', - osd_var='Var', - osd_wrops='OPS', - osd_wrdata='Data', - osd_rdops='OPS', - osd_rddata='Data' - ) - - for osd in osd_list: - # Format the output header - osd_list_output.append('{bold}\ -{osd_id: <{osd_id_length}} \ -{osd_node: <{osd_node_length}} \ -{osd_up_colour}{osd_up: <{osd_up_length}}{end_colour} \ -{osd_in_colour}{osd_in: <{osd_in_length}}{end_colour} \ -{osd_size: <{osd_size_length}} \ -{osd_pgs: <{osd_pgs_length}} \ -{osd_weight: <{osd_weight_length}} \ -{osd_reweight: <{osd_reweight_length}} \ - {osd_used: <{osd_used_length}} \ -{osd_free: <{osd_free_length}} \ -{osd_util: <{osd_util_length}} \ -{osd_var: <{osd_var_length}} \ - {osd_rdops: <{osd_rdops_length}} \ -{osd_rddata: <{osd_rddata_length}} \ - {osd_wrops: <{osd_wrops_length}} \ -{osd_wrdata: <{osd_wrdata_length}} \ -{end_bold}'.format( - bold='', - end_bold='', - end_colour=ansiprint.end(), - osd_id_length=osd_id_length, - osd_node_length=osd_node_length, - osd_up_length=osd_up_length, - osd_in_length=osd_in_length, - osd_size_length=osd_size_length, - osd_pgs_length=osd_pgs_length, - osd_weight_length=osd_weight_length, - osd_reweight_length=osd_reweight_length, - osd_used_length=osd_used_length, - osd_free_length=osd_free_length, - osd_util_length=osd_util_length, - osd_var_length=osd_var_length, - osd_wrops_length=osd_wrops_length, - osd_wrdata_length=osd_wrdata_length, - osd_rdops_length=osd_rdops_length, - osd_rddata_length=osd_rddata_length, - osd_id=osd, - osd_node=osd_node[osd], - 
osd_up_colour=osd_up_colour[osd], - osd_up=osd_up[osd], - osd_in_colour=osd_in_colour[osd], - osd_in=osd_in[osd], - osd_size=osd_size[osd], - osd_pgs=osd_pgs[osd], - osd_weight=osd_weight[osd], - osd_reweight=osd_reweight[osd], - osd_used=osd_used[osd], - osd_free=osd_free[osd], - osd_util=osd_util[osd], - osd_var=osd_var[osd], - osd_wrops=osd_wrops[osd], - osd_wrdata=osd_wrdata[osd], - osd_rdops=osd_rdops[osd], - osd_rddata=osd_rddata[osd] - ) - ) - - output_string = osd_list_output_header + '\n' + '\n'.join(sorted(osd_list_output)) - return output_string - -def get_list_osd(zk_conn, limit): - osd_list = [] - full_osd_list = getCephOSDs(zk_conn) - - if limit: - try: - # Implicitly assume fuzzy limits - if not re.match('\^.*', limit): - limit = '.*' + limit - if not re.match('.*\$', limit): - limit = limit + '.*' - except Exception as e: - return False, 'Regex Error: {}'.format(e) - - for osd in full_osd_list: - valid_osd = False - if limit: - if re.match(limit, osd['osd_id']): - valid_osd = True - else: - valid_osd = True - - if valid_osd: - osd_list.append(osd) - - output_string = formatOSDList(zk_conn, osd_list) - click.echo(output_string) - - return True, '' + return osd_up_flag, osd_up_colour, osd_in, osd_in_colour def add_osd(zk_conn, node, device, weight): # Verify the target node exists @@ -612,20 +349,257 @@ def unset_osd(zk_conn, option): return success, message +def get_list_osd(zk_conn, limit): + osd_list = [] + full_osd_list = zkhandler.listchildren(zk_conn, '/ceph/osds') + for osd in full_osd_list: + if limit: + try: + # Implicitly assume fuzzy limits + if not re.match('\^.*', limit): + limit = '.*' + limit + if not re.match('.*\$', limit): + limit = limit + '.*' + + if re.match(limit, osd): + osd_list.append(getOSDInformation(zk_conn, osd)) + except Exception as e: + return False, 'Regex Error: {}'.format(e) + else: + osd_list.append(getOSDInformation(zk_conn, osd)) + + return True, osd_list + +def format_list_osd(osd_list): + osd_list_output = 
[] + + osd_id_length = 3 + osd_up_length = 4 + osd_in_length = 4 + osd_size_length = 5 + osd_weight_length = 3 + osd_reweight_length = 5 + osd_pgs_length = 4 + osd_node_length = 5 + osd_used_length = 5 + osd_free_length = 6 + osd_util_length = 6 + osd_var_length = 6 + osd_wrops_length = 4 + osd_wrdata_length = 5 + osd_rdops_length = 4 + osd_rddata_length = 5 + + for osd_information in osd_list: + try: + # If this happens, the node hasn't checked in fully yet, so just ignore it + if osd_information['stats']['node'] == '|': + continue + except KeyError: + continue + + # Set the OSD ID length + _osd_id_length = len(osd_information['id']) + 1 + if _osd_id_length > osd_id_length: + osd_id_length = _osd_id_length + + _osd_node_length = len(osd_information['stats']['node']) + 1 + if _osd_node_length > osd_node_length: + osd_node_length = _osd_node_length + + # Set the size and length + _osd_size_length = len(str(osd_information['stats']['size'])) + 1 + if _osd_size_length > osd_size_length: + osd_size_length = _osd_size_length + + # Set the weight and length + _osd_weight_length = len(str(osd_information['stats']['weight'])) + 1 + if _osd_weight_length > osd_weight_length: + osd_weight_length = _osd_weight_length + + # Set the reweight and length + _osd_reweight_length = len(str(osd_information['stats']['reweight'])) + 1 + if _osd_reweight_length > osd_reweight_length: + osd_reweight_length = _osd_reweight_length + + # Set the pgs and length + _osd_pgs_length = len(str(osd_information['stats']['pgs'])) + 1 + if _osd_pgs_length > osd_pgs_length: + osd_pgs_length = _osd_pgs_length + + # Set the used/available/utlization%/variance and lengths + _osd_used_length = len(osd_information['stats']['used']) + 1 + if _osd_used_length > osd_used_length: + osd_used_length = _osd_used_length + + _osd_free_length = len(osd_information['stats']['avail']) + 1 + if _osd_free_length > osd_free_length: + osd_free_length = _osd_free_length + + osd_util = 
round(osd_information['stats']['utilization'], 2) + _osd_util_length = len(str(osd_util)) + 1 + if _osd_util_length > osd_util_length: + osd_util_length = _osd_util_length + + osd_var = round(osd_information['stats']['var'], 2) + _osd_var_length = len(str(osd_var)) + 1 + if _osd_var_length > osd_var_length: + osd_var_length = _osd_var_length + + # Set the read/write IOPS/data and length + _osd_wrops_length = len(osd_information['stats']['wr_ops']) + 1 + if _osd_wrops_length > osd_wrops_length: + osd_wrops_length = _osd_wrops_length + + _osd_wrdata_length = len(osd_information['stats']['wr_data']) + 1 + if _osd_wrdata_length > osd_wrdata_length: + osd_wrdata_length = _osd_wrdata_length + + _osd_rdops_length = len(osd_information['stats']['rd_ops']) + 1 + if _osd_rdops_length > osd_rdops_length: + osd_rdops_length = _osd_rdops_length + + _osd_rddata_length = len(osd_information['stats']['rd_data']) + 1 + if _osd_rddata_length > osd_rddata_length: + osd_rddata_length = _osd_rddata_length + + # Format the output header + osd_list_output.append('{bold}\ +{osd_id: <{osd_id_length}} \ +{osd_node: <{osd_node_length}} \ +{osd_up: <{osd_up_length}} \ +{osd_in: <{osd_in_length}} \ +{osd_size: <{osd_size_length}} \ +{osd_pgs: <{osd_pgs_length}} \ +{osd_weight: <{osd_weight_length}} \ +{osd_reweight: <{osd_reweight_length}} \ +Sp: {osd_used: <{osd_used_length}} \ +{osd_free: <{osd_free_length}} \ +{osd_util: <{osd_util_length}} \ +{osd_var: <{osd_var_length}} \ +Rd: {osd_rdops: <{osd_rdops_length}} \ +{osd_rddata: <{osd_rddata_length}} \ +Wr: {osd_wrops: <{osd_wrops_length}} \ +{osd_wrdata: <{osd_wrdata_length}} \ +{end_bold}'.format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + osd_id_length=osd_id_length, + osd_node_length=osd_node_length, + osd_up_length=osd_up_length, + osd_in_length=osd_in_length, + osd_size_length=osd_size_length, + osd_pgs_length=osd_pgs_length, + osd_weight_length=osd_weight_length, + osd_reweight_length=osd_reweight_length, + 
osd_used_length=osd_used_length, + osd_free_length=osd_free_length, + osd_util_length=osd_util_length, + osd_var_length=osd_var_length, + osd_wrops_length=osd_wrops_length, + osd_wrdata_length=osd_wrdata_length, + osd_rdops_length=osd_rdops_length, + osd_rddata_length=osd_rddata_length, + osd_id='ID', + osd_node='Node', + osd_up='Up', + osd_in='In', + osd_size='Size', + osd_pgs='PGs', + osd_weight='Wt', + osd_reweight='ReWt', + osd_used='Used', + osd_free='Free', + osd_util='Util%', + osd_var='Var', + osd_wrops='OPS', + osd_wrdata='Data', + osd_rdops='OPS', + osd_rddata='Data' + ) + ) + + for osd_information in osd_list: + try: + # If this happens, the node hasn't checked in fully yet, so just ignore it + if osd_information['stats']['node'] == '|': + continue + except KeyError: + continue + + osd_up_flag, osd_up_colour, osd_in_flag, osd_in_colour = getOutputColoursOSD(osd_information) + osd_util = round(osd_information['stats']['utilization'], 2) + osd_var = round(osd_information['stats']['var'], 2) + + # Format the output header + osd_list_output.append('{bold}\ +{osd_id: <{osd_id_length}} \ +{osd_node: <{osd_node_length}} \ +{osd_up_colour}{osd_up: <{osd_up_length}}{end_colour} \ +{osd_in_colour}{osd_in: <{osd_in_length}}{end_colour} \ +{osd_size: <{osd_size_length}} \ +{osd_pgs: <{osd_pgs_length}} \ +{osd_weight: <{osd_weight_length}} \ +{osd_reweight: <{osd_reweight_length}} \ + {osd_used: <{osd_used_length}} \ +{osd_free: <{osd_free_length}} \ +{osd_util: <{osd_util_length}} \ +{osd_var: <{osd_var_length}} \ + {osd_rdops: <{osd_rdops_length}} \ +{osd_rddata: <{osd_rddata_length}} \ + {osd_wrops: <{osd_wrops_length}} \ +{osd_wrdata: <{osd_wrdata_length}} \ +{end_bold}'.format( + bold='', + end_bold='', + end_colour=ansiprint.end(), + osd_id_length=osd_id_length, + osd_node_length=osd_node_length, + osd_up_length=osd_up_length, + osd_in_length=osd_in_length, + osd_size_length=osd_size_length, + osd_pgs_length=osd_pgs_length, + 
osd_weight_length=osd_weight_length, + osd_reweight_length=osd_reweight_length, + osd_used_length=osd_used_length, + osd_free_length=osd_free_length, + osd_util_length=osd_util_length, + osd_var_length=osd_var_length, + osd_wrops_length=osd_wrops_length, + osd_wrdata_length=osd_wrdata_length, + osd_rdops_length=osd_rdops_length, + osd_rddata_length=osd_rddata_length, + osd_id=osd_information['id'], + osd_node=osd_information['stats']['node'], + osd_up_colour=osd_up_colour, + osd_up=osd_up_flag, + osd_in_colour=osd_in_colour, + osd_in=osd_in_flag, + osd_size=osd_information['stats']['size'], + osd_pgs=osd_information['stats']['pgs'], + osd_weight=osd_information['stats']['weight'], + osd_reweight=osd_information['stats']['reweight'], + osd_used=osd_information['stats']['used'], + osd_free=osd_information['stats']['avail'], + osd_util=osd_util, + osd_var=osd_var, + osd_wrops=osd_information['stats']['wr_ops'], + osd_wrdata=osd_information['stats']['wr_data'], + osd_rdops=osd_information['stats']['rd_ops'], + osd_rddata=osd_information['stats']['rd_data'] + ) + ) + + click.echo('\n'.join(sorted(osd_list_output))) # # Pool functions # -def getClusterPoolList(zk_conn): - # Get a list of pools under /ceph/pools - pool_list = zkhandler.listchildren(zk_conn, '/ceph/pools') - return pool_list - -def getPoolInformation(zk_conn, name): +def getPoolInformation(zk_conn, pool): # Parse the stats data - pool_stats_raw = zkhandler.readdata(zk_conn, '/ceph/pools/{}/stats'.format(name)) + pool_stats_raw = zkhandler.readdata(zk_conn, '/ceph/pools/{}/stats'.format(pool)) pool_stats = dict(json.loads(pool_stats_raw)) # Deal with the size issues for datatype in 'size_bytes', 'read_bytes', 'write_bytes': @@ -633,226 +607,12 @@ def getPoolInformation(zk_conn, name): databytes_formatted = format_bytes_tohuman(databytes) new_name = datatype.replace('bytes', 'formatted') pool_stats[new_name] = databytes_formatted - return pool_stats -def getCephPools(zk_conn): - pool_list = 
zkhandler.listchildren(zk_conn, '/ceph/pools') - return pool_list - -def formatPoolList(zk_conn, pool_list): - pool_list_output = [] - - pool_id = dict() - pool_size = dict() - pool_num_objects = dict() - pool_num_clones = dict() - pool_num_copies = dict() - pool_num_degraded = dict() - pool_read_ops = dict() - pool_read_data = dict() - pool_write_ops = dict() - pool_write_data = dict() - - pool_name_length = 5 - pool_id_length = 3 - pool_size_length = 5 - pool_num_objects_length = 6 - pool_num_clones_length = 7 - pool_num_copies_length = 7 - pool_num_degraded_length = 9 - pool_read_ops_length = 4 - pool_read_data_length = 5 - pool_write_ops_length = 4 - pool_write_data_length = 5 - - for pool in pool_list: - # Set the Pool name length - _pool_name_length = len(pool) + 1 - if _pool_name_length > pool_name_length: - pool_name_length = _pool_name_length - - # Get stats - pool_stats = getPoolInformation(zk_conn, pool) - - # Set the parent node and length - try: - pool_id[pool] = pool_stats['id'] - # If this happens, the node hasn't checked in fully yet, so just ignore it - if not pool_id[pool]: - continue - except KeyError: - continue - - # Set the id and length - pool_id[pool] = pool_stats['id'] - _pool_id_length = len(str(pool_id[pool])) + 1 - if _pool_id_length > pool_id_length: - pool_id_length = _pool_id_length - - # Set the size and length - pool_size[pool] = pool_stats['size_formatted'] - _pool_size_length = len(str(pool_size[pool])) + 1 - if _pool_size_length > pool_size_length: - pool_size_length = _pool_size_length - - # Set the num_objects and length - pool_num_objects[pool] = pool_stats['num_objects'] - _pool_num_objects_length = len(str(pool_num_objects[pool])) + 1 - if _pool_num_objects_length > pool_num_objects_length: - pool_num_objects_length = _pool_num_objects_length - - # Set the num_clones and length - pool_num_clones[pool] = pool_stats['num_object_clones'] - _pool_num_clones_length = len(str(pool_num_clones[pool])) + 1 - if 
_pool_num_clones_length > pool_num_clones_length: - pool_num_clones_length = _pool_num_clones_length - - # Set the num_copies and length - pool_num_copies[pool] = pool_stats['num_object_copies'] - _pool_num_copies_length = len(str(pool_num_copies[pool])) + 1 - if _pool_num_copies_length > pool_num_copies_length: - pool_num_copies_length = _pool_num_copies_length - - # Set the num_degraded and length - pool_num_degraded[pool] = pool_stats['num_objects_degraded'] - _pool_num_degraded_length = len(str(pool_num_degraded[pool])) + 1 - if _pool_num_degraded_length > pool_num_degraded_length: - pool_num_degraded_length = _pool_num_degraded_length - - # Set the write IOPS/data and length - pool_write_ops[pool] = pool_stats['write_ops'] - _pool_write_ops_length = len(str(pool_write_ops[pool])) + 1 - if _pool_write_ops_length > pool_write_ops_length: - pool_write_ops_length = _pool_write_ops_length - pool_write_data[pool] = pool_stats['write_formatted'] - _pool_write_data_length = len(pool_write_data[pool]) + 1 - if _pool_write_data_length > pool_write_data_length: - pool_write_data_length = _pool_write_data_length - - # Set the read IOPS/data and length - pool_read_ops[pool] = pool_stats['read_ops'] - _pool_read_ops_length = len(str(pool_read_ops[pool])) + 1 - if _pool_read_ops_length > pool_read_ops_length: - pool_read_ops_length = _pool_read_ops_length - pool_read_data[pool] = pool_stats['read_formatted'] - _pool_read_data_length = len(pool_read_data[pool]) + 1 - if _pool_read_data_length > pool_read_data_length: - pool_read_data_length = _pool_read_data_length - - # Format the output header - pool_list_output_header = '{bold}\ -{pool_id: <{pool_id_length}} \ -{pool_name: <{pool_name_length}} \ -{pool_size: <{pool_size_length}} \ -Obj: {pool_objects: <{pool_objects_length}} \ -{pool_clones: <{pool_clones_length}} \ -{pool_copies: <{pool_copies_length}} \ -{pool_degraded: <{pool_degraded_length}} \ -Rd: {pool_read_ops: <{pool_read_ops_length}} \ -{pool_read_data: 
<{pool_read_data_length}} \ -Wr: {pool_write_ops: <{pool_write_ops_length}} \ -{pool_write_data: <{pool_write_data_length}} \ -{end_bold}'.format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - pool_id_length=pool_id_length, - pool_name_length=pool_name_length, - pool_size_length=pool_size_length, - pool_objects_length=pool_num_objects_length, - pool_clones_length=pool_num_clones_length, - pool_copies_length=pool_num_copies_length, - pool_degraded_length=pool_num_degraded_length, - pool_write_ops_length=pool_write_ops_length, - pool_write_data_length=pool_write_data_length, - pool_read_ops_length=pool_read_ops_length, - pool_read_data_length=pool_read_data_length, - pool_id='ID', - pool_name='Name', - pool_size='Used', - pool_objects='Count', - pool_clones='Clones', - pool_copies='Copies', - pool_degraded='Degraded', - pool_write_ops='OPS', - pool_write_data='Data', - pool_read_ops='OPS', - pool_read_data='Data' - ) - - for pool in pool_list: - # Format the output header - pool_list_output.append('{bold}\ -{pool_id: <{pool_id_length}} \ -{pool_name: <{pool_name_length}} \ -{pool_size: <{pool_size_length}} \ - {pool_objects: <{pool_objects_length}} \ -{pool_clones: <{pool_clones_length}} \ -{pool_copies: <{pool_copies_length}} \ -{pool_degraded: <{pool_degraded_length}} \ - {pool_read_ops: <{pool_read_ops_length}} \ -{pool_read_data: <{pool_read_data_length}} \ - {pool_write_ops: <{pool_write_ops_length}} \ -{pool_write_data: <{pool_write_data_length}} \ -{end_bold}'.format( - bold='', - end_bold='', - pool_id_length=pool_id_length, - pool_name_length=pool_name_length, - pool_size_length=pool_size_length, - pool_objects_length=pool_num_objects_length, - pool_clones_length=pool_num_clones_length, - pool_copies_length=pool_num_copies_length, - pool_degraded_length=pool_num_degraded_length, - pool_write_ops_length=pool_write_ops_length, - pool_write_data_length=pool_write_data_length, - pool_read_ops_length=pool_read_ops_length, - 
pool_read_data_length=pool_read_data_length, - pool_id=pool_id[pool], - pool_name=pool, - pool_size=pool_size[pool], - pool_objects=pool_num_objects[pool], - pool_clones=pool_num_clones[pool], - pool_copies=pool_num_copies[pool], - pool_degraded=pool_num_degraded[pool], - pool_write_ops=pool_write_ops[pool], - pool_write_data=pool_write_data[pool], - pool_read_ops=pool_read_ops[pool], - pool_read_data=pool_read_data[pool] - ) - ) - - output_string = pool_list_output_header + '\n' + '\n'.join(sorted(pool_list_output)) - return output_string - -def get_list_pool(zk_conn, limit): - pool_list = [] - full_pool_list = getCephPools(zk_conn) - - if limit: - try: - # Implicitly assume fuzzy limits - if not re.match('\^.*', limit): - limit = '.*' + limit - if not re.match('.*\$', limit): - limit = limit + '.*' - except Exception as e: - return False, 'Regex Error: {}'.format(e) - - for pool in full_pool_list: - valid_pool = False - if limit: - if re.match(limit, pool['pool_id']): - valid_pool = True - else: - valid_pool = True - - if valid_pool: - pool_list.append(pool) - - output_string = formatPoolList(zk_conn, pool_list) - click.echo(output_string) - - return True, '' + pool_information = { + 'name': pool, + 'stats': pool_stats + } + return pool_information def add_pool(zk_conn, name, pgs): # Tell the cluster to create a new pool @@ -915,6 +675,180 @@ def remove_pool(zk_conn, name): return success, message +def get_list_pool(zk_conn, limit): + pool_list = [] + full_pool_list = zkhandler.listchildren(zk_conn, '/ceph/pools') + + for pool in full_pool_list: + if limit: + try: + # Implicitly assume fuzzy limits + if not re.match('\^.*', limit): + limit = '.*' + limit + if not re.match('.*\$', limit): + limit = limit + '.*' + + if re.match(limit, pool): + pool_list.append(getPoolInformation(zk_conn, pool)) + except Exception as e: + return False, 'Regex Error: {}'.format(e) + else: + pool_list.append(getPoolInformation(zk_conn, pool)) + + return True, pool_list + +def 
format_list_pool(pool_list): + pool_list_output = [] + + pool_name_length = 5 + pool_id_length = 3 + pool_size_length = 5 + pool_num_objects_length = 6 + pool_num_clones_length = 7 + pool_num_copies_length = 7 + pool_num_degraded_length = 9 + pool_read_ops_length = 4 + pool_read_data_length = 5 + pool_write_ops_length = 4 + pool_write_data_length = 5 + + for pool_information in pool_list: + # Set the Pool name length + _pool_name_length = len(pool_information['name']) + 1 + if _pool_name_length > pool_name_length: + pool_name_length = _pool_name_length + + # Set the id and length + _pool_id_length = len(str(pool_information['stats']['id'])) + 1 + if _pool_id_length > pool_id_length: + pool_id_length = _pool_id_length + + # Set the size and length + _pool_size_length = len(str(pool_information['stats']['size_formatted'])) + 1 + if _pool_size_length > pool_size_length: + pool_size_length = _pool_size_length + + # Set the num_objects and length + _pool_num_objects_length = len(str(pool_information['stats']['num_objects'])) + 1 + if _pool_num_objects_length > pool_num_objects_length: + pool_num_objects_length = _pool_num_objects_length + + # Set the num_clones and length + _pool_num_clones_length = len(str(pool_information['stats']['num_object_clones'])) + 1 + if _pool_num_clones_length > pool_num_clones_length: + pool_num_clones_length = _pool_num_clones_length + + # Set the num_copies and length + _pool_num_copies_length = len(str(pool_information['stats']['num_object_copies'])) + 1 + if _pool_num_copies_length > pool_num_copies_length: + pool_num_copies_length = _pool_num_copies_length + + # Set the num_degraded and length + _pool_num_degraded_length = len(str(pool_information['stats']['num_objects_degraded'])) + 1 + if _pool_num_degraded_length > pool_num_degraded_length: + pool_num_degraded_length = _pool_num_degraded_length + + # Set the read/write IOPS/data and length + _pool_write_ops_length = len(str(pool_information['stats']['write_ops'])) + 1 + if 
_pool_write_ops_length > pool_write_ops_length: + pool_write_ops_length = _pool_write_ops_length + + _pool_write_data_length = len(pool_information['stats']['write_formatted']) + 1 + if _pool_write_data_length > pool_write_data_length: + pool_write_data_length = _pool_write_data_length + + _pool_read_ops_length = len(str(pool_information['stats']['read_ops'])) + 1 + if _pool_read_ops_length > pool_read_ops_length: + pool_read_ops_length = _pool_read_ops_length + + _pool_read_data_length = len(pool_information['stats']['read_formatted']) + 1 + if _pool_read_data_length > pool_read_data_length: + pool_read_data_length = _pool_read_data_length + + # Format the output header + pool_list_output.append('{bold}\ +{pool_id: <{pool_id_length}} \ +{pool_name: <{pool_name_length}} \ +{pool_size: <{pool_size_length}} \ +Obj: {pool_objects: <{pool_objects_length}} \ +{pool_clones: <{pool_clones_length}} \ +{pool_copies: <{pool_copies_length}} \ +{pool_degraded: <{pool_degraded_length}} \ +Rd: {pool_read_ops: <{pool_read_ops_length}} \ +{pool_read_data: <{pool_read_data_length}} \ +Wr: {pool_write_ops: <{pool_write_ops_length}} \ +{pool_write_data: <{pool_write_data_length}} \ +{end_bold}'.format( + bold=ansiprint.bold(), + end_bold=ansiprint.end(), + pool_id_length=pool_id_length, + pool_name_length=pool_name_length, + pool_size_length=pool_size_length, + pool_objects_length=pool_num_objects_length, + pool_clones_length=pool_num_clones_length, + pool_copies_length=pool_num_copies_length, + pool_degraded_length=pool_num_degraded_length, + pool_write_ops_length=pool_write_ops_length, + pool_write_data_length=pool_write_data_length, + pool_read_ops_length=pool_read_ops_length, + pool_read_data_length=pool_read_data_length, + pool_id='ID', + pool_name='Name', + pool_size='Used', + pool_objects='Count', + pool_clones='Clones', + pool_copies='Copies', + pool_degraded='Degraded', + pool_write_ops='OPS', + pool_write_data='Data', + pool_read_ops='OPS', + pool_read_data='Data' + ) + ) + 
def getVolumeInformation(zk_conn, pool, volume):
    """
    Return a dictionary describing a single Ceph RBD volume.

    Reads the JSON stats blob stored for the volume under Zookeeper,
    humanizes the raw byte size, and returns a dict of the form
    {'name': ..., 'pool': ..., 'stats': {...}}.
    """
    # Fetch and parse the JSON stats stored at the volume's ZK key
    stats_json = zkhandler.readdata(
        zk_conn, '/ceph/volumes/{}/{}/stats'.format(pool, volume)
    )
    stats = dict(json.loads(stats_json))

    # Present the size as a human-readable string rather than raw bytes
    stats['size'] = format_bytes_tohuman(stats['size'])

    return {
        'name': volume,
        'pool': pool,
        'stats': stats
    }
volume_format[volume] = volume_stats['format'] - _volume_format_length = len(str(volume_format[volume])) + 1 - if _volume_format_length > volume_format_length: - volume_format_length = _volume_format_length - - # Set the features and length - volume_features[volume] = ','.join(volume_stats['features']) - _volume_features_length = len(str(volume_features[volume])) + 1 - if _volume_features_length > volume_features_length: - volume_features_length = _volume_features_length - - # Format the output header - volume_list_output_header = '{bold}\ -{volume_name: <{volume_name_length}} \ -{volume_pool: <{volume_pool_length}} \ -{volume_size: <{volume_size_length}} \ -{volume_objects: <{volume_objects_length}} \ -{volume_order: <{volume_order_length}} \ -{volume_format: <{volume_format_length}} \ -{volume_features: <{volume_features_length}} \ -{end_bold}'.format( - bold=ansiprint.bold(), - end_bold=ansiprint.end(), - volume_name_length=volume_name_length, - volume_pool_length=volume_pool_length, - volume_size_length=volume_size_length, - volume_objects_length=volume_objects_length, - volume_order_length=volume_order_length, - volume_format_length=volume_format_length, - volume_features_length=volume_features_length, - volume_name='Name', - volume_pool='Pool', - volume_size='Size', - volume_objects='Objects', - volume_order='Order', - volume_format='Format', - volume_features='Features', - ) - - for volume in volume_list: - volume_pool, volume_name = volume.split('/') - volume_list_output.append('{bold}\ -{volume_name: <{volume_name_length}} \ -{volume_pool: <{volume_pool_length}} \ -{volume_size: <{volume_size_length}} \ -{volume_objects: <{volume_objects_length}} \ -{volume_order: <{volume_order_length}} \ -{volume_format: <{volume_format_length}} \ -{volume_features: <{volume_features_length}} \ -{end_bold}'.format( - bold='', - end_bold='', - volume_name_length=volume_name_length, - volume_pool_length=volume_pool_length, - volume_size_length=volume_size_length, - 
def get_list_volume(zk_conn, pool, limit):
    """
    List Ceph RBD volumes as information dictionaries.

    Returns (True, [volume_information, ...]) for every volume in `pool`
    (all pools when pool == 'all') whose 'pool/name' string matches the
    optional regex `limit`; fuzzy limits are implicitly wrapped with
    '.*' on each unanchored side.  Returns (False, error_message) when
    the pool does not exist or the regex is invalid.
    """
    # BUGFIX: verify the `pool` argument -- the previous code referenced
    # an undefined name `name` here, raising NameError for any real pool
    if pool != 'all' and not verifyPool(zk_conn, pool):
        return False, 'ERROR: No pool with name "{}" is present in the cluster.'.format(pool)

    full_volume_list = getCephVolumes(zk_conn, pool)

    # Normalize and compile the limit ONCE, outside the loop -- the old
    # code re-prepended '.*' on every iteration
    if limit:
        try:
            # Implicitly assume fuzzy limits
            if not re.match(r'\^.*', limit):
                limit = '.*' + limit
            if not re.match(r'.*\$', limit):
                limit = limit + '.*'
            limit_re = re.compile(limit)
        except Exception as e:
            return False, 'Regex Error: {}'.format(e)

    volume_list = []
    for volume in full_volume_list:
        # Entries are 'pool/name' strings; match the limit against the
        # full string, as the legacy implementation did
        pool_name, volume_name = volume.split('/')
        if not limit or limit_re.match(volume):
            volume_list.append(getVolumeInformation(zk_conn, pool_name, volume_name))

    return True, volume_list
def format_list_volume(volume_list):
    """
    Pretty-print a table of volume_information dicts (as produced by
    getVolumeInformation) to the terminal via click.echo.

    A first pass grows each column to its widest value, then a bold
    header row plus one row per volume are emitted; all rows, header
    included, are joined after sorting (matching the existing output
    convention in this file).
    """
    volume_list_output = []

    # Minimum column widths, wide enough for the header labels
    volume_name_length = 5
    volume_pool_length = 5
    volume_size_length = 5
    volume_objects_length = 8
    volume_order_length = 6
    volume_format_length = 7
    volume_features_length = 10

    for volume_information in volume_list:
        # Set the Volume name length
        _volume_name_length = len(volume_information['name']) + 1
        if _volume_name_length > volume_name_length:
            volume_name_length = _volume_name_length

        # Set the Volume pool length
        _volume_pool_length = len(volume_information['pool']) + 1
        if _volume_pool_length > volume_pool_length:
            volume_pool_length = _volume_pool_length

        # Set the size and length
        _volume_size_length = len(str(volume_information['stats']['size'])) + 1
        if _volume_size_length > volume_size_length:
            volume_size_length = _volume_size_length

        # Set the num_objects and length
        _volume_objects_length = len(str(volume_information['stats']['objects'])) + 1
        if _volume_objects_length > volume_objects_length:
            volume_objects_length = _volume_objects_length

        # Set the order and length
        _volume_order_length = len(str(volume_information['stats']['order'])) + 1
        if _volume_order_length > volume_order_length:
            volume_order_length = _volume_order_length

        # Set the format and length
        _volume_format_length = len(str(volume_information['stats']['format'])) + 1
        if _volume_format_length > volume_format_length:
            volume_format_length = _volume_format_length

        # BUGFIX: 'features' is a list; join it into a comma-separated
        # string for both the width calculation and the display below --
        # a bare list cannot take a padded format spec and raises
        # TypeError under Python 3 (the removed legacy formatter joined
        # it the same way)
        _volume_features_length = len(','.join(volume_information['stats']['features'])) + 1
        if _volume_features_length > volume_features_length:
            volume_features_length = _volume_features_length

    # Format the output header
    volume_list_output.append('{bold}\
{volume_name: <{volume_name_length}} \
{volume_pool: <{volume_pool_length}} \
{volume_size: <{volume_size_length}} \
{volume_objects: <{volume_objects_length}} \
{volume_order: <{volume_order_length}} \
{volume_format: <{volume_format_length}} \
{volume_features: <{volume_features_length}} \
{end_bold}'.format(
            bold=ansiprint.bold(),
            end_bold=ansiprint.end(),
            volume_name_length=volume_name_length,
            volume_pool_length=volume_pool_length,
            volume_size_length=volume_size_length,
            volume_objects_length=volume_objects_length,
            volume_order_length=volume_order_length,
            volume_format_length=volume_format_length,
            volume_features_length=volume_features_length,
            volume_name='Name',
            volume_pool='Pool',
            volume_size='Size',
            volume_objects='Objects',
            volume_order='Order',
            volume_format='Format',
            volume_features='Features',
        )
    )

    for volume_information in volume_list:
        volume_list_output.append('{bold}\
{volume_name: <{volume_name_length}} \
{volume_pool: <{volume_pool_length}} \
{volume_size: <{volume_size_length}} \
{volume_objects: <{volume_objects_length}} \
{volume_order: <{volume_order_length}} \
{volume_format: <{volume_format_length}} \
{volume_features: <{volume_features_length}} \
{end_bold}'.format(
                bold='',
                end_bold='',
                volume_name_length=volume_name_length,
                volume_pool_length=volume_pool_length,
                volume_size_length=volume_size_length,
                volume_objects_length=volume_objects_length,
                volume_order_length=volume_order_length,
                volume_format_length=volume_format_length,
                volume_features_length=volume_features_length,
                volume_name=volume_information['name'],
                volume_pool=volume_information['pool'],
                volume_size=volume_information['stats']['size'],
                volume_objects=volume_information['stats']['objects'],
                volume_order=volume_information['stats']['order'],
                volume_format=volume_information['stats']['format'],
                volume_features=','.join(volume_information['stats']['features']),
            )
        )

    click.echo('\n'.join(sorted(volume_list_output)))
def get_list_snapshot(zk_conn, pool, volume, limit):
    """
    List Ceph RBD snapshots.

    Returns (True, ['pool/volume@snapshot', ...]) for every snapshot of
    `volume` in `pool` ('all' wildcards either) whose full name matches
    the optional regex `limit`; fuzzy limits are implicitly wrapped
    with '.*' on each unanchored side.  Returns (False, error_message)
    when the pool/volume is missing or the regex is invalid.
    """
    if pool != 'all' and not verifyPool(zk_conn, pool):
        return False, 'ERROR: No pool with name "{}" is present in the cluster.'.format(pool)

    # NOTE(review): the volume existence check calls verifyPool, which
    # looks copied from the pool check above -- confirm whether a
    # verifyVolume helper should be used here instead (the legacy code
    # had the same call)
    if volume != 'all' and not verifyPool(zk_conn, volume):
        return False, 'ERROR: No volume with name "{}" is present in the cluster.'.format(volume)

    full_snapshot_list = getCephSnapshots(zk_conn, pool, volume)

    # Normalize and compile the limit ONCE, outside the loop -- the old
    # code re-prepended '.*' on every iteration
    if limit:
        try:
            # Implicitly assume fuzzy limits
            if not re.match(r'\^.*', limit):
                limit = '.*' + limit
            if not re.match(r'.*\$', limit):
                limit = limit + '.*'
            limit_re = re.compile(limit)
        except Exception as e:
            return False, 'Regex Error: {}'.format(e)

    snapshot_list = []
    for snapshot in full_snapshot_list:
        # BUGFIX: collect the 'pool/volume@snapshot' strings themselves.
        # The previous code called getVolumeInformation() with a fourth
        # argument it does not accept (TypeError on the first snapshot),
        # and format_list_snapshot parses exactly these strings.
        if not limit or limit_re.match(snapshot):
            snapshot_list.append(snapshot)

    return True, snapshot_list
def format_list_snapshot(snapshot_list):
    """
    Pretty-print a table of snapshots to the terminal via click.echo.

    Each entry in snapshot_list is a 'pool/volume@snapshot' string; the
    table shows the snapshot name, its parent volume, and its pool.
    All rows, header included, are joined after sorting.
    """
    # Minimum column widths, wide enough for the header labels
    snapshot_name_length = 5
    snapshot_volume_length = 7
    snapshot_pool_length = 5

    # Parse each entry once and size the columns in the same pass
    parsed_entries = []
    for entry in snapshot_list:
        volume_part, snap_name = entry.split('@')
        pool_name, volume_name = volume_part.split('/')
        parsed_entries.append((snap_name, volume_name, pool_name))

        snapshot_name_length = max(snapshot_name_length, len(snap_name) + 1)
        snapshot_volume_length = max(snapshot_volume_length, len(volume_name) + 1)
        snapshot_pool_length = max(snapshot_pool_length, len(pool_name) + 1)

    # One row layout shared by the header and the data rows: three
    # left-padded columns, each followed by a space, wrapped in the
    # bold/end markers
    row_template = ('{bold}'
                    '{snapshot_name: <{snapshot_name_length}} '
                    '{snapshot_volume: <{snapshot_volume_length}} '
                    '{snapshot_pool: <{snapshot_pool_length}} '
                    '{end_bold}')

    output_rows = [row_template.format(
        bold=ansiprint.bold(),
        end_bold=ansiprint.end(),
        snapshot_name_length=snapshot_name_length,
        snapshot_volume_length=snapshot_volume_length,
        snapshot_pool_length=snapshot_pool_length,
        snapshot_name='Name',
        snapshot_volume='Volume',
        snapshot_pool='Pool',
    )]

    for snap_name, volume_name, pool_name in parsed_entries:
        output_rows.append(row_template.format(
            bold='',
            end_bold='',
            snapshot_name_length=snapshot_name_length,
            snapshot_volume_length=snapshot_volume_length,
            snapshot_pool_length=snapshot_pool_length,
            snapshot_name=snap_name,
            snapshot_volume=volume_name,
            snapshot_pool=pool_name,
        ))

    click.echo('\n'.join(sorted(output_rows)))