Display Ceph health in PVC status as well
Makes this output a little more realistic and allows proper monitoring of the Ceph cluster status (separate from the PVC status, which tracks only the OSD up/in state).
parent 985ad5edc0
commit 2b4d980685
@@ -359,6 +359,10 @@ class API_Status(Resource):
           type: string
           description: The overall cluster health
           example: Optimal
+        storage_health:
+          type: string
+          description: The overall storage cluster health
+          example: Optimal
         primary_node:
           type: string
           description: The current primary coordinator node
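Note: with the field documented above, any consumer of the status API can watch storage health directly. A minimal monitoring sketch follows; the endpoint URL, host, and port are assumptions for illustration and are not part of this commit.

# Minimal sketch: poll the status endpoint and alert on storage health.
# The URL below (host, port, and path) is an assumption, not from this commit.
import requests

status = requests.get('http://pvc.local:7370/api/v1/status').json()
if status.get('storage_health') != 'Optimal':
    # 'Maintenance' during maintenance mode, 'Degraded' when Ceph is not HEALTH_OK
    print('Storage health is {}'.format(status.get('storage_health')))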
@@ -92,10 +92,19 @@ def format_info(cluster_information, oformat):
     else:
         health_colour = ansiprint.yellow()
 
+    if cluster_information['storage_health'] == 'Optimal':
+        storage_health_colour = ansiprint.green()
+    elif cluster_information['storage_health'] == 'Maintenance':
+        storage_health_colour = ansiprint.blue()
+    else:
+        storage_health_colour = ansiprint.yellow()
+
     ainformation = []
     ainformation.append('{}PVC cluster status:{}'.format(ansiprint.bold(), ansiprint.end()))
     ainformation.append('')
     ainformation.append('{}Cluster health:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), health_colour, cluster_information['health'], ansiprint.end()))
+    ainformation.append('{}Storage health:{} {}{}{}'.format(ansiprint.purple(), ansiprint.end(), storage_health_colour, cluster_information['storage_health'], ansiprint.end()))
+    ainformation.append('')
     ainformation.append('{}Primary node:{} {}'.format(ansiprint.purple(), ansiprint.end(), cluster_information['primary_node']))
     ainformation.append('{}Cluster upstream IP:{} {}'.format(ansiprint.purple(), ansiprint.end(), cluster_information['upstream_ip']))
     ainformation.append('')
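For reference, the plain-text output built by format_info() with these additions would look roughly like the following (ANSI colours omitted; the node name and IP address are made-up values):

PVC cluster status:

Cluster health: Optimal
Storage health: Optimal

Primary node: hv1
Cluster upstream IP: 10.0.0.1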
@@ -127,6 +127,15 @@ def getClusterInformation(zk_conn):
     else:
         cluster_health = 'Optimal'
 
+    # Find out our storage health from Ceph
+    ceph_health = zkhandler.readdata(zk_conn, '/ceph').split('\n')[2].split()[-1]
+    if maint_state == 'true':
+        storage_health = 'Maintenance'
+    elif ceph_health != 'HEALTH_OK':
+        storage_health = 'Degraded'
+    else:
+        storage_health = 'Optimal'
+
     # State lists
     node_state_combinations = [
         'run,ready', 'run,flush', 'run,flushed', 'run,unflush',
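The ceph_health lookup above depends on the text stored under the /ceph key: it takes the third line of that text and keeps the last whitespace-separated token. A standalone sketch of that parse, assuming the key holds ceph status-style output (the sample text is illustrative):

# Standalone sketch of the health parse, assuming the /ceph key stores
# `ceph status`-style text whose third line is the health line.
sample = (
    '  cluster:\n'
    '    id:     0a1b2c3d\n'
    '    health: HEALTH_WARN\n'
)
ceph_health = sample.split('\n')[2].split()[-1]
print(ceph_health)  # -> HEALTH_WARN, which maps to storage_health = 'Degraded'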
@@ -174,6 +183,7 @@ def getClusterInformation(zk_conn):
     # Format the status data
     cluster_information = {
         'health': cluster_health,
+        'storage_health': storage_health,
         'primary_node': common.getPrimaryNode(zk_conn),
         'upstream_ip': zkhandler.readdata(zk_conn, '/upstream_ip'),
         'nodes': formatted_node_states,
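Taken together, the dictionary returned by getClusterInformation() (and serialized by the status API) now carries both health fields. A hypothetical example of the result, with invented values and the remaining keys elided:

# Hypothetical example of the resulting structure (values invented):
cluster_information = {
    'health': 'Optimal',           # existing PVC cluster health field
    'storage_health': 'Degraded',  # new field: Ceph reported something other than HEALTH_OK
    'primary_node': 'hv1',
    'upstream_ip': '10.0.0.1',
    # ... plus the existing node, VM, and storage state fields
}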