Fix linting of cluster.py file

parent 6c7be492b8
commit ec79acf061
@@ -46,14 +46,14 @@ def set_maintenance(zkhandler, maint_state):
 
 def getClusterHealth(zkhandler, node_list, vm_list, ceph_osd_list):
     health_delta_map = {
-        'node_stopped': 50,
-        'node_flushed': 10,
-        'vm_stopped': 10,
-        'osd_out': 50,
-        'osd_down': 10,
-        'memory_overprovisioned': 50,
-        'ceph_err': 50,
-        'ceph_warn': 10,
+        "node_stopped": 50,
+        "node_flushed": 10,
+        "vm_stopped": 10,
+        "osd_out": 50,
+        "osd_down": 10,
+        "memory_overprovisioned": 50,
+        "ceph_err": 50,
+        "ceph_warn": 10,
     }
 
     # Generate total cluster health numbers
@@ -62,23 +62,29 @@ def getClusterHealth(zkhandler, node_list, vm_list, ceph_osd_list):
 
     for index, node in enumerate(node_list):
         # Apply node health values to total health number
-        cluster_health -= 100 - node['health']
-        for entry in node['health_details']:
-            if entry['health_delta'] > 0:
-                messages.append(f"{node['name']}: plugin {entry['plugin_name']}: {entry['message']}")
+        cluster_health -= 100 - node["health"]
+        for entry in node["health_details"]:
+            if entry["health_delta"] > 0:
+                messages.append(
+                    f"{node['name']}: plugin {entry['plugin_name']}: {entry['message']}"
+                )
 
         # Handle unhealthy node states
-        if node['daemon_state'] not in ['run']:
-            cluster_health -= health_delta_map['node_stopped']
-            messages.append(f"cluster: {node['name']} in {node['daemon_state']} daemon state")
-        elif node['domain_state'] not in ['ready']:
-            cluster_health -= health_delta_map['node_flushed']
-            messages.append(f"cluster: {node['name']} in {node['domain_state']} domain state")
+        if node["daemon_state"] not in ["run"]:
+            cluster_health -= health_delta_map["node_stopped"]
+            messages.append(
+                f"cluster: {node['name']} in {node['daemon_state']} daemon state"
+            )
+        elif node["domain_state"] not in ["ready"]:
+            cluster_health -= health_delta_map["node_flushed"]
+            messages.append(
+                f"cluster: {node['name']} in {node['domain_state']} domain state"
+            )
 
     for index, vm in enumerate(vm_list):
         # Handle unhealthy VM states
-        if vm['state'] not in ["start", "disable", "migrate", "unmigrate", "provision"]:
-            cluster_health -= health_delta_map['vm_stopped']
+        if vm["state"] not in ["start", "disable", "migrate", "unmigrate", "provision"]:
+            cluster_health -= health_delta_map["vm_stopped"]
             messages.append(f"cluster: {vm['name']} in {vm['state']} state")
 
     for index, ceph_osd in enumerate(ceph_osd_list):
@@ -87,11 +93,15 @@ def getClusterHealth(zkhandler, node_list, vm_list, ceph_osd_list):
 
         # Handle unhealthy OSD states
         if in_texts[ceph_osd["stats"]["in"]] not in ["in"]:
-            cluster_health -= health_delta_map['osd_out']
-            messages.append(f"cluster: OSD {ceph_osd['id']} in {in_texts[ceph_osd['stats']['in']]} state")
-        elif up_texts[ceph_osd["stats"]["up"]] not in ['up']:
-            cluster_health -= health_delta_map['osd_down']
-            messages.append(f"cluster: OSD {ceph_osd['id']} in {up_texts[ceph_osd['stats']['up']]} state")
+            cluster_health -= health_delta_map["osd_out"]
+            messages.append(
+                f"cluster: OSD {ceph_osd['id']} in {in_texts[ceph_osd['stats']['in']]} state"
+            )
+        elif up_texts[ceph_osd["stats"]["up"]] not in ["up"]:
+            cluster_health -= health_delta_map["osd_down"]
+            messages.append(
+                f"cluster: OSD {ceph_osd['id']} in {up_texts[ceph_osd['stats']['up']]} state"
+            )
 
     # Check for (n-1) overprovisioning
     # Assume X nodes. If the total VM memory allocation (counting only running VMss) is greater than
@@ -116,20 +126,26 @@ def getClusterHealth(zkhandler, node_list, vm_list, ceph_osd_list):
     for index, node in enumerate(n_minus_1_node_list):
         n_minus_1_total += node["memory"]["total"]
     if alloc_total > n_minus_1_total:
-        cluster_health -= health_delta_map['memory_overprovisioned']
-        messages.append(f"cluster: Total VM memory is overprovisioned ({alloc_total} > {n_minus_1_total} n-1)")
+        cluster_health -= health_delta_map["memory_overprovisioned"]
+        messages.append(
+            f"cluster: Total VM memory is overprovisioned ({alloc_total} > {n_minus_1_total} n-1)"
+        )
 
     # Check Ceph cluster health
     ceph_health = loads(zkhandler.read("base.storage.health"))
     ceph_health_status = ceph_health["status"]
     ceph_health_entries = ceph_health["checks"].keys()
 
-    if ceph_health_status == 'HEALTH_ERR':
-        cluster_health -= health_delta_map['ceph_err']
-        messages.append(f"cluster: Ceph cluster in ERROR state: {', '.join(ceph_health_entries)}")
-    elif ceph_health_status == 'HEALTH_WARN':
-        cluster_health -= health_delta_map['ceph_warn']
-        messages.append(f"cluster: Ceph cluster in WARNING state: {', '.join(ceph_health_entries)}")
+    if ceph_health_status == "HEALTH_ERR":
+        cluster_health -= health_delta_map["ceph_err"]
+        messages.append(
+            f"cluster: Ceph cluster in ERROR state: {', '.join(ceph_health_entries)}"
+        )
+    elif ceph_health_status == "HEALTH_WARN":
+        cluster_health -= health_delta_map["ceph_warn"]
+        messages.append(
+            f"cluster: Ceph cluster in WARNING state: {', '.join(ceph_health_entries)}"
+        )
 
     return cluster_health, messages
 
@@ -236,7 +252,9 @@ def getClusterInformation(zkhandler):
         formatted_osd_states[state] = state_count
 
     # Get cluster health data
-    cluster_health, cluster_health_messages = getClusterHealth(zkhandler, node_list, vm_list, ceph_osd_list)
+    cluster_health, cluster_health_messages = getClusterHealth(
+        zkhandler, node_list, vm_list, ceph_osd_list
+    )
 
     # Format the status data
    cluster_information = {
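
Aside on the logic being reformatted: getClusterHealth subtracts a fixed health_delta_map penalty from a running score for each unhealthy condition and records a human-readable message. A minimal standalone Python sketch of that pattern follows; the starting score of 100 and the trimmed-down delta map are assumptions for illustration, not code taken from cluster.py.

# Illustrative sketch of the scoring pattern shown in the diff above; the
# starting score of 100 and the reduced delta map are assumptions.
health_delta_map = {"node_stopped": 50, "vm_stopped": 10}


def score_health(nodes, vms):
    cluster_health = 100
    messages = []
    for node in nodes:
        if node["daemon_state"] not in ["run"]:
            cluster_health -= health_delta_map["node_stopped"]
            messages.append(f"cluster: {node['name']} in {node['daemon_state']} daemon state")
    for vm in vms:
        if vm["state"] not in ["start", "disable", "migrate", "unmigrate", "provision"]:
            cluster_health -= health_delta_map["vm_stopped"]
            messages.append(f"cluster: {vm['name']} in {vm['state']} state")
    return cluster_health, messages


# One stopped node daemon and one stopped VM: 100 - 50 - 10 = 40
print(score_health([{"name": "node1", "daemon_state": "stop"}], [{"name": "vm1", "state": "stop"}]))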
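
The new style (double-quoted strings, long call arguments wrapped onto their own lines) matches the Black formatter's defaults, though the commit message does not name the tool, so treat that as an assumption. A small sketch of checking cluster.py against that style via Black's Python API, assuming Black is installed:

# Assumption: Black with default settings is the formatter behind this commit;
# it is not named in the commit message. Compares the file with Black's output.
import black

with open("cluster.py") as f:
    source = f.read()

formatted = black.format_str(source, mode=black.Mode())
print("already formatted" if formatted == source else "black would reformat this file")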