#!/usr/bin/env python3

# benchmark.py - PVC API Benchmark functions
# Part of the Parallel Virtual Cluster (PVC) system
#
# Copyright (C) 2018-2022 Joshua M. Boniface <joshua@boniface.me>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################

import psycopg2
import psycopg2.extras

from datetime import datetime
from json import loads, dumps

from pvcapid.Daemon import config

from daemon_lib.zkhandler import ZKHandler
from daemon_lib.celery import start, fail, log_info, update, finish

import daemon_lib.common as pvc_common
import daemon_lib.ceph as pvc_ceph


# Define the current test format
TEST_FORMAT = 1
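# (This format number is stored in the "test_format" column alongside each
# stored result and echoed back by list_benchmarks(), so that consumers can
# tell which layout of the result JSON they are reading.)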


# We run a total of 8 tests, to give a generalized idea of performance on the cluster:
# 1. A sequential read test of 8GB with a 4M block size
# 2. A sequential write test of 8GB with a 4M block size
# 3. A random read test of 8GB with a 4M block size
# 4. A random write test of 8GB with a 4M block size
# 5. A random read test of 8GB with a 4K block size
# 6. A random write test of 8GB with a 4K block size
# 7. A random read test of 8GB with a 4K block size at an I/O depth of 1
# 8. A random write test of 8GB with a 4K block size at an I/O depth of 1
# Taken together, these 8 results should give a very good indication of the overall storage performance
# for a variety of workloads. An example of the fio invocation generated from one
# of these entries is shown below the test matrix.
|
|
|
|
test_matrix = {
|
|
|
|
"seq_read": {
|
|
|
|
"direction": "read",
|
|
|
|
"iodepth": "64",
|
|
|
|
"bs": "4M",
|
|
|
|
"rw": "read",
|
|
|
|
},
|
|
|
|
"seq_write": {
|
|
|
|
"direction": "write",
|
|
|
|
"iodepth": "64",
|
|
|
|
"bs": "4M",
|
|
|
|
"rw": "write",
|
|
|
|
},
|
|
|
|
"rand_read_4M": {
|
|
|
|
"direction": "read",
|
|
|
|
"iodepth": "64",
|
|
|
|
"bs": "4M",
|
|
|
|
"rw": "randread",
|
|
|
|
},
|
|
|
|
"rand_write_4M": {
|
|
|
|
"direction": "write",
|
|
|
|
"iodepth": "64",
|
|
|
|
"bs": "4M",
|
|
|
|
"rw": "randwrite",
|
|
|
|
},
|
|
|
|
"rand_read_4K": {
|
|
|
|
"direction": "read",
|
|
|
|
"iodepth": "64",
|
|
|
|
"bs": "4K",
|
|
|
|
"rw": "randread",
|
|
|
|
},
|
|
|
|
"rand_write_4K": {
|
|
|
|
"direction": "write",
|
|
|
|
"iodepth": "64",
|
|
|
|
"bs": "4K",
|
|
|
|
"rw": "randwrite",
|
|
|
|
},
|
|
|
|
"rand_read_4K_lowdepth": {
|
|
|
|
"direction": "read",
|
|
|
|
"iodepth": "1",
|
|
|
|
"bs": "4K",
|
|
|
|
"rw": "randread",
|
|
|
|
},
|
|
|
|
"rand_write_4K_lowdepth": {
|
|
|
|
"direction": "write",
|
|
|
|
"iodepth": "1",
|
|
|
|
"bs": "4K",
|
|
|
|
"rw": "randwrite",
|
|
|
|
},
|
|
|
|
}
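# For illustration only: each entry above is substituted into the fio command
# template in run_benchmark_job() below. Assuming a pool named "testpool", the
# "seq_read" entry would expand to roughly:
#
#   fio --name=seq_read --ioengine=rbd --pool=testpool --rbdname=pvcbenchmark \
#       --output-format=json --direct=1 --randrepeat=1 --numjobs=1 \
#       --time_based --runtime=75 --group_reporting \
#       --iodepth=64 --bs=4M --readwrite=read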


# Specify the benchmark volume name and size
benchmark_volume_name = "pvcbenchmark"
benchmark_volume_size = "8G"
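# This throwaway volume is created on the target pool by prepare_benchmark_volume()
# at the start of each run and removed again by cleanup_benchmark_volume() when the
# run ends; its 8GB size matches the sizing referenced in the test descriptions above.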


#
# Exceptions (used by Celery tasks)
#
class BenchmarkError(Exception):
    pass


#
# Common functions
#
def cleanup(job_name, db_conn=None, db_cur=None, zkhandler=None):
    if db_conn is not None and db_cur is not None:
        # Clean up our dangling result
        query = "DELETE FROM storage_benchmarks WHERE job = %s;"
        args = (job_name,)
        db_cur.execute(query, args)
        db_conn.commit()
        # Close the database connections cleanly
        close_database(db_conn, db_cur)
    if zkhandler is not None:
        zkhandler.disconnect()
        del zkhandler


# Database connections
def open_database(config):
    conn = psycopg2.connect(
        host=config["database_host"],
        port=config["database_port"],
        dbname=config["database_name"],
        user=config["database_user"],
        password=config["database_password"],
    )
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    return conn, cur


def close_database(conn, cur, failed=False):
    if not failed:
        conn.commit()
    cur.close()
    conn.close()


def list_benchmarks(job=None):
    if job is not None:
        query = "SELECT * FROM {} WHERE job = %s;".format("storage_benchmarks")
        args = (job,)
    else:
        query = "SELECT * FROM {} ORDER BY id DESC;".format("storage_benchmarks")
        args = ()

    conn, cur = open_database(config)
    cur.execute(query, args)
    orig_data = cur.fetchall()
    data = list()
    for benchmark in orig_data:
        benchmark_data = dict()
        benchmark_data["id"] = benchmark["id"]
        benchmark_data["job"] = benchmark["job"]
        benchmark_data["test_format"] = benchmark["test_format"]
        if benchmark["result"] == "Running":
            benchmark_data["benchmark_result"] = "Running"
        else:
            try:
                benchmark_data["benchmark_result"] = loads(benchmark["result"])
            except Exception:
                benchmark_data["benchmark_result"] = {}
        # Append the new data to our actual output structure
        data.append(benchmark_data)
    close_database(conn, cur)
    if data:
        return data, 200
    else:
        return {"message": "No benchmark found."}, 404


def prepare_benchmark_volume(
    pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
):
    # Create the RBD volume
    retcode, retmsg = pvc_ceph.add_volume(
        zkhandler, pool, benchmark_volume_name, benchmark_volume_size
    )
    if not retcode:
        cleanup(
            job_name,
            db_conn=db_conn,
            db_cur=db_cur,
            zkhandler=zkhandler,
        )
        fail(
            None,
            f'Failed to create volume "{benchmark_volume_name}" on pool "{pool}": {retmsg}',
        )
    else:
        log_info(None, retmsg)


def cleanup_benchmark_volume(
    pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
):
    # Remove the RBD volume
    retcode, retmsg = pvc_ceph.remove_volume(zkhandler, pool, benchmark_volume_name)
    if not retcode:
        cleanup(
            job_name,
            db_conn=db_conn,
            db_cur=db_cur,
            zkhandler=zkhandler,
        )
        fail(
            None,
            f'Failed to remove volume "{benchmark_volume_name}" from pool "{pool}": {retmsg}',
        )
    else:
        log_info(None, retmsg)


def run_benchmark_job(
    test, pool, job_name=None, db_conn=None, db_cur=None, zkhandler=None
):
    test_spec = test_matrix[test]
    log_info(None, f"Running test '{test}'")
    fio_cmd = """
        fio \
            --name={test} \
            --ioengine=rbd \
            --pool={pool} \
            --rbdname={volume} \
            --output-format=json \
            --direct=1 \
            --randrepeat=1 \
            --numjobs=1 \
            --time_based \
            --runtime=75 \
            --group_reporting \
            --iodepth={iodepth} \
            --bs={bs} \
            --readwrite={rw}
    """.format(
        test=test,
        pool=pool,
        volume=benchmark_volume_name,
        iodepth=test_spec["iodepth"],
        bs=test_spec["bs"],
        rw=test_spec["rw"],
    )

    log_info(None, "Running fio job: {}".format(" ".join(fio_cmd.split())))
    retcode, stdout, stderr = pvc_common.run_os_command(fio_cmd)
    try:
        jstdout = loads(stdout)
        if retcode:
            # A non-zero exit code means fio failed; the bare raise drops us
            # into the common failure handling below
            raise
    except Exception:
        cleanup(
            job_name,
            db_conn=db_conn,
            db_cur=db_cur,
            zkhandler=zkhandler,
        )
        fail(
            None,
            f"Failed to run fio test '{test}': {stderr}",
        )

    return jstdout


def run_benchmark(self, pool):
    # Phase 0 - connect to databases
    try:
        zkhandler = ZKHandler(config)
        zkhandler.connect()
    except Exception:
        fail(
            self,
            "Failed to connect to Zookeeper",
        )

    cur_time = datetime.now().isoformat(timespec="seconds")
    cur_primary = zkhandler.read("base.config.primary_node")
    job_name = f"{cur_time}_{cur_primary}"
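    # (job_name combines the ISO timestamp with the current primary node name,
    # e.g. "2023-11-16T19:21:36_hv1"; the node name here is illustrative)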

    current_stage = 0
    total_stages = 13
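    # 13 stages: store the running status, create the volume, run the 8 fio
    # tests from test_matrix, clean up the volume, store the results, and finish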

    start(
        self,
        f"Running storage benchmark '{job_name}' on pool '{pool}'",
        current=current_stage,
        total=total_stages,
    )

    try:
        db_conn, db_cur = open_database(config)
    except Exception:
        cleanup(
            job_name,
            db_conn=None,
            db_cur=None,
            zkhandler=zkhandler,
        )
        fail(
            self,
            "Failed to connect to Postgres",
        )

    current_stage += 1
    update(
        self,
        "Storing running status in database",
        current=current_stage,
        total=total_stages,
    )

    try:
        query = "INSERT INTO storage_benchmarks (job, test_format, result) VALUES (%s, %s, %s);"
        args = (
            job_name,
            TEST_FORMAT,
            "Running",
        )
        db_cur.execute(query, args)
        db_conn.commit()
    except Exception as e:
        cleanup(
            job_name,
            db_conn=db_conn,
            db_cur=db_cur,
            zkhandler=zkhandler,
        )
        fail(self, f"Failed to store running status: {e}", exception=BenchmarkError)

    current_stage += 1
    update(
        self,
        "Creating benchmark volume",
        current=current_stage,
        total=total_stages,
    )

    prepare_benchmark_volume(
        pool,
        job_name=job_name,
        db_conn=db_conn,
        db_cur=db_cur,
        zkhandler=zkhandler,
    )

    # Phase 2 - benchmark run
    results = dict()
    for test in test_matrix:
        current_stage += 1
        update(
            self,
            f"Running benchmark job '{test}'",
            current=current_stage,
            total=total_stages,
        )

        results[test] = run_benchmark_job(
            test,
            pool,
            job_name=job_name,
            db_conn=db_conn,
            db_cur=db_cur,
            zkhandler=zkhandler,
        )

    # Phase 3 - cleanup
    current_stage += 1
    update(
        self,
        "Cleaning up benchmark volume",
        current=current_stage,
        total=total_stages,
    )

    cleanup_benchmark_volume(
        pool,
        job_name=job_name,
        db_conn=db_conn,
        db_cur=db_cur,
        zkhandler=zkhandler,
    )

    current_stage += 1
    update(
        self,
        "Storing results in database",
        current=current_stage,
        total=total_stages,
    )

    try:
        query = "UPDATE storage_benchmarks SET result = %s WHERE job = %s;"
        args = (dumps(results), job_name)
        db_cur.execute(query, args)
        db_conn.commit()
    except Exception as e:
        cleanup(
            job_name,
            db_conn=db_conn,
            db_cur=db_cur,
            zkhandler=zkhandler,
        )
        fail(self, f"Failed to store test results: {e}", exception=BenchmarkError)

    cleanup(
        job_name,
        db_conn=db_conn,
        db_cur=db_cur,
        zkhandler=zkhandler,
    )

    current_stage += 1
    return finish(
        self,
        f"Storage benchmark {job_name} completed successfully.",
        current=current_stage,
        total=total_stages,
    )
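

# A minimal sketch of how run_benchmark() might be wired into a Celery worker;
# the task name, decorator, and binding below are illustrative assumptions and
# are not part of this module:
#
#   @celery.task(name="storage.benchmark", bind=True)
#   def run_storage_benchmark(self, pool=None):
#       return run_benchmark(self, pool)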