Revamp benchmark tests

1. Move to a time-based (60s) benchmark to avoid these tests taking an absurd
amount of time to show the same information.

2. Eliminate the 256K random benchmarks, since they don't really add
anything.

3. Add a 4K single-queue benchmark, as this might provide valuable
insight into latency (illustrated in the sketch after this list).

4. Adjust the output to reflect the above changes.
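
For illustration, a minimal sketch of the fio invocation the new single-queue, time-based test renders to, using the same template style as the daemon; 'testpool' and 'test_volume' are placeholder names, not values PVC itself uses.

# Illustrative sketch only: the new 'rand_read_4K_lowdepth' matrix entry
# rendered into an fio command line. 'testpool' and 'test_volume' are
# placeholders; 'direction' is kept only to mirror the matrix entry shape.
test = 'rand_read_4K_lowdepth'
params = {'direction': 'read', 'iodepth': '1', 'bs': '4K', 'rw': 'randread'}

fio_cmd = """
    fio \
        --name={test} \
        --ioengine=rbd \
        --pool={pool} \
        --rbdname={volume} \
        --output-format=terse \
        --terse-version=5 \
        --direct=1 \
        --randrepeat=1 \
        --iodepth={iodepth} \
        --numjobs=1 \
        --time_based \
        --runtime=60 \
        --bs={bs} \
        --readwrite={rw}
""".format(
    test=test,
    pool='testpool',
    volume='test_volume',
    iodepth=params['iodepth'],
    bs=params['bs'],
    rw=params['rw'])

print(fio_cmd)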

While this does change the benchmarking, it should not invalidate any
existing benchmarks, since most of the test suite is unchanged (especially
the most important 4M sequential and 4K random tests). It simply removes
an unused entry and adds a more helpful one. The time-based change
should not significantly affect the results either; it just reduces the
total runtime for long tests and increases the runtime for quick tests to
provide a better picture.
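
As a rough, standalone sketch of the display-compatibility handling added to the CLI formatter (second file below), using simplified stand-in dictionaries rather than the real PVC structures:

# Standalone sketch of the display-compatibility logic; benchmark_details and
# nice_test_name_map here are simplified stand-ins, not the real CLI structures.
nice_test_name_map = {
    'rand_read_4K': 'Random Read (4K blocks)',
    'rand_read_4K_lowdepth': 'Random Read (4K blocks, single-queue)',
}

# An "old" result: it still contains a 256K test and lacks the new
# single-queue tests.
benchmark_details = {
    'rand_read_4K': {},
    'rand_read_256K': {},
}

# Old results do not contain the new single-queue tests, so drop their labels
# instead of printing rows that cannot exist.
if 'rand_read_4K_lowdepth' not in benchmark_details:
    nice_test_name_map.pop('rand_read_4K_lowdepth', None)
    nice_test_name_map.pop('rand_write_4K_lowdepth', None)

# Old results may still contain the obsolete 256K tests; simply skip them.
for test in benchmark_details:
    if test in ('rand_read_256K', 'rand_write_256K'):
        continue
    print(nice_test_name_map[test])  # prints only 'Random Read (4K blocks)'
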
Joshua Boniface 2021-09-28 13:26:09 -04:00
parent f39b041471
commit 252175fb6f
3 changed files with 45 additions and 21 deletions

@@ -171,66 +171,77 @@ def run_benchmark(self, pool):
     test_matrix = {
         'seq_read': {
             'direction': 'read',
+            'iodepth': '64',
             'bs': '4M',
             'rw': 'read'
         },
         'seq_write': {
             'direction': 'write',
+            'iodepth': '64',
             'bs': '4M',
             'rw': 'write'
         },
         'rand_read_4M': {
             'direction': 'read',
+            'iodepth': '64',
             'bs': '4M',
             'rw': 'randread'
         },
         'rand_write_4M': {
             'direction': 'write',
+            'iodepth': '64',
             'bs': '4M',
             'rw': 'randwrite'
         },
-        'rand_read_256K': {
-            'direction': 'read',
-            'bs': '256K',
-            'rw': 'randread'
-        },
-        'rand_write_256K': {
-            'direction': 'write',
-            'bs': '256K',
-            'rw': 'randwrite'
-        },
         'rand_read_4K': {
             'direction': 'read',
+            'iodepth': '64',
             'bs': '4K',
             'rw': 'randread'
         },
         'rand_write_4K': {
             'direction': 'write',
+            'iodepth': '64',
             'bs': '4K',
             'rw': 'randwrite'
-        }
+        },
+        'rand_read_4K_lowdepth': {
+            'direction': 'read',
+            'iodepth': '1',
+            'bs': '4K',
+            'rw': 'randread'
+        },
+        'rand_write_4K_lowdepth': {
+            'direction': 'write',
+            'iodepth': '1',
+            'bs': '4K',
+            'rw': 'randwrite'
+        },
     }

     parsed_results = dict()
     for test in test_matrix:
         print("Running test '{}'".format(test))
         fio_cmd = """
             fio \
-                --output-format=terse \
-                --terse-version=5 \
+                --name={test} \
                 --ioengine=rbd \
                 --pool={pool} \
                 --rbdname={volume} \
+                --output-format=terse \
+                --terse-version=5 \
                 --direct=1 \
                 --randrepeat=1 \
-                --iodepth=64 \
-                --size=8G \
-                --name={test} \
+                --iodepth={iodepth} \
+                --numjobs=1 \
+                --time_based \
+                --runtime=60 \
                 --bs={bs} \
                 --readwrite={rw}
         """.format(
+            test=test,
             pool=pool,
             volume=volume,
-            test=test,
+            iodepth=test_matrix[test]['iodepth'],
             bs=test_matrix[test]['bs'],
             rw=test_matrix[test]['rw'])

@@ -1606,10 +1606,10 @@ def format_info_benchmark(config, benchmark_information):
         "seq_write": "Sequential Write (4M blocks)",
         "rand_read_4M": "Random Read (4M blocks)",
         "rand_write_4M": "Random Write (4M blocks)",
-        "rand_read_256K": "Random Read (256K blocks)",
-        "rand_write_256K": "Random Write (256K blocks)",
         "rand_read_4K": "Random Read (4K blocks)",
-        "rand_write_4K": "Random Write (4K blocks)"
+        "rand_write_4K": "Random Write (4K blocks)",
+        "rand_read_4K_lowdepth": "Random Read (4K blocks, single-queue)",
+        "rand_write_4K_lowdepth": "Random Write (4K blocks, single-queue)",
     }

     test_name_length = 30
@@ -1622,7 +1622,16 @@ def format_info_benchmark(config, benchmark_information):
     cpuutil_label_length = 11
     cpuutil_column_length = 9

+    # Work around old results that did not have these tests
+    if 'rand_read_4K_lowdepth' not in benchmark_details:
+        del nice_test_name_map['rand_read_4K_lowdepth']
+        del nice_test_name_map['rand_write_4K_lowdepth']
+
     for test in benchmark_details:
+        # Work around old results that had these obsolete tests
+        if test == 'rand_read_256K' or test == 'rand_write_256K':
+            continue
+
         _test_name_length = len(nice_test_name_map[test])
         if _test_name_length > test_name_length:
             test_name_length = _test_name_length
@@ -1659,6 +1668,10 @@ def format_info_benchmark(config, benchmark_information):
             cpuutil_column_length = _element_length

     for test in benchmark_details:
+        # Work around old results that had these obsolete tests
+        if test == 'rand_read_256K' or test == 'rand_write_256K':
+            continue
+
         ainformation.append('')

         test_details = benchmark_details[test]

@@ -2526,7 +2526,7 @@ def ceph_benchmark_run(pool):
     Run a storage benchmark on POOL in the background.
     """
     try:
-        click.confirm('NOTE: Storage benchmarks generate significant load on the cluster and can take a very long time to complete on slow storage. They should be run sparingly. Continue', prompt_suffix='? ', abort=True)
+        click.confirm('NOTE: Storage benchmarks take approximately 8 minutes to run and generate significant load on the storage cluster; they should be run sparingly. Continue', prompt_suffix='? ', abort=True)
     except Exception:
         exit(0)
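
For context on the reworded prompt, the "approximately 8 minutes" figure follows from the revised matrix: eight time-based tests at 60 seconds each, excluding volume setup and teardown.

# Rough arithmetic behind the updated prompt text: eight 60-second,
# time-based tests; volume setup/teardown overhead is not counted here.
num_tests = 8          # 2x 4M seq + 2x 4M rand + 2x 4K rand + 2x 4K single-queue
runtime_per_test = 60  # seconds, from --time_based --runtime=60
print('{:.0f} minutes'.format(num_tests * runtime_per_test / 60))  # -> 8 minutes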