mirror of
git://sourceware.org/git/glibc.git
synced 2025-01-30 12:31:53 +08:00
benchtests: improve argument parsing through argparse library
The argparse library is used in the compare_bench script to improve command-line argument parsing. The 'schema validation file' is now optional, reducing the number of required parameters by one. * benchtests/scripts/compare_bench.py (__main__): use the argparse library to improve command line parsing. (__main__): make the schema file an optional parameter (--schema), defaulting to benchtests/scripts/benchout.schema.json. (main): move the parsing stuff out to __main__ and leave main only as the caller of the main comparison functions.
This commit is contained in:
parent
e84bd8514c
commit
1cf4ae7fe6
@ -1,3 +1,12 @@
|
||||
2018-07-19 Leonardo Sandoval <leonardo.sandoval.gonzalez@intel.com>
|
||||
|
||||
* benchtests/scripts/compare_bench.py (__main__): use the argparse
|
||||
library to improve command line parsing.
|
||||
(__main__): make schema file as optional parameter (--schema),
|
||||
defaulting to benchtests/scripts/benchout.schema.json.
|
||||
(main): move the parsing stuff out to __main__ and leave it
|
||||
only as caller of main comparison functions.
|
||||
|
||||
2018-07-19 H.J. Lu <hongjiu.lu@intel.com>
|
||||
|
||||
* NEWS: Add a note for Intel CET status.
|
||||
|
@ -25,6 +25,7 @@ import sys
|
||||
import os
|
||||
import pylab
|
||||
import import_bench as bench
|
||||
import argparse
|
||||
|
||||
def do_compare(func, var, tl1, tl2, par, threshold):
|
||||
"""Compare one of the aggregate measurements
|
||||
@ -151,26 +152,9 @@ def plot_graphs(bench1, bench2):
|
||||
print('Writing out %s' % filename)
|
||||
pylab.savefig(filename)
|
||||
|
||||
|
||||
def main(args):
|
||||
"""Program Entry Point
|
||||
|
||||
Take two benchmark output files and compare their timings.
|
||||
"""
|
||||
if len(args) > 4 or len(args) < 3:
|
||||
print('Usage: %s <schema> <file1> <file2> [threshold in %%]' % sys.argv[0])
|
||||
sys.exit(os.EX_USAGE)
|
||||
|
||||
bench1 = bench.parse_bench(args[1], args[0])
|
||||
bench2 = bench.parse_bench(args[2], args[0])
|
||||
if len(args) == 4:
|
||||
threshold = float(args[3])
|
||||
else:
|
||||
threshold = 10.0
|
||||
|
||||
if (bench1['timing_type'] != bench2['timing_type']):
|
||||
print('Cannot compare benchmark outputs: timing types are different')
|
||||
return
|
||||
def main(bench1, bench2, schema, threshold):
|
||||
bench1 = bench.parse_bench(bench1, schema)
|
||||
bench2 = bench.parse_bench(bench2, schema)
|
||||
|
||||
plot_graphs(bench1, bench2)
|
||||
|
||||
@ -181,4 +165,18 @@ def main(args):
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Script entry point: parse the command line, then delegate the actual
    # benchmark comparison to main().
    parser = argparse.ArgumentParser(
        description='Take two benchmarks and compare their timings.')

    # Required positional parameters: the two benchmark output files.
    parser.add_argument('bench1', help='First bench to compare')
    parser.add_argument('bench2', help='Second bench to compare')

    # Optional parameters.  The schema used to validate both benchmark files
    # defaults to benchout.schema.json next to this script.
    parser.add_argument('--schema',
                        default=os.path.join(
                            os.path.dirname(os.path.realpath(__file__)),
                            'benchout.schema.json'),
                        help='JSON file to validate source/dest files (default: %(default)s)')
    # type=float is required: without it argparse passes the user-supplied
    # value through as a *string* (the float default is only used when the
    # flag is absent), which would break numeric threshold comparisons.
    parser.add_argument('--threshold', type=float, default=10.0,
                        help='Only print those with equal or higher threshold (default: %(default)s)')

    args = parser.parse_args()

    main(args.bench1, args.bench2, args.schema, args.threshold)
|
||||
|
Loading…
Reference in New Issue
Block a user