diff --git a/doc/usage.rst b/doc/usage.rst
index 95e9c25d..eba41005 100644
--- a/doc/usage.rst
+++ b/doc/usage.rst
@@ -185,7 +185,7 @@ options::
    -v, --verbose         Print more output
    -O STYLE, --output_style STYLE
                          What style the benchmark output should take.
-                         Valid options are 'normal' and 'table'.
+                         Valid options are 'normal', 'table', and 'table_github'.
                          Default is normal.
    --csv CSV_FILE        Name of a file the results will be written to,
                          as a three-column CSV file containing minimum
diff --git a/pyperformance/cli.py b/pyperformance/cli.py
index df68dc3d..bde9a9eb 100644
--- a/pyperformance/cli.py
+++ b/pyperformance/cli.py
@@ -113,10 +113,10 @@ def parse_args():
     cmd.add_argument("-v", "--verbose", action="store_true",
                      help="Print more output")
     cmd.add_argument("-O", "--output_style", metavar="STYLE",
-                     choices=("normal", "table"),
+                     choices=("normal", "table", "table_github"),
                      default="normal",
                      help=("What style the benchmark output should take."
-                           " Valid options are 'normal' and 'table'."
+                           " Valid options are 'normal', 'table', and 'table_github'."
                            " Default is normal."))
     cmd.add_argument("--csv", metavar="CSV_FILE",
                      action="store", default=None,
diff --git a/pyperformance/compare.py b/pyperformance/compare.py
index 1302733c..e3cf7a05 100644
--- a/pyperformance/compare.py
+++ b/pyperformance/compare.py
@@ -202,6 +202,29 @@ def format_table(base_label, changed_label, results):
     output.insert(2, "".join(header_sep_line))
     return "\n".join(output)
 
+def _format_github_row(items):
+    return "| " + " | ".join(items) + " |"
+
+def format_github_table(base_label, changed_label, results):
+    columns = ("Benchmark", base_label, changed_label, "Change", "Significance")
+    output = [_format_github_row(columns), "| --- " * len(columns) + "|"]
+
+    for (bench_name, result) in results:
+        format_value = result.base.format_value
+        avg_base = result.base.mean()
+        avg_changed = result.changed.mean()
+        delta_avg = quantity_delta(result.base, result.changed)
+        msg = significant_msg(result.base, result.changed)
+        rows = (bench_name,
+                # Limit the precision for conciseness in the table.
+                format_value(avg_base),
+                format_value(avg_changed),
+                delta_avg,
+                msg)
+        output.append(_format_github_row(rows))
+
+    return "\n".join(output)
+
 
 class BenchmarkResult(object):
     """An object representing data from a succesful benchmark run."""
@@ -355,6 +378,9 @@ def compare_results(options):
     elif options.output_style == "table":
         if shown:
             print(format_table(base_label, changed_label, shown))
+    elif options.output_style == "table_github":
+        if shown:
+            print(format_github_table(base_label, changed_label, shown))
     else:
         raise ValueError("Invalid output_style: %r"
                          % options.output_style)
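
For readers of the patch, here is a minimal, self-contained sketch of the Markdown that the new 'table_github' style emits. The row helper mirrors _format_github_row from the patch above; the column labels, benchmark name, timings, and t-value are invented purely for illustration.

# Standalone sketch of the GitHub-flavored table format added by this patch.
# All values below are made up; real output comes from format_github_table(),
# which fills the cells from the benchmark comparison results.

def _format_github_row(items):
    return "| " + " | ".join(items) + " |"

columns = ("Benchmark", "base", "changed", "Change", "Significance")
table = [
    _format_github_row(columns),
    "| --- " * len(columns) + "|",
    _format_github_row(("nbody", "122 ms", "104 ms",
                        "1.17x faster", "Significant (t=25.0)")),
]
print("\n".join(table))

# Printed output (renders as a table when pasted into a GitHub comment):
# | Benchmark | base | changed | Change | Significance |
# | --- | --- | --- | --- | --- |
# | nbody | 122 ms | 104 ms | 1.17x faster | Significant (t=25.0) |

Pasted into a GitHub issue or pull request, those three lines render as a five-column table, which is the advantage of 'table_github' over the fixed-width 'table' style.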