Merge bitcoin/bitcoin#25107: bench: Add `--sanity-check` flag, use it in `make check`
4f31c21b7f bench: Make all arguments -kebab-case (laanwj)
652b54e532 bench: Add `--sanity-check` flag, use it in `make check` (laanwj)

Pull request description:

  The benchmarks are run as part of `make check` for a crash-sanity check. The actual results are being ignored. So only run them for one iteration. This makes the `bench_bitcoin` part take 2m00 instead of 5m20 here. Which is still too long (imo), but this needs to be solved in the `WalletLoading*` benchmarks which take that long per iteration.

  Also change all `bench_bitcoin` arguments to kebab-case to be consistent with the other tools (in a separate commit).

ACKs for top commit:
  jonatack:
    ACK 4f31c21b7f on the sanity-check version per `git diff c52a71e 4f31c28` (modulo s/--sanity check/--sanity-check/ in src/bench/bench.cpp::L61)
  hebasto:
    ACK 4f31c21b7f, tested on Ubuntu 22.04.

Tree-SHA512: 2661d130fd82e57c9041755190997a4af588fadddcdd05e04fd024f75da1202480e9feab5764566e8dfe7930e8ae0ec71e93f40ac373274953d274072723980d
commit dd8a2df488
4 changed files with 19 additions and 9 deletions
--- a/Makefile.am
+++ b/Makefile.am
@@ -364,8 +364,8 @@ endif
 if TARGET_WINDOWS
 else
 if ENABLE_BENCH
-        @echo "Running bench/bench_bitcoin ..."
-        $(BENCH_BINARY) > /dev/null
+        @echo "Running bench/bench_bitcoin (one iteration sanity check)..."
+        $(BENCH_BINARY) --sanity-check > /dev/null
 endif
 endif
         $(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C secp256k1 check
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -57,6 +57,10 @@ void benchmark::BenchRunner::RunAll(const Args& args)
     std::regex reFilter(args.regex_filter);
     std::smatch baseMatch;
 
+    if (args.sanity_check) {
+        std::cout << "Running with --sanity check option, benchmark results will be useless." << std::endl;
+    }
+
     std::vector<ankerl::nanobench::Result> benchmarkResults;
     for (const auto& p : benchmarks()) {
         if (!std::regex_match(p.first, baseMatch, reFilter)) {
@@ -69,6 +73,9 @@ void benchmark::BenchRunner::RunAll(const Args& args)
         }
 
         Bench bench;
+        if (args.sanity_check) {
+            bench.epochs(1).epochIterations(1);
+        }
         bench.name(p.first);
         if (args.min_time > 0ms) {
             // convert to nanos before dividing to reduce rounding errors
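To illustrate the sanity-check path above, here is a small standalone sketch that drives nanobench with the same `epochs(1).epochIterations(1)` settings. It is not part of the PR; it assumes the single-header nanobench distribution is available as `nanobench.h` (inside the Bitcoin tree the equivalent header is `bench/nanobench.h`), and the benchmark name is made up.

    // Standalone sketch: one epoch, one iteration, mirroring what RunAll()
    // does when args.sanity_check is set.
    #define ANKERL_NANOBENCH_IMPLEMENT
    #include "nanobench.h"  // assumption: single-header nanobench on the include path

    #include <atomic>

    int main()
    {
        std::atomic<int> counter{0};

        ankerl::nanobench::Bench bench;
        bench.epochs(1).epochIterations(1);  // the --sanity-check configuration
        bench.name("trivial-increment");
        bench.run([&] { counter.fetch_add(1); });

        // With a single iteration the timing numbers are meaningless; the only
        // useful signal is that the benchmark body ran without crashing.
        return counter.load() >= 1 ? 0 : 1;
    }

As the added log line says, the numbers produced this way are useless as benchmark results; the run is purely a crash check.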
--- a/src/bench/bench.h
+++ b/src/bench/bench.h
@@ -43,6 +43,7 @@ typedef std::function<void(Bench&)> BenchFunction;
 
 struct Args {
     bool is_list_only;
+    bool sanity_check;
     std::chrono::milliseconds min_time;
     std::vector<double> asymptote;
     fs::path output_csv;
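Since `Args` carries no default member initializers, code constructing it directly is expected to set each field it relies on, as `bench_bitcoin.cpp` does in the hunks below. A hedged sketch of a minimal direct caller (the function name `RunSanityPass` is invented for illustration; it assumes Bitcoin Core's include paths):

    #include <bench/bench.h>  // assumption: compiled inside the Bitcoin Core source tree

    // Illustration only: run every registered benchmark once, in the same fast
    // mode that `make check` now uses.
    static void RunSanityPass()
    {
        benchmark::Args args{};    // value-initialize: bools false, paths empty, min_time 0ms
        args.regex_filter = ".*";  // select every registered benchmark
        args.sanity_check = true;  // one epoch, one iteration per benchmark
        benchmark::BenchRunner::RunAll(args);
    }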
--- a/src/bench/bench_bitcoin.cpp
+++ b/src/bench/bench_bitcoin.cpp
@@ -26,9 +26,10 @@ static void SetupBenchArgs(ArgsManager& argsman)
     argsman.AddArg("-asymptote=<n1,n2,n3,...>", "Test asymptotic growth of the runtime of an algorithm, if supported by the benchmark", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-filter=<regex>", strprintf("Regular expression filter to select benchmark by name (default: %s)", DEFAULT_BENCH_FILTER), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
     argsman.AddArg("-list", "List benchmarks without executing them", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-min_time=<milliseconds>", strprintf("Minimum runtime per benchmark, in milliseconds (default: %d)", DEFAULT_MIN_TIME_MS), ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_NEGATION, OptionsCategory::OPTIONS);
-    argsman.AddArg("-output_csv=<output.csv>", "Generate CSV file with the most important benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-    argsman.AddArg("-output_json=<output.json>", "Generate JSON file with all benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-min-time=<milliseconds>", strprintf("Minimum runtime per benchmark, in milliseconds (default: %d)", DEFAULT_MIN_TIME_MS), ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_NEGATION, OptionsCategory::OPTIONS);
+    argsman.AddArg("-output-csv=<output.csv>", "Generate CSV file with the most important benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-output-json=<output.json>", "Generate JSON file with all benchmark results", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+    argsman.AddArg("-sanity-check", "Run benchmarks for only one iteration", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
 }
 
 // parses a comma separated list like "10,20,30,50"
@@ -73,7 +74,7 @@ int main(int argc, char** argv)
             " sure each run has exactly the same preconditions.\n"
             "\n"
             " * If results are still not reliable, increase runtime with e.g.\n"
-            " -min_time=5000 to let a benchmark run for at least 5 seconds.\n"
+            " -min-time=5000 to let a benchmark run for at least 5 seconds.\n"
             "\n"
             " * bench_bitcoin uses nanobench [3] for which there is extensive\n"
             " documentation available online.\n"
@@ -108,10 +109,11 @@ int main(int argc, char** argv)
     benchmark::Args args;
     args.asymptote = parseAsymptote(argsman.GetArg("-asymptote", ""));
     args.is_list_only = argsman.GetBoolArg("-list", false);
-    args.min_time = std::chrono::milliseconds(argsman.GetIntArg("-min_time", DEFAULT_MIN_TIME_MS));
-    args.output_csv = argsman.GetPathArg("-output_csv");
-    args.output_json = argsman.GetPathArg("-output_json");
+    args.min_time = std::chrono::milliseconds(argsman.GetIntArg("-min-time", DEFAULT_MIN_TIME_MS));
+    args.output_csv = argsman.GetPathArg("-output-csv");
+    args.output_json = argsman.GetPathArg("-output-json");
     args.regex_filter = argsman.GetArg("-filter", DEFAULT_BENCH_FILTER);
+    args.sanity_check = argsman.GetBoolArg("-sanity-check", false);
 
     benchmark::BenchRunner::RunAll(args);
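For context, individual benchmarks are unaffected by the flag renames; they are still registered through the `BENCHMARK` macro from `bench/bench.h`. A hedged sketch of a minimal benchmark in the style of `src/bench/examples.cpp` (the name `TrivialSum` is invented; the single-argument `BENCHMARK` form matches the tree this PR targets):

    #include <bench/bench.h>  // assumption: compiled inside the Bitcoin Core source tree

    #include <cstdint>

    // Under -sanity-check, RunAll() configures nanobench with
    // epochs(1).epochIterations(1), so this lambda executes essentially once.
    static void TrivialSum(benchmark::Bench& bench)
    {
        uint64_t sum{0};
        bench.run([&] {
            sum += 1;
            ankerl::nanobench::doNotOptimizeAway(sum);  // keep the work from being optimized away
        });
    }
    BENCHMARK(TrivialSum);

With the kebab-case flags above, such a benchmark could be measured with, e.g., `bench_bitcoin -filter=TrivialSum -min-time=5000`, or swept up by `bench_bitcoin -sanity-check` during `make check`.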