#![deny(missing_docs)]
#![deny(bare_trait_objects)]
#![deny(warnings)]
#![cfg_attr(feature = "real_blackbox", feature(test))]
#![cfg_attr(
feature = "cargo-clippy",
allow(
clippy::used_underscore_binding,
clippy::just_underscores_and_digits,
clippy::transmute_ptr_to_ptr
)
)]
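//! A statistics-driven micro-benchmarking library.
//!
//! Criterion.rs collects samples of a routine's execution time (or another
//! user-supplied measurement), performs bootstrap-based statistical analysis
//! on them, and reports the results on the command line and, optionally, as
//! HTML plots.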
#[cfg(test)]
#[macro_use]
extern crate approx;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
use clap::value_t;
use regex::Regex;
#[macro_use]
extern crate lazy_static;
use atty;
use criterion_plot;
#[cfg(feature = "real_blackbox")]
extern crate test;
#[macro_use]
extern crate serde_derive;
#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
#[macro_use]
mod benchmark_group;
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod html;
mod kde;
mod macros;
pub mod measurement;
mod plot;
pub mod profiler;
mod report;
mod routine;
mod stats;
use std::cell::RefCell;
use std::collections::{BTreeMap, HashSet};
use std::default::Default;
use std::fmt;
use std::iter::IntoIterator;
use std::marker::PhantomData;
use std::time::Duration;
use std::time::Instant;
use criterion_plot::{Version, VersionError};
use crate::benchmark::BenchmarkConfig;
use crate::benchmark::NamedRoutine;
use crate::csv_report::FileCsvReport;
use crate::estimate::{Distributions, Estimates, Statistic};
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
use crate::plot::{Gnuplot, Plotter, PlottersBackend};
use crate::profiler::{ExternalProfiler, Profiler};
use crate::report::{CliReport, Report, ReportContext, Reports};
use crate::routine::Function;
pub use crate::benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};
lazy_static! {
static ref DEBUG_ENABLED: bool = { std::env::vars().any(|(key, _)| key == "CRITERION_DEBUG") };
static ref GNUPLOT_VERSION: Result<Version, VersionError> = { criterion_plot::version() };
static ref DEFAULT_PLOTTING_BACKEND: PlottingBackend = {
match &*GNUPLOT_VERSION {
Ok(_) => PlottingBackend::Gnuplot,
Err(e) => {
match e {
VersionError::Exec(_) => println!("Gnuplot not found, using plotters backend"),
e => println!(
"Gnuplot not found or not usable, using plotters backend\n{}",
e
),
};
PlottingBackend::Plotters
}
}
};
}
fn debug_enabled() -> bool {
*DEBUG_ENABLED
}
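/// A function that is opaque to the optimizer, used to prevent the compiler
/// from optimizing away computations in a benchmark.
///
/// This variant is backed by `test::black_box` and is only available with the
/// nightly-only `real_blackbox` feature.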
#[cfg(feature = "real_blackbox")]
pub fn black_box<T>(dummy: T) -> T {
test::black_box(dummy)
}
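/// A function that is opaque to the optimizer, used to prevent the compiler
/// from optimizing away computations in a benchmark.
///
/// This stable fallback approximates `test::black_box` with a volatile read.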
#[cfg(not(feature = "real_blackbox"))]
pub fn black_box<T>(dummy: T) -> T {
unsafe {
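        // The volatile read makes a bitwise copy that the optimizer must treat
        // as used; forgetting the original afterwards avoids a double drop.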
let ret = std::ptr::read_volatile(&dummy);
std::mem::forget(dummy);
ret
}
}
#[doc(hidden)]
pub struct Fun<I: fmt::Debug, M: Measurement + 'static = WallTime> {
f: NamedRoutine<I, M>,
_phantom: PhantomData<M>,
}
impl<I, M: Measurement> Fun<I, M>
where
I: fmt::Debug + 'static,
{
pub fn new<F>(name: &str, f: F) -> Fun<I, M>
where
F: FnMut(&mut Bencher<'_, M>, &I) + 'static,
{
let routine = NamedRoutine {
id: name.to_owned(),
f: Box::new(RefCell::new(Function::new(f))),
};
Fun {
f: routine,
_phantom: PhantomData,
}
}
}
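/// Argument to `Bencher::iter_batched` and `Bencher::iter_batched_ref` that
/// controls how many iterations share one batch of setup values:
/// `SmallInput` and `LargeInput` derive the batch size from the iteration
/// count (roughly `iters / 10` and `iters / 1000` iterations per batch),
/// `PerIteration` runs one setup per iteration, and `NumBatches` /
/// `NumIterations` fix the number of batches or the batch size explicitly.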
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
SmallInput,
LargeInput,
PerIteration,
NumBatches(u64),
NumIterations(u64),
#[doc(hidden)]
__NonExhaustive,
}
impl BatchSize {
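    /// Converts this `BatchSize` into the number of iterations to run per
    /// batch, given the total iteration count for the sample (rounding up
    /// where a division is involved).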
fn iters_per_batch(self, iters: u64) -> u64 {
match self {
BatchSize::SmallInput => (iters + 10 - 1) / 10,
BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
BatchSize::PerIteration => 1,
BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
BatchSize::NumIterations(size) => size,
BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
}
}
}
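/// Timer struct handed to benchmark routines; used to run and measure the code
/// under test via `iter` and its variants.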
pub struct Bencher<'a, M: Measurement = WallTime> {
    iterated: bool,
    iters: u64,
    value: M::Value,
    measurement: &'a M,
    elapsed_time: Duration,
}
impl<'a, M: Measurement> Bencher<'a, M> {
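    /// Times `routine` by executing it repeatedly and measuring the total
    /// elapsed value over all iterations; the output is passed through
    /// `black_box` so that it is not optimized away.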
#[inline(never)]
pub fn iter<O, R>(&mut self, mut routine: R)
where
R: FnMut() -> O,
{
self.iterated = true;
let time_start = Instant::now();
let start = self.measurement.start();
for _ in 0..self.iters {
black_box(routine());
}
self.value = self.measurement.end(start);
self.elapsed_time = time_start.elapsed();
}
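    /// Times a routine that performs its own measurement: the closure receives
    /// the number of iterations to run and must return the measured value for
    /// all of them.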
#[inline(never)]
pub fn iter_custom<R>(&mut self, mut routine: R)
where
R: FnMut(u64) -> M::Value,
{
self.iterated = true;
let time_start = Instant::now();
self.value = routine(self.iters);
self.elapsed_time = time_start.elapsed();
}
#[doc(hidden)]
pub fn iter_with_setup<I, O, S, R>(&mut self, setup: S, routine: R)
where
S: FnMut() -> I,
R: FnMut(I) -> O,
{
self.iter_batched(setup, routine, BatchSize::PerIteration);
}
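    /// Times `routine`, dropping its output outside of the measured section so
    /// that the cost of dropping a large value does not affect the timing.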
pub fn iter_with_large_drop<O, R>(&mut self, mut routine: R)
where
R: FnMut() -> O,
{
self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
}
#[doc(hidden)]
pub fn iter_with_large_setup<I, O, S, R>(&mut self, setup: S, routine: R)
where
S: FnMut() -> I,
R: FnMut(I) -> O,
{
self.iter_batched(setup, routine, BatchSize::NumBatches(1));
}
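    /// Times `routine` with input values produced by `setup`. The setup time
    /// and the time to drop the outputs are excluded from the measurement;
    /// `size` controls how many iterations share a batch of setup values.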
#[inline(never)]
pub fn iter_batched<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
where
S: FnMut() -> I,
R: FnMut(I) -> O,
{
self.iterated = true;
let batch_size = size.iters_per_batch(self.iters);
assert!(batch_size != 0, "Batch size must not be zero.");
let time_start = Instant::now();
self.value = self.measurement.zero();
if batch_size == 1 {
for _ in 0..self.iters {
let input = black_box(setup());
let start = self.measurement.start();
let output = routine(input);
let end = self.measurement.end(start);
self.value = self.measurement.add(&self.value, &end);
drop(black_box(output));
}
} else {
let mut iteration_counter = 0;
while iteration_counter < self.iters {
let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
let mut outputs = Vec::with_capacity(batch_size as usize);
let start = self.measurement.start();
outputs.extend(inputs.into_iter().map(&mut routine));
let end = self.measurement.end(start);
self.value = self.measurement.add(&self.value, &end);
black_box(outputs);
iteration_counter += batch_size;
}
}
self.elapsed_time = time_start.elapsed();
}
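    /// Like `iter_batched`, but passes the input to `routine` by mutable
    /// reference, so the time to drop the inputs as well as the outputs is
    /// excluded from the measurement.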
#[inline(never)]
pub fn iter_batched_ref<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
where
S: FnMut() -> I,
R: FnMut(&mut I) -> O,
{
self.iterated = true;
let batch_size = size.iters_per_batch(self.iters);
assert!(batch_size != 0, "Batch size must not be zero.");
let time_start = Instant::now();
self.value = self.measurement.zero();
if batch_size == 1 {
for _ in 0..self.iters {
let mut input = black_box(setup());
let start = self.measurement.start();
let output = routine(&mut input);
let end = self.measurement.end(start);
self.value = self.measurement.add(&self.value, &end);
drop(black_box(output));
drop(black_box(input));
}
} else {
let mut iteration_counter = 0;
while iteration_counter < self.iters {
let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
let mut outputs = Vec::with_capacity(batch_size as usize);
let start = self.measurement.start();
outputs.extend(inputs.iter_mut().map(&mut routine));
let end = self.measurement.end(start);
self.value = self.measurement.add(&self.value, &end);
black_box(outputs);
iteration_counter += batch_size;
}
}
self.elapsed_time = time_start.elapsed();
}
fn assert_iterated(&mut self) {
if !self.iterated {
panic!("Benchmark function must call Bencher::iter or related method.");
}
self.iterated = false;
}
}
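/// Selects how stored baseline measurements are used: `Compare` benchmarks
/// against a saved baseline without overwriting it, while `Save` overwrites
/// the named baseline with the new results.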
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
Compare,
Save,
}
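/// Selects the library used to render plots: the external `gnuplot` command or
/// the pure-Rust `plotters` crate.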
#[derive(Debug, Clone, Copy)]
pub enum PlottingBackend {
Gnuplot,
Plotters,
}
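/// The benchmark manager. Holds the configuration (sample size, measurement
/// time, plotting and baseline settings, and so on), runs benchmarks, and
/// passes the collected measurements to the analysis and reporting code.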
pub struct Criterion<M: Measurement = WallTime> {
config: BenchmarkConfig,
plotting_backend: PlottingBackend,
plotting_enabled: bool,
filter: Option<Regex>,
report: Box<dyn Report>,
output_directory: String,
baseline_directory: String,
baseline: Baseline,
profile_time: Option<Duration>,
load_baseline: Option<String>,
test_mode: bool,
list_mode: bool,
all_directories: HashSet<String>,
all_titles: HashSet<String>,
measurement: M,
profiler: Box<RefCell<dyn Profiler>>,
}
impl Default for Criterion {
fn default() -> Criterion {
let mut reports: Vec<Box<dyn Report>> = vec![];
reports.push(Box::new(CliReport::new(false, false, false)));
reports.push(Box::new(FileCsvReport));
let output_directory =
match std::env::vars().find(|&(ref key, _)| key == "CARGO_TARGET_DIR") {
Some((_, value)) => format!("{}/criterion", value),
None => "target/criterion".to_owned(),
};
Criterion {
config: BenchmarkConfig {
confidence_level: 0.95,
measurement_time: Duration::new(5, 0),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
warm_up_time: Duration::new(3, 0),
},
plotting_backend: *DEFAULT_PLOTTING_BACKEND,
plotting_enabled: true,
filter: None,
report: Box::new(Reports::new(reports)),
baseline_directory: "base".to_owned(),
baseline: Baseline::Save,
profile_time: None,
load_baseline: None,
test_mode: false,
list_mode: false,
output_directory,
all_directories: HashSet::new(),
all_titles: HashSet::new(),
measurement: WallTime,
profiler: Box::new(RefCell::new(ExternalProfiler)),
}
}
}
impl<M: Measurement> Criterion<M> {
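    /// Changes the measurement (wall-clock time by default) used for
    /// benchmarks run with this `Criterion` instance, preserving the rest of
    /// the configuration.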
pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
Criterion {
config: self.config,
plotting_backend: self.plotting_backend,
plotting_enabled: self.plotting_enabled,
filter: self.filter,
report: self.report,
baseline_directory: self.baseline_directory,
baseline: self.baseline,
profile_time: self.profile_time,
load_baseline: self.load_baseline,
test_mode: self.test_mode,
list_mode: self.list_mode,
output_directory: self.output_directory,
all_directories: self.all_directories,
all_titles: self.all_titles,
measurement: m,
profiler: self.profiler,
}
}
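    /// Installs a custom profiler in place of the default external profiler,
    /// intended for use with `--profile-time` runs.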
pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
Criterion {
profiler: Box::new(RefCell::new(p)),
..self
}
}
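    /// Selects the plotting backend explicitly. Panics if the gnuplot backend
    /// is requested but gnuplot is not available.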
pub fn plotting_backend(self, backend: PlottingBackend) -> Criterion<M> {
if let PlottingBackend::Gnuplot = backend {
if GNUPLOT_VERSION.is_err() {
panic!("Gnuplot plotting backend was requested, but gnuplot is not available. To continue, either install Gnuplot or allow Criterion.rs to fall back to using plotters.");
}
}
Criterion {
plotting_backend: backend,
..self
}
}
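    /// Sets the sample size (number of data points collected per benchmark).
    /// Panics if the requested size is below 10.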
pub fn sample_size(mut self, n: usize) -> Criterion<M> {
assert!(n >= 10);
self.config.sample_size = n;
self
}
pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
assert!(dur.to_nanos() > 0);
self.config.warm_up_time = dur;
self
}
pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
assert!(dur.to_nanos() > 0);
self.config.measurement_time = dur;
self
}
pub fn nresamples(mut self, n: usize) -> Criterion<M> {
assert!(n > 0);
if n <= 1000 {
println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.config.nresamples = n;
self
}
pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
assert!(threshold >= 0.0);
self.config.noise_threshold = threshold;
self
}
pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
assert!(cl > 0.0 && cl < 1.0);
self.config.confidence_level = cl;
self
}
pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
assert!(sl > 0.0 && sl < 1.0);
self.config.significance_level = sl;
self
}
fn create_plotter(&self) -> Box<dyn Plotter> {
match self.plotting_backend {
PlottingBackend::Gnuplot => Box::new(Gnuplot::default()),
PlottingBackend::Plotters => Box::new(PlottersBackend::default()),
}
}
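    /// Enables plot and HTML report generation by adding the HTML report to
    /// the configured reporters.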
pub fn with_plots(mut self) -> Criterion<M> {
self.plotting_enabled = true;
let mut reports: Vec<Box<dyn Report>> = vec![];
reports.push(Box::new(CliReport::new(false, false, false)));
reports.push(Box::new(FileCsvReport));
reports.push(Box::new(Html::new(self.create_plotter())));
self.report = Box::new(Reports::new(reports));
self
}
pub fn without_plots(mut self) -> Criterion<M> {
self.plotting_enabled = false;
let mut reports: Vec<Box<dyn Report>> = vec![];
reports.push(Box::new(CliReport::new(false, false, false)));
reports.push(Box::new(FileCsvReport));
self.report = Box::new(Reports::new(reports));
self
}
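    /// Returns true if Criterion.rs is able to generate plots. Always true,
    /// because the pure-Rust plotters backend is available as a fallback when
    /// gnuplot is not installed.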
pub fn can_plot(&self) -> bool {
true
}
pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
self.baseline = Baseline::Save;
self
}
pub fn retain_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
self.baseline = Baseline::Compare;
self
}
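    /// Filters benchmarks by name: only benchmarks whose IDs match the given
    /// regular expression are run. Panics if the expression is not a valid
    /// regex.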
pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
let filter_text = filter.into();
let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
panic!(
"Unable to parse '{}' as a regular expression: {}",
filter_text, err
)
});
self.filter = Some(filter);
self
}
#[doc(hidden)]
pub fn output_directory(mut self, path: &std::path::Path) -> Criterion<M> {
self.output_directory = path.to_string_lossy().into_owned();
self
}
#[doc(hidden)]
pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
self.profile_time = profile_time;
self
}
#[doc(hidden)]
pub fn final_summary(&self) {
if self.profile_time.is_some() || self.test_mode {
return;
}
let report_context = ReportContext {
output_directory: self.output_directory.clone(),
plot_config: PlotConfiguration::default(),
test_mode: self.test_mode,
};
self.report.final_summary(&report_context);
}
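    /// Parses the command-line arguments (filter, baselines, plotting backend,
    /// sample size, and so on) and applies them to this `Criterion` instance.
    /// Normally called by the generated benchmark harness rather than by hand.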
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
pub fn configure_from_args(mut self) -> Criterion<M> {
use clap::{App, Arg};
let matches = App::new("Criterion Benchmark")
.arg(Arg::with_name("FILTER")
.help("Skip benchmarks whose names do not contain FILTER.")
.index(1))
.arg(Arg::with_name("color")
.short("c")
.long("color")
.alias("colour")
.takes_value(true)
.possible_values(&["auto", "always", "never"])
.default_value("auto")
.help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
.arg(Arg::with_name("verbose")
.short("v")
.long("verbose")
.help("Print additional statistical information."))
.arg(Arg::with_name("noplot")
.short("n")
.long("noplot")
.help("Disable plot and HTML generation."))
.arg(Arg::with_name("save-baseline")
.short("s")
.long("save-baseline")
.default_value("base")
.help("Save results under a named baseline."))
.arg(Arg::with_name("baseline")
.short("b")
.long("baseline")
.takes_value(true)
.conflicts_with("save-baseline")
.help("Compare to a named baseline."))
.arg(Arg::with_name("list")
.long("list")
.help("List all benchmarks"))
.arg(Arg::with_name("profile-time")
.long("profile-time")
.takes_value(true)
.help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler."))
.arg(Arg::with_name("load-baseline")
.long("load-baseline")
.takes_value(true)
.conflicts_with("profile-time")
.requires("baseline")
.help("Load a previous baseline instead of sampling new data."))
.arg(Arg::with_name("sample-size")
.long("sample-size")
.takes_value(true)
.help("Changes the default size of the sample for this run."))
.arg(Arg::with_name("warm-up-time")
.long("warm-up-time")
.takes_value(true)
.help("Changes the default warm up time for this run."))
.arg(Arg::with_name("measurement-time")
.long("measurement-time")
.takes_value(true)
.help("Changes the default measurement time for this run."))
.arg(Arg::with_name("nresamples")
.long("nresamples")
.takes_value(true)
.help("Changes the default number of resamples for this run."))
.arg(Arg::with_name("noise-threshold")
.long("noise-threshold")
.takes_value(true)
.help("Changes the default noise threshold for this run."))
.arg(Arg::with_name("confidence-level")
.long("confidence-level")
.takes_value(true)
.help("Changes the default confidence level for this run."))
.arg(Arg::with_name("significance-level")
.long("significance-level")
.takes_value(true)
.help("Changes the default significance level for this run."))
.arg(Arg::with_name("test")
.hidden(true)
.long("test")
.help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results."))
.arg(Arg::with_name("bench")
.hidden(true)
.long("bench"))
.arg(Arg::with_name("plotting-backend")
.long("plotting-backend")
.takes_value(true)
.possible_values(&["gnuplot", "plotters"])
.help("Set the plotting backend. By default, Criterion will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
.arg(Arg::with_name("version")
.hidden(true)
.short("V")
.long("version"))
.after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.
To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.
To test that the benchmarks work, run `cargo test --benches`
")
.get_matches();
if let Some(filter) = matches.value_of("FILTER") {
self = self.with_filter(filter);
}
let verbose = matches.is_present("verbose");
let stdout_isatty = atty::is(atty::Stream::Stdout);
let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
let enable_text_coloring;
match matches.value_of("color") {
Some("always") => {
enable_text_coloring = true;
}
Some("never") => {
enable_text_coloring = false;
enable_text_overwrite = false;
}
_ => enable_text_coloring = stdout_isatty,
}
match matches.value_of("plotting-backend") {
Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
Some(val) => panic!("Unexpected plotting backend '{}'", val),
None => {}
}
if matches.is_present("noplot") || matches.is_present("test") {
self = self.without_plots();
} else {
self = self.with_plots();
}
if let Some(dir) = matches.value_of("save-baseline") {
self.baseline = Baseline::Save;
self.baseline_directory = dir.to_owned()
}
if let Some(dir) = matches.value_of("baseline") {
self.baseline = Baseline::Compare;
self.baseline_directory = dir.to_owned();
}
let mut reports: Vec<Box<dyn Report>> = vec![];
reports.push(Box::new(CliReport::new(
enable_text_overwrite,
enable_text_coloring,
verbose,
)));
reports.push(Box::new(FileCsvReport));
if matches.is_present("profile-time") {
let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
if num_seconds < 1 {
println!("Profile time must be at least one second.");
std::process::exit(1);
}
self.profile_time = Some(Duration::from_secs(num_seconds));
}
if let Some(dir) = matches.value_of("load-baseline") {
self.load_baseline = Some(dir.to_owned());
}
let bench = matches.is_present("bench");
let test = matches.is_present("test");
self.test_mode = match (bench, test) {
            (true, true) => true,   // cargo bench -- --test should run the tests
            (true, false) => false, // cargo bench should run the benchmarks
            (false, _) => true,     // cargo test --benches should run the tests
        };
if matches.is_present("sample-size") {
let num_size = value_t!(matches.value_of("sample-size"), usize).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_size >= 10);
self.config.sample_size = num_size;
}
if matches.is_present("warm-up-time") {
let num_seconds = value_t!(matches.value_of("warm-up-time"), u64).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
let dur = std::time::Duration::new(num_seconds, 0);
assert!(dur.to_nanos() > 0);
self.config.warm_up_time = dur;
}
if matches.is_present("measurement-time") {
let num_seconds =
value_t!(matches.value_of("measurement-time"), u64).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
let dur = std::time::Duration::new(num_seconds, 0);
assert!(dur.to_nanos() > 0);
self.config.measurement_time = dur;
}
if matches.is_present("nresamples") {
let num_resamples =
value_t!(matches.value_of("nresamples"), usize).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_resamples > 0);
self.config.nresamples = num_resamples;
}
if matches.is_present("noise-threshold") {
let num_noise_threshold = value_t!(matches.value_of("noise-threshold"), f64)
.unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_noise_threshold > 0.0);
self.config.noise_threshold = num_noise_threshold;
}
if matches.is_present("confidence-level") {
let num_confidence_level = value_t!(matches.value_of("confidence-level"), f64)
.unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);
self.config.confidence_level = num_confidence_level;
}
if matches.is_present("significance-level") {
let num_significance_level = value_t!(matches.value_of("significance-level"), f64)
.unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_significance_level > 0.0 && num_significance_level < 1.0);
self.config.significance_level = num_significance_level;
}
if matches.is_present("list") {
self.test_mode = true;
self.list_mode = true;
}
if self.profile_time.is_none() && self.plotting_enabled {
reports.push(Box::new(Html::new(self.create_plotter())));
}
self.report = Box::new(Reports::new(reports));
self
}
fn filter_matches(&self, id: &str) -> bool {
match self.filter {
Some(ref regex) => regex.is_match(id),
None => true,
}
}
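    /// Creates a `BenchmarkGroup` with the given name; related benchmarks
    /// added to the group are analyzed and reported together.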
pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
BenchmarkGroup::new(self, group_name.into())
}
}
impl<M> Criterion<M>
where
M: Measurement + 'static,
{
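    /// Benchmarks a single function under the given ID. The closure receives a
    /// `Bencher`, which it must use (via `iter` or a related method) to time
    /// the code under test.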
pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
where
F: FnMut(&mut Bencher<'_, M>),
{
self.benchmark_group(id)
.bench_function(BenchmarkId::no_function(), f);
self
}
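    /// Benchmarks a function that takes an input value. The `BenchmarkId` must
    /// carry both a function name and a parameter, which are used as the group
    /// name and the benchmark name respectively.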
pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
where
F: FnMut(&mut Bencher<'_, M>, &I),
{
let group_name = id.function_name.unwrap();
let parameter = id.parameter.unwrap();
self.benchmark_group(group_name).bench_with_input(
BenchmarkId::no_function_with_input(parameter),
input,
f,
);
self
}
    #[doc(hidden)]
    pub fn bench_function_over_inputs<I, F>(
&mut self,
id: &str,
f: F,
inputs: I,
) -> &mut Criterion<M>
where
I: IntoIterator,
I::Item: fmt::Debug + 'static,
F: FnMut(&mut Bencher<'_, M>, &I::Item) + 'static,
{
self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
}
    #[doc(hidden)]
    pub fn bench_functions<I>(
&mut self,
id: &str,
funs: Vec<Fun<I, M>>,
input: I,
) -> &mut Criterion<M>
where
I: fmt::Debug + 'static,
{
let benchmark = ParameterizedBenchmark::with_functions(
funs.into_iter().map(|fun| fun.f).collect(),
vec![input],
);
self.bench(id, benchmark)
}
    #[doc(hidden)]
    pub fn bench<B: BenchmarkDefinition<M>>(
&mut self,
group_id: &str,
benchmark: B,
) -> &mut Criterion<M> {
benchmark.run(group_id, self);
self
}
}
trait DurationExt {
fn to_nanos(&self) -> u64;
}
const NANOS_PER_SEC: u64 = 1_000_000_000;
impl DurationExt for Duration {
fn to_nanos(&self) -> u64 {
self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
}
}
#[derive(Clone, Copy, PartialEq, Deserialize, Serialize, Debug)]
struct ConfidenceInterval {
confidence_level: f64,
lower_bound: f64,
upper_bound: f64,
}
#[derive(Clone, Copy, PartialEq, Deserialize, Serialize, Debug)]
struct Estimate {
confidence_interval: ConfidenceInterval,
point_estimate: f64,
standard_error: f64,
}
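/// Builds an `Estimate` (point estimate, confidence interval at level `cl`,
/// and standard error) for each statistic from its bootstrap distribution.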
fn build_estimates(
distributions: &Distributions,
points: &BTreeMap<Statistic, f64>,
cl: f64,
) -> Estimates {
distributions
.iter()
.map(|(&statistic, distribution)| {
let point_estimate = points[&statistic];
let (lb, ub) = distribution.confidence_interval(cl);
(
statistic,
Estimate {
confidence_interval: ConfidenceInterval {
confidence_level: cl,
lower_bound: lb,
upper_bound: ub,
},
point_estimate,
standard_error: distribution.std_dev(None),
},
)
})
.collect()
}
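/// Describes the amount of data processed per iteration, either as a number of
/// bytes or a number of elements; used to report throughput rates alongside
/// timing results.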
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum Throughput {
Bytes(u64),
Elements(u64),
}
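/// Axis scaling for the summary plots: linear or logarithmic.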
#[derive(Debug, Clone, Copy)]
pub enum AxisScale {
Linear,
Logarithmic,
}
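/// Per-benchmark plotting settings; currently only the scale of the summary
/// plot axes can be configured, via `summary_scale`.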
#[derive(Debug, Clone)]
pub struct PlotConfiguration {
summary_scale: AxisScale,
}
impl Default for PlotConfiguration {
fn default() -> PlotConfiguration {
PlotConfiguration {
summary_scale: AxisScale::Linear,
}
}
}
impl PlotConfiguration {
pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
self.summary_scale = new_scale;
self
}
}
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
for bench in benches {
bench();
}
Criterion::default().configure_from_args().final_summary();
}