#![warn(missing_docs)]
#![warn(bare_trait_objects)]
#![cfg_attr(feature = "real_blackbox", feature(test))]
#![cfg_attr(
feature = "cargo-clippy",
allow(
clippy::just_underscores_and_digits, // Used in the stats code
clippy::transmute_ptr_to_ptr, // Used in the stats code
clippy::option_as_ref_deref, // Remove when MSRV bumped above 1.40
clippy::manual_non_exhaustive, // Remove when MSRV bumped above 1.40
clippy::match_like_matches_macro, // Remove when MSRV bumped above 1.42
)
)]
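//! A statistics-driven micro-benchmarking library.
//!
//! Criterion.rs collects samples of the routine under test, runs a bootstrap-based
//! statistical analysis on the measurements, compares the results against previously
//! saved baselines to detect regressions and improvements, and can generate plots and
//! HTML reports.
//!
//! Benchmarks are normally defined with the `criterion_group!` and `criterion_main!`
//! macros and executed via `cargo bench`; see the type-level documentation on
//! `Criterion` for a minimal skeleton.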
#[cfg(test)]
extern crate approx;
#[cfg(test)]
extern crate quickcheck;
use clap::value_t;
use regex::Regex;
#[macro_use]
extern crate lazy_static;
#[cfg(feature = "real_blackbox")]
extern crate test;
#[macro_use]
extern crate serde_derive;
#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
#[macro_use]
mod benchmark_group;
/// Traits for plugging in the async executor used by asynchronous benchmarks.
pub mod async_executor;
mod bencher;
mod connection;
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod html;
mod kde;
mod macros;
/// The `Measurement` trait and the default `WallTime` measurement.
pub mod measurement;
mod plot;
/// The `Profiler` trait used to hook external profilers into `--profile-time` runs.
pub mod profiler;
mod report;
mod routine;
mod stats;
use std::cell::RefCell;
use std::collections::HashSet;
use std::default::Default;
use std::env;
use std::fmt;
use std::iter::IntoIterator;
use std::marker::PhantomData;
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::{Mutex, MutexGuard};
use std::time::Duration;
use criterion_plot::{Version, VersionError};
use crate::benchmark::BenchmarkConfig;
use crate::benchmark::NamedRoutine;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
use crate::csv_report::FileCsvReport;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
use crate::plot::{Gnuplot, Plotter, PlottersBackend};
use crate::profiler::{ExternalProfiler, Profiler};
use crate::report::{BencherReport, CliReport, Report, ReportContext, Reports};
use crate::routine::Function;
#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
#[allow(deprecated)]
pub use crate::benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};
lazy_static! {
static ref DEBUG_ENABLED: bool = std::env::var_os("CRITERION_DEBUG").is_some();
static ref GNUPLOT_VERSION: Result<Version, VersionError> = criterion_plot::version();
static ref DEFAULT_PLOTTING_BACKEND: PlottingBackend = {
match &*GNUPLOT_VERSION {
Ok(_) => PlottingBackend::Gnuplot,
Err(e) => {
match e {
VersionError::Exec(_) => println!("Gnuplot not found, using plotters backend"),
e => println!(
"Gnuplot not found or not usable, using plotters backend\n{}",
e
),
};
PlottingBackend::Plotters
}
}
};
static ref CARGO_CRITERION_CONNECTION: Option<Mutex<Connection>> = {
match std::env::var("CARGO_CRITERION_PORT") {
Ok(port_str) => {
let port: u16 = port_str.parse().ok()?;
let stream = TcpStream::connect(("localhost", port)).ok()?;
Some(Mutex::new(Connection::new(stream).ok()?))
}
Err(_) => None,
}
};
static ref DEFAULT_OUTPUT_DIRECTORY: PathBuf = {
if let Some(value) = env::var_os("CRITERION_HOME") {
PathBuf::from(value)
} else if let Some(path) = cargo_target_directory() {
path.join("criterion")
} else {
PathBuf::from("target/criterion")
}
};
}
fn debug_enabled() -> bool {
*DEBUG_ENABLED
}
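/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark.
///
/// This variant is backed by the unstable `test::black_box` intrinsic and is only
/// available when the `real_blackbox` feature is enabled.
///
/// A minimal illustration (the values here are placeholders; in a benchmark the argument
/// would be the input you want to keep the optimizer from constant-folding):
///
/// ```
/// use criterion::black_box;
///
/// let answer = black_box(2u64) + black_box(40u64);
/// assert_eq!(answer, 42);
/// ```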
#[cfg(feature = "real_blackbox")]
pub fn black_box<T>(dummy: T) -> T {
test::black_box(dummy)
}
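/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark.
///
/// This stable-compatible variant uses a volatile read to hide the value from the
/// optimizer; it may add a small amount of overhead compared to the `real_blackbox`
/// version.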
#[cfg(not(feature = "real_blackbox"))]
pub fn black_box<T>(dummy: T) -> T {
unsafe {
let ret = std::ptr::read_volatile(&dummy);
std::mem::forget(dummy);
ret
}
}
#[doc(hidden)]
pub struct Fun<I: fmt::Debug, M: Measurement + 'static = WallTime> {
f: NamedRoutine<I, M>,
_phantom: PhantomData<M>,
}
impl<I, M: Measurement> Fun<I, M>
where
I: fmt::Debug + 'static,
{
pub fn new<F>(name: &str, f: F) -> Fun<I, M>
where
F: FnMut(&mut Bencher<'_, M>, &I) + 'static,
{
let routine = NamedRoutine {
id: name.to_owned(),
f: Box::new(RefCell::new(Function::new(f))),
};
Fun {
f: routine,
_phantom: PhantomData,
}
}
}
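/// Controls the batch size used by `Bencher::iter_batched` and related methods, i.e. how
/// many iterations share a single pass of the setup closure.
///
/// Larger batches amortize measurement overhead but keep more setup outputs in memory at
/// once; `SmallInput` is the usual choice when each input is cheap and small, while
/// `LargeInput` and `PerIteration` trade timing precision for lower memory use.
///
/// A minimal sketch (the setup and routine closures are placeholders):
///
/// ```no_run
/// use criterion::{BatchSize, Criterion};
///
/// fn bench(c: &mut Criterion) {
///     c.bench_function("sum_with_setup", |b| {
///         b.iter_batched(
///             || (0u64..1024).collect::<Vec<_>>(), // setup: build a fresh input
///             |data| data.iter().sum::<u64>(),     // routine: the code being measured
///             BatchSize::SmallInput,
///         )
///     });
/// }
/// ```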
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// Suitable for small, cheap-to-create inputs; each batch runs roughly one tenth of the iterations.
    SmallInput,
    /// Suitable for large or expensive inputs; each batch runs roughly one thousandth of the iterations.
    LargeInput,
    /// Creates a fresh input for every single iteration (one iteration per batch).
    PerIteration,
    /// Divides the iterations into approximately the given number of batches.
    NumBatches(u64),
    /// Runs exactly the given number of iterations per batch.
    NumIterations(u64),
#[doc(hidden)]
__NonExhaustive,
}
impl BatchSize {
fn iters_per_batch(self, iters: u64) -> u64 {
match self {
BatchSize::SmallInput => (iters + 10 - 1) / 10,
BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
BatchSize::PerIteration => 1,
BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
BatchSize::NumIterations(size) => size,
BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
}
}
}
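/// Selects what is done with the baseline stored under the configured baseline directory.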
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
    /// Compare this run against the stored baseline without overwriting it.
    Compare,
    /// Overwrite the stored baseline with the measurements from this run.
    Save,
}
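/// Selects which backend is used to render the plots for the HTML report.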
#[derive(Debug, Clone, Copy)]
pub enum PlottingBackend {
    /// Generate plots with the external `gnuplot` command.
    Gnuplot,
    /// Generate plots with the pure-Rust `plotters` crate.
    Plotters,
}
impl PlottingBackend {
fn create_plotter(&self) -> Box<dyn Plotter> {
match self {
PlottingBackend::Gnuplot => Box::new(Gnuplot::default()),
PlottingBackend::Plotters => Box::new(PlottersBackend::default()),
}
}
}
#[derive(Debug, Clone)]
pub(crate) enum Mode {
Benchmark,
List,
Test,
Profile(Duration),
}
impl Mode {
pub fn is_benchmark(&self) -> bool {
match self {
Mode::Benchmark => true,
_ => false,
}
}
}
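/// The benchmark manager.
///
/// `Criterion` owns the benchmark configuration (sample size, warm-up and measurement
/// times, statistical parameters, report settings) and drives warm-up, measurement,
/// analysis and reporting for every benchmark registered with it.
///
/// A minimal benchmark skeleton (`add` is a stand-in for the code under test):
///
/// ```no_run
/// use criterion::{black_box, criterion_group, criterion_main, Criterion};
///
/// fn add(a: u64, b: u64) -> u64 {
///     a + b
/// }
///
/// fn bench(c: &mut Criterion) {
///     c.bench_function("add", |b| b.iter(|| add(black_box(2), black_box(40))));
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```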
pub struct Criterion<M: Measurement = WallTime> {
config: BenchmarkConfig,
filter: Option<Regex>,
report: Reports,
output_directory: PathBuf,
baseline_directory: String,
baseline: Baseline,
load_baseline: Option<String>,
all_directories: HashSet<String>,
all_titles: HashSet<String>,
measurement: M,
profiler: Box<RefCell<dyn Profiler>>,
connection: Option<MutexGuard<'static, Connection>>,
mode: Mode,
}
fn cargo_target_directory() -> Option<PathBuf> {
#[derive(Deserialize)]
struct Metadata {
target_directory: PathBuf,
}
env::var_os("CARGO_TARGET_DIR")
.map(PathBuf::from)
.or_else(|| {
let output = Command::new(env::var_os("CARGO")?)
.args(&["metadata", "--format-version", "1"])
.output()
.ok()?;
let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
Some(metadata.target_directory)
})
}
impl Default for Criterion {
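    /// Creates a benchmark manager with the default configuration:
    ///
    /// - sample size: 100 measurements
    /// - warm-up time: 3 s
    /// - measurement time: 5 s
    /// - bootstrap resamples: 100 000
    /// - noise threshold: 0.01
    /// - confidence level: 0.95
    /// - significance level: 0.05
    /// - plotting: enabled, using gnuplot if it is installed and the plotters backend
    ///   otherwise
    /// - no filter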
fn default() -> Criterion {
let reports = Reports {
cli_enabled: true,
cli: CliReport::new(false, false, false),
bencher_enabled: false,
bencher: BencherReport,
html_enabled: true,
html: Html::new(DEFAULT_PLOTTING_BACKEND.create_plotter()),
csv_enabled: true,
csv: FileCsvReport,
};
let mut criterion = Criterion {
config: BenchmarkConfig {
confidence_level: 0.95,
measurement_time: Duration::new(5, 0),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
warm_up_time: Duration::new(3, 0),
sampling_mode: SamplingMode::Auto,
},
filter: None,
report: reports,
baseline_directory: "base".to_owned(),
baseline: Baseline::Save,
load_baseline: None,
output_directory: DEFAULT_OUTPUT_DIRECTORY.clone(),
all_directories: HashSet::new(),
all_titles: HashSet::new(),
measurement: WallTime,
profiler: Box::new(RefCell::new(ExternalProfiler)),
connection: CARGO_CRITERION_CONNECTION
.as_ref()
.map(|mtx| mtx.lock().unwrap()),
mode: Mode::Benchmark,
};
if criterion.connection.is_some() {
criterion.report.cli_enabled = false;
criterion.report.bencher_enabled = false;
criterion.report.csv_enabled = false;
criterion.report.html_enabled = false;
}
criterion
}
}
impl<M: Measurement> Criterion<M> {
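    /// Changes the measurement used for benchmarks run with this `Criterion` instance.
    /// The default measures wall-clock time; see the `measurement` module for
    /// implementing custom measurements.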
pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
Criterion {
config: self.config,
filter: self.filter,
report: self.report,
baseline_directory: self.baseline_directory,
baseline: self.baseline,
load_baseline: self.load_baseline,
output_directory: self.output_directory,
all_directories: self.all_directories,
all_titles: self.all_titles,
measurement: m,
profiler: self.profiler,
connection: self.connection,
mode: self.mode,
}
}
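    /// Installs a custom profiler that is started and stopped around each benchmark when
    /// running in `--profile-time` mode; see the `profiler` module for details.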
pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
Criterion {
profiler: Box::new(RefCell::new(p)),
..self
}
}
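    /// Selects the plotting backend used to generate the plots for the HTML report.
    ///
    /// # Panics
    ///
    /// Panics if `PlottingBackend::Gnuplot` is requested but gnuplot is not available.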
pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
if let PlottingBackend::Gnuplot = backend {
if GNUPLOT_VERSION.is_err() {
panic!("Gnuplot plotting backend was requested, but gnuplot is not available. To continue, either install Gnuplot or allow Criterion.rs to fall back to using plotters.");
}
}
self.report.html = Html::new(backend.create_plotter());
self
}
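    /// Changes the number of measurements collected per benchmark (default: 100).
    ///
    /// # Panics
    ///
    /// Panics if `n` is less than 10.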
pub fn sample_size(mut self, n: usize) -> Criterion<M> {
assert!(n >= 10);
self.config.sample_size = n;
self
}
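    /// Changes how long each routine is run to warm up before measuring (default: 3 s).
    ///
    /// # Panics
    ///
    /// Panics if the duration is zero.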
pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
assert!(dur.to_nanos() > 0);
self.config.warm_up_time = dur;
self
}
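    /// Changes the target amount of time spent measuring each benchmark (default: 5 s).
    /// Criterion.rs may run longer than this if the requested sample size cannot be
    /// collected within the target time.
    ///
    /// # Panics
    ///
    /// Panics if the duration is zero.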
pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
assert!(dur.to_nanos() > 0);
self.config.measurement_time = dur;
self
}
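    /// Changes the number of bootstrap resamples used in the statistical analysis
    /// (default: 100 000); a warning is printed if `n` is 1000 or lower.
    ///
    /// # Panics
    ///
    /// Panics if `n` is zero.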
pub fn nresamples(mut self, n: usize) -> Criterion<M> {
assert!(n > 0);
if n <= 1000 {
println!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.config.nresamples = n;
self
}
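    /// Changes the noise threshold (default: 0.01); relative changes in performance
    /// smaller than this fraction are treated as noise rather than as regressions or
    /// improvements.
    ///
    /// # Panics
    ///
    /// Panics if the threshold is negative.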
pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
assert!(threshold >= 0.0);
self.config.noise_threshold = threshold;
self
}
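    /// Changes the confidence level of the confidence intervals computed during analysis
    /// (default: 0.95); a warning is printed for values below 0.5.
    ///
    /// # Panics
    ///
    /// Panics unless the value is strictly between 0 and 1.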
pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
println!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.config.confidence_level = cl;
self
}
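    /// Changes the significance level used when testing whether performance has changed
    /// (default: 0.05).
    ///
    /// # Panics
    ///
    /// Panics unless the value is strictly between 0 and 1.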
pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
assert!(sl > 0.0 && sl < 1.0);
self.config.significance_level = sl;
self
}
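    /// Enables plotting and HTML report generation (the default). This is ignored when
    /// running under cargo-criterion, which does its own reporting.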
pub fn with_plots(mut self) -> Criterion<M> {
if self.connection.is_none() {
self.report.html_enabled = true;
}
self
}
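    /// Disables plotting and HTML report generation.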
pub fn without_plots(mut self) -> Criterion<M> {
self.report.html_enabled = false;
self
}
#[deprecated(
since = "0.3.4",
note = "No longer useful; since the plotters backend is available Criterion.rs can always generate plots"
)]
pub fn can_plot(&self) -> bool {
true
}
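    /// Names the baseline under which the results of this run are saved, overwriting any
    /// results previously stored under that name.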
pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
self.baseline = Baseline::Save;
self
}
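    /// Compares the results of this run against the named baseline without overwriting it.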
pub fn retain_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
self.baseline = Baseline::Compare;
self
}
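    /// Only runs benchmarks whose IDs match the given regular expression.
    ///
    /// # Panics
    ///
    /// Panics if the filter is not a valid regular expression.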
pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
let filter_text = filter.into();
let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
panic!(
"Unable to parse '{}' as a regular expression: {}",
filter_text, err
)
});
self.filter = Some(filter);
self
}
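    /// Overrides the automatic terminal detection and explicitly enables or disables
    /// colored command-line output.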
pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
self.report.cli.enable_text_coloring = enabled;
self
}
#[doc(hidden)]
pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
self.output_directory = path.to_owned();
self
}
#[doc(hidden)]
pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
match profile_time {
Some(time) => self.mode = Mode::Profile(time),
None => self.mode = Mode::Benchmark,
}
self
}
#[doc(hidden)]
pub fn final_summary(&self) {
if !self.mode.is_benchmark() {
return;
}
let report_context = ReportContext {
output_directory: self.output_directory.clone(),
plot_config: PlotConfiguration::default(),
};
self.report.final_summary(&report_context);
}
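    /// Applies the command-line arguments (filter, baselines, sample size, output format,
    /// and so on) to this `Criterion` instance. This is normally invoked for you by the
    /// `criterion_group!` macro, so most users never call it directly; invalid or
    /// conflicting options terminate the process with an error message.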
#[cfg_attr(feature = "cargo-clippy", allow(clippy::cognitive_complexity))]
pub fn configure_from_args(mut self) -> Criterion<M> {
use clap::{App, Arg};
let matches = App::new("Criterion Benchmark")
.arg(Arg::with_name("FILTER")
.help("Skip benchmarks whose names do not contain FILTER.")
.index(1))
.arg(Arg::with_name("color")
.short("c")
.long("color")
.alias("colour")
.takes_value(true)
.possible_values(&["auto", "always", "never"])
.default_value("auto")
.help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
.arg(Arg::with_name("verbose")
.short("v")
.long("verbose")
.help("Print additional statistical information."))
.arg(Arg::with_name("noplot")
.short("n")
.long("noplot")
.help("Disable plot and HTML generation."))
.arg(Arg::with_name("save-baseline")
.short("s")
.long("save-baseline")
.default_value("base")
.help("Save results under a named baseline."))
.arg(Arg::with_name("baseline")
.short("b")
.long("baseline")
.takes_value(true)
.conflicts_with("save-baseline")
.help("Compare to a named baseline."))
.arg(Arg::with_name("list")
.long("list")
.help("List all benchmarks")
.conflicts_with_all(&["test", "profile-time"]))
.arg(Arg::with_name("profile-time")
.long("profile-time")
.takes_value(true)
.help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
.conflicts_with_all(&["test", "list"]))
.arg(Arg::with_name("load-baseline")
.long("load-baseline")
.takes_value(true)
.conflicts_with("profile-time")
.requires("baseline")
.help("Load a previous baseline instead of sampling new data."))
.arg(Arg::with_name("sample-size")
.long("sample-size")
.takes_value(true)
.help(&format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
.arg(Arg::with_name("warm-up-time")
.long("warm-up-time")
.takes_value(true)
.help(&format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
.arg(Arg::with_name("measurement-time")
.long("measurement-time")
.takes_value(true)
.help(&format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
.arg(Arg::with_name("nresamples")
.long("nresamples")
.takes_value(true)
.help(&format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
.arg(Arg::with_name("noise-threshold")
.long("noise-threshold")
.takes_value(true)
.help(&format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
.arg(Arg::with_name("confidence-level")
.long("confidence-level")
.takes_value(true)
.help(&format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
.arg(Arg::with_name("significance-level")
.long("significance-level")
.takes_value(true)
.help(&format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
.arg(Arg::with_name("test")
.hidden(true)
.long("test")
.help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
.conflicts_with_all(&["list", "profile-time"]))
.arg(Arg::with_name("bench")
.hidden(true)
.long("bench"))
.arg(Arg::with_name("plotting-backend")
.long("plotting-backend")
.takes_value(true)
.possible_values(&["gnuplot", "plotters"])
.help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
.arg(Arg::with_name("output-format")
.long("output-format")
.takes_value(true)
.possible_values(&["criterion", "bencher"])
.default_value("criterion")
.help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
.arg(Arg::with_name("nocapture")
.long("nocapture")
.hidden(true)
.help("Ignored, but added for compatibility with libtest."))
.arg(Arg::with_name("version")
.hidden(true)
.short("V")
.long("version"))
.after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.

To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.

To test that the benchmarks work, run `cargo test --benches`

NOTE: If you see an 'unrecognized option' error using any of the options above, see:
https://bheisler.github.io/criterion.rs/book/faq.html
")
.get_matches();
if self.connection.is_some() {
if let Some(color) = matches.value_of("color") {
if color != "auto" {
println!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
}
}
if matches.is_present("verbose") {
println!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
}
if matches.is_present("noplot") {
println!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
}
if let Some(backend) = matches.value_of("plotting-backend") {
println!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
}
if let Some(format) = matches.value_of("output-format") {
if format != "criterion" {
println!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
}
}
if matches.is_present("baseline")
|| matches
.value_of("save-baseline")
.map(|base| base != "base")
.unwrap_or(false)
|| matches.is_present("load-baseline")
{
println!("Error: baselines are not supported when running with cargo-criterion.");
std::process::exit(1);
}
}
let bench = matches.is_present("bench");
let test = matches.is_present("test");
        let test_mode = match (bench, test) {
            (true, true) => true,   // `cargo bench -- --test` should run the tests
            (true, false) => false, // `cargo bench` should run the benchmarks
            (false, _) => true,     // `cargo test --benches` should run the tests
        };
self.mode = if test_mode {
Mode::Test
} else if matches.is_present("list") {
Mode::List
} else if matches.is_present("profile-time") {
let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
if num_seconds < 1 {
println!("Profile time must be at least one second.");
std::process::exit(1);
}
Mode::Profile(Duration::from_secs(num_seconds))
} else {
Mode::Benchmark
};
if !self.mode.is_benchmark() {
self.connection = None;
}
if let Some(filter) = matches.value_of("FILTER") {
self = self.with_filter(filter);
}
match matches.value_of("plotting-backend") {
Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
Some(val) => panic!("Unexpected plotting backend '{}'", val),
None => {}
}
if matches.is_present("noplot") {
self = self.without_plots();
} else {
self = self.with_plots();
}
if let Some(dir) = matches.value_of("save-baseline") {
self.baseline = Baseline::Save;
self.baseline_directory = dir.to_owned()
}
if let Some(dir) = matches.value_of("baseline") {
self.baseline = Baseline::Compare;
self.baseline_directory = dir.to_owned();
}
if self.connection.is_some() {
self.report.cli_enabled = false;
self.report.bencher_enabled = false;
self.report.csv_enabled = false;
self.report.html_enabled = false;
} else {
match matches.value_of("output-format") {
Some("bencher") => {
self.report.bencher_enabled = true;
self.report.cli_enabled = false;
}
_ => {
let verbose = matches.is_present("verbose");
let stdout_isatty = atty::is(atty::Stream::Stdout);
let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
let enable_text_coloring;
match matches.value_of("color") {
Some("always") => {
enable_text_coloring = true;
}
Some("never") => {
enable_text_coloring = false;
enable_text_overwrite = false;
}
_ => enable_text_coloring = stdout_isatty,
};
self.report.bencher_enabled = false;
self.report.cli_enabled = true;
self.report.cli =
CliReport::new(enable_text_overwrite, enable_text_coloring, verbose);
}
};
}
if let Some(dir) = matches.value_of("load-baseline") {
self.load_baseline = Some(dir.to_owned());
}
if matches.is_present("sample-size") {
let num_size = value_t!(matches.value_of("sample-size"), usize).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_size >= 10);
self.config.sample_size = num_size;
}
if matches.is_present("warm-up-time") {
let num_seconds = value_t!(matches.value_of("warm-up-time"), u64).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
let dur = std::time::Duration::new(num_seconds, 0);
assert!(dur.to_nanos() > 0);
self.config.warm_up_time = dur;
}
if matches.is_present("measurement-time") {
let num_seconds =
value_t!(matches.value_of("measurement-time"), u64).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
let dur = std::time::Duration::new(num_seconds, 0);
assert!(dur.to_nanos() > 0);
self.config.measurement_time = dur;
}
if matches.is_present("nresamples") {
let num_resamples =
value_t!(matches.value_of("nresamples"), usize).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_resamples > 0);
self.config.nresamples = num_resamples;
}
if matches.is_present("noise-threshold") {
let num_noise_threshold = value_t!(matches.value_of("noise-threshold"), f64)
.unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_noise_threshold > 0.0);
self.config.noise_threshold = num_noise_threshold;
}
if matches.is_present("confidence-level") {
let num_confidence_level = value_t!(matches.value_of("confidence-level"), f64)
.unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);
self.config.confidence_level = num_confidence_level;
}
if matches.is_present("significance-level") {
let num_significance_level = value_t!(matches.value_of("significance-level"), f64)
.unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
assert!(num_significance_level > 0.0 && num_significance_level < 1.0);
self.config.significance_level = num_significance_level;
}
self
}
fn filter_matches(&self, id: &str) -> bool {
match self.filter {
Some(ref regex) => regex.is_match(id),
None => true,
}
}
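    /// Returns a `BenchmarkGroup`: a set of related benchmarks that are analyzed together
    /// and share a common configuration.
    ///
    /// A minimal sketch (the group and benchmark names, and the measured expressions, are
    /// placeholders):
    ///
    /// ```no_run
    /// use criterion::Criterion;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let mut group = c.benchmark_group("my_group");
    ///     group.bench_function("first", |b| b.iter(|| 1 + 1));
    ///     group.bench_function("second", |b| b.iter(|| 2 * 2));
    ///     group.finish();
    /// }
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the group name is empty.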
pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
let group_name = group_name.into();
if group_name.is_empty() {
panic!("Group name must not be empty.");
}
if let Some(conn) = &self.connection {
conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
.unwrap();
}
BenchmarkGroup::new(self, group_name)
}
}
impl<M> Criterion<M>
where
M: Measurement + 'static,
{
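    /// Benchmarks a single function under the given ID. For the full benchmark skeleton
    /// around this call, see the type-level documentation on `Criterion`.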
pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
where
F: FnMut(&mut Bencher<'_, M>),
{
self.benchmark_group(id)
.bench_function(BenchmarkId::no_function(), f);
self
}
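    /// Benchmarks a function that takes a borrowed input, recording the parameter as part
    /// of the benchmark ID.
    ///
    /// A minimal sketch (`from_elem` and the vector size are placeholders):
    ///
    /// ```no_run
    /// use criterion::{BenchmarkId, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let size = 1024usize;
    ///     c.bench_with_input(BenchmarkId::new("from_elem", size), &size, |b, &s| {
    ///         b.iter(|| vec![0u8; s]);
    ///     });
    /// }
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the `BenchmarkId` was created with `BenchmarkId::from_parameter`, since a
    /// function name is required here; use `BenchmarkId::new` or a `BenchmarkGroup`
    /// instead.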
pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
where
F: FnMut(&mut Bencher<'_, M>, &I),
{
let group_name = id.function_name.expect(
"Cannot use BenchmarkId::from_parameter with Criterion::bench_with_input. \
Consider using a BenchmarkGroup or BenchmarkId::new instead.",
);
let parameter = id.parameter.unwrap();
self.benchmark_group(group_name).bench_with_input(
BenchmarkId::no_function_with_input(parameter),
input,
f,
);
self
}
#[doc(hidden)]
#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
#[allow(deprecated)]
pub fn bench_function_over_inputs<I, F>(
&mut self,
id: &str,
f: F,
inputs: I,
) -> &mut Criterion<M>
where
I: IntoIterator,
I::Item: fmt::Debug + 'static,
F: FnMut(&mut Bencher<'_, M>, &I::Item) + 'static,
{
self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
}
#[doc(hidden)]
#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
#[allow(deprecated)]
pub fn bench_functions<I>(
&mut self,
id: &str,
funs: Vec<Fun<I, M>>,
input: I,
) -> &mut Criterion<M>
where
I: fmt::Debug + 'static,
{
let benchmark = ParameterizedBenchmark::with_functions(
funs.into_iter().map(|fun| fun.f).collect(),
vec![input],
);
self.bench(id, benchmark)
}
#[doc(hidden)]
#[deprecated(since = "0.3.4", note = "Please use BenchmarkGroups instead.")]
pub fn bench<B: BenchmarkDefinition<M>>(
&mut self,
group_id: &str,
benchmark: B,
) -> &mut Criterion<M> {
benchmark.run(group_id, self);
self
}
}
trait DurationExt {
fn to_nanos(&self) -> u64;
}
const NANOS_PER_SEC: u64 = 1_000_000_000;
impl DurationExt for Duration {
fn to_nanos(&self) -> u64 {
self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
}
}
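/// The throughput of a benchmark: how many bytes or elements the routine processes per
/// iteration. When set, reports show results as throughput in addition to time per
/// iteration.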
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum Throughput {
    /// Measure throughput in bytes processed per iteration.
    Bytes(u64),
    /// Measure throughput in elements processed per iteration.
    Elements(u64),
}
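/// The scale used for the axes of the summary plots.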
#[derive(Debug, Clone, Copy)]
pub enum AxisScale {
    /// Use a linear axis scale.
    Linear,
    /// Use a logarithmic axis scale.
    Logarithmic,
}
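/// Plot-related configuration for a benchmark group; currently this only controls the
/// axis scale of the summary plots.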
#[derive(Debug, Clone)]
pub struct PlotConfiguration {
summary_scale: AxisScale,
}
impl Default for PlotConfiguration {
fn default() -> PlotConfiguration {
PlotConfiguration {
summary_scale: AxisScale::Linear,
}
}
}
impl PlotConfiguration {
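    /// Sets the axis scale used for the summary plots (linear by default). Logarithmic
    /// scales are useful when the benchmarked inputs span several orders of magnitude.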
pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
self.summary_scale = new_scale;
self
}
}
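/// Controls how Criterion.rs distributes the measured iterations across the samples of a
/// benchmark.
///
/// `Auto` (the default) picks between `Linear` and `Flat` based on an estimate made
/// during warm-up; `Linear` gives better statistical resolution for fast routines, while
/// `Flat` is intended for routines that take a long time per iteration.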
#[derive(Debug, Clone, Copy)]
pub enum SamplingMode {
    /// Let Criterion.rs choose a sampling mode from the warm-up estimate (the default).
    Auto,
    /// Scale the iteration count linearly across samples; best for fast routines.
    Linear,
    /// Use the same iteration count for every sample; intended for long-running routines.
    Flat,
}
impl SamplingMode {
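    /// Resolves `Auto` into a concrete sampling mode: if a linear plan would overshoot
    /// the target time by more than a factor of two, flat sampling is used instead.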
pub(crate) fn choose_sampling_mode(
&self,
warmup_mean_execution_time: f64,
sample_count: u64,
target_time: f64,
) -> ActualSamplingMode {
match self {
SamplingMode::Linear => ActualSamplingMode::Linear,
SamplingMode::Flat => ActualSamplingMode::Flat,
SamplingMode::Auto => {
let total_runs = sample_count * (sample_count + 1) / 2;
let d =
(target_time / warmup_mean_execution_time / total_runs as f64).ceil() as u64;
let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time;
if expected_ns > (2.0 * target_time) {
ActualSamplingMode::Flat
} else {
ActualSamplingMode::Linear
}
}
}
}
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub(crate) enum ActualSamplingMode {
Linear,
Flat,
}
impl ActualSamplingMode {
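    /// Computes how many iterations to run for each sample.
    ///
    /// Linear sampling runs `d, 2d, ..., n*d` iterations across the `n` samples, choosing
    /// the smallest `d` that fits the target time; flat sampling runs the same iteration
    /// count for every sample. Both variants print a warning when even the minimal plan
    /// cannot be completed within the target time.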
pub(crate) fn iteration_counts(
&self,
warmup_mean_execution_time: f64,
sample_count: u64,
target_time: &Duration,
) -> Vec<u64> {
match self {
ActualSamplingMode::Linear => {
let n = sample_count;
let met = warmup_mean_execution_time;
let m_ns = target_time.to_nanos();
let total_runs = n * (n + 1) / 2;
let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
let expected_ns = total_runs as f64 * d as f64 * met;
if d == 1 {
let recommended_sample_size =
ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
println!(
", enable flat sampling, or reduce sample count to {}.",
recommended_sample_size
);
} else {
println!(" or enable flat sampling.");
}
}
                (1..=n).map(|a| a * d).collect::<Vec<u64>>()
}
ActualSamplingMode::Flat => {
let n = sample_count;
let met = warmup_mean_execution_time;
let m_ns = target_time.to_nanos() as f64;
let time_per_sample = m_ns / (n as f64);
let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);
let expected_ns = met * (iterations_per_sample * n) as f64;
if iterations_per_sample == 1 {
let recommended_sample_size =
ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
print!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
println!(", or reduce sample count to {}.", recommended_sample_size);
} else {
println!(".");
}
}
vec![iterations_per_sample; n as usize]
}
}
}
fn is_linear(&self) -> bool {
match self {
ActualSamplingMode::Linear => true,
_ => false,
}
}
fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 {
let c = target_time / met;
let sample_size = (-1.0 + (4.0 * c).sqrt()) / 2.0;
let sample_size = sample_size as u64;
let sample_size = (sample_size / 10) * 10;
if sample_size < 10 {
10
} else {
sample_size
}
}
fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 {
let sample_size = (target_time / met) as u64;
let sample_size = (sample_size / 10) * 10;
if sample_size < 10 {
10
} else {
sample_size
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct SavedSample {
sampling_mode: ActualSamplingMode,
iters: Vec<f64>,
times: Vec<f64>,
}
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
for bench in benches {
bench();
}
Criterion::default().configure_from_args().final_summary();
}
#[cfg(not(feature = "html_reports"))]
#[doc(hidden)]
pub fn __warn_about_html_reports_feature() {
if CARGO_CRITERION_CONNECTION.is_none() {
println!(
"WARNING: HTML report generation will become a non-default optional feature in Criterion.rs 0.4.0."
);
println!(
"This feature is being moved to cargo-criterion \
(https://github.com/bheisler/cargo-criterion) and will be optional in a future \
version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
enable the 'html_reports' feature in your Cargo.toml."
);
println!();
}
}
#[cfg(feature = "html_reports")]
#[doc(hidden)]
pub fn __warn_about_html_reports_feature() {
}
#[cfg(not(feature = "cargo_bench_support"))]
#[doc(hidden)]
pub fn __warn_about_cargo_bench_support_feature() {
if CARGO_CRITERION_CONNECTION.is_none() {
println!(
"WARNING: In Criterion.rs 0.4.0, running criterion benchmarks outside of cargo-criterion will become a default optional feature."
);
println!(
"The statistical analysis and reporting is being moved to cargo-criterion \
(https://github.com/bheisler/cargo-criterion) and will be optional in a future \
version of Criterion.rs. To silence this warning, either switch to cargo-criterion or \
enable the 'cargo_bench_support' feature in your Cargo.toml."
);
println!();
}
}
#[cfg(feature = "cargo_bench_support")]
#[doc(hidden)]
pub fn __warn_about_cargo_bench_support_feature() {
}