//! A statistics-driven micro-benchmarking library.
#![warn(clippy::doc_markdown, missing_docs)]
#![warn(bare_trait_objects)]
#![allow(
clippy::just_underscores_and_digits, // Used in the stats code
clippy::transmute_ptr_to_ptr, // Used in the stats code
)]
#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
compile_error!("Rayon cannot be used when targeting wasm32. Try disabling default features.");
use regex::Regex;
use serde::{Deserialize, Serialize};
#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
#[macro_use]
mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod html;
mod kde;
mod macros;
pub mod measurement;
mod plot;
pub mod profiler;
mod report;
mod routine;
mod stats;
use std::cell::RefCell;
use std::collections::HashSet;
use std::env;
use std::io::{stdout, IsTerminal};
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::{Mutex, MutexGuard};
use std::time::Duration;
use criterion_plot::{Version, VersionError};
use std::sync::OnceLock;
use crate::benchmark::BenchmarkConfig;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
#[cfg(feature = "plotters")]
use crate::plot::PlottersBackend;
use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};
#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};
fn gnuplot_version() -> &'static Result<Version, VersionError> {
static GNUPLOT_VERSION: OnceLock<Result<Version, VersionError>> = OnceLock::new();
GNUPLOT_VERSION.get_or_init(criterion_plot::version)
}
fn default_plotting_backend() -> &'static PlottingBackend {
static DEFAULT_PLOTTING_BACKEND: OnceLock<PlottingBackend> = OnceLock::new();
DEFAULT_PLOTTING_BACKEND.get_or_init(|| match gnuplot_version() {
Ok(_) => PlottingBackend::Gnuplot,
#[cfg(feature = "plotters")]
Err(e) => {
match e {
VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
e => eprintln!(
"Gnuplot not found or not usable, using plotters backend\n{}",
e
),
};
PlottingBackend::Plotters
}
#[cfg(not(feature = "plotters"))]
Err(_) => PlottingBackend::None,
})
}
fn cargo_criterion_connection() -> &'static Option<Mutex<Connection>> {
static CARGO_CRITERION_CONNECTION: OnceLock<Option<Mutex<Connection>>> = OnceLock::new();
CARGO_CRITERION_CONNECTION.get_or_init(|| match std::env::var("CARGO_CRITERION_PORT") {
Ok(port_str) => {
let port: u16 = port_str.parse().ok()?;
let stream = TcpStream::connect(("localhost", port)).ok()?;
Some(Mutex::new(Connection::new(stream).ok()?))
}
Err(_) => None,
})
}
fn default_output_directory() -> &'static PathBuf {
static DEFAULT_OUTPUT_DIRECTORY: OnceLock<PathBuf> = OnceLock::new();
DEFAULT_OUTPUT_DIRECTORY.get_or_init(|| {
if let Some(value) = env::var_os("CRITERION_HOME") {
PathBuf::from(value)
} else if let Some(path) = cargo_target_directory() {
path.join("criterion")
} else {
PathBuf::from("target/criterion")
}
})
}
fn debug_enabled() -> bool {
static DEBUG_ENABLED: OnceLock<bool> = OnceLock::new();
*DEBUG_ENABLED.get_or_init(|| std::env::var_os("CRITERION_DEBUG").is_some())
}
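/// A function that is opaque to the optimizer, used to prevent the compiler from optimizing
/// away computations inside a benchmark. This is now just a thin wrapper around
/// [`std::hint::black_box`], which should be used instead.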
#[deprecated(note = "use `std::hint::black_box()` instead")]
pub fn black_box<T>(dummy: T) -> T {
std::hint::black_box(dummy)
}
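/// The number of iterations to run in each batch of an `iter_batched` or `iter_batched_ref`
/// benchmark, expressed as a hint about how expensive the per-iteration setup values are to
/// keep in memory.
///
/// A minimal usage sketch (the setup closure and sort routine are placeholders):
///
/// ```no_run
/// use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
///
/// fn bench(c: &mut Criterion) {
///     c.bench_function("sort", |b| {
///         b.iter_batched(
///             // Setup: construct a fresh, unsorted vector for each iteration (not timed).
///             || (0..1_000u32).rev().collect::<Vec<_>>(),
///             // Routine: the code being measured.
///             |mut v| v.sort(),
///             BatchSize::SmallInput,
///         )
///     });
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```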
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// The setup values are cheap to keep in memory; iterations are split into roughly ten
    /// large batches.
    SmallInput,
    /// The setup values are expensive to keep in memory; iterations are split into roughly one
    /// thousand smaller batches.
    LargeInput,
    /// Each batch contains exactly one iteration; use when even a handful of setup values
    /// cannot be kept in memory at once.
    PerIteration,
    /// Split the iterations into the given number of batches.
    NumBatches(u64),
    /// Run the given number of iterations in every batch.
    NumIterations(u64),
    #[doc(hidden)]
    __NonExhaustive,
}
impl BatchSize {
fn iters_per_batch(self, iters: u64) -> u64 {
match self {
BatchSize::SmallInput => (iters + 10 - 1) / 10,
BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
BatchSize::PerIteration => 1,
BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
BatchSize::NumIterations(size) => size,
BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
}
}
}
/// How benchmark results should be saved and compared against a named baseline.
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
    /// Compare against the baseline where it exists; benchmarks without a saved baseline are
    /// simply not compared.
    CompareLenient,
    /// Compare against the baseline, failing if any benchmark lacks a saved baseline.
    CompareStrict,
    /// Save these results as the new baseline.
    Save,
    /// Discard the results without saving a baseline.
    Discard,
}
/// The backend used to render plots and HTML reports.
#[derive(Debug, Clone, Copy)]
pub enum PlottingBackend {
    /// Render plots with the external `gnuplot` command.
    Gnuplot,
    /// Render plots with the pure-Rust `plotters` crate (requires the `plotters` feature).
    Plotters,
    /// Do not render plots or HTML reports.
    None,
}
impl PlottingBackend {
fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
match self {
PlottingBackend::Gnuplot => Some(Box::<Gnuplot>::default()),
#[cfg(feature = "plotters")]
PlottingBackend::Plotters => Some(Box::<PlottersBackend>::default()),
#[cfg(not(feature = "plotters"))]
PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
PlottingBackend::None => None,
}
}
}
#[derive(Debug, Clone)]
pub(crate) enum Mode {
Benchmark,
List(ListFormat),
Test,
Profile(Duration),
}
impl Mode {
pub fn is_benchmark(&self) -> bool {
matches!(self, Mode::Benchmark)
}
pub fn is_terse(&self) -> bool {
matches!(self, Mode::List(ListFormat::Terse))
}
}
#[derive(Debug, Default, Clone)]
pub(crate) enum ListFormat {
#[default]
Pretty,
Terse,
}
/// Selects which benchmarks a `Criterion` instance will run.
#[derive(Clone, Debug)]
pub enum BenchmarkFilter {
    /// Run every benchmark.
    AcceptAll,
    /// Run only the benchmarks whose IDs match this regular expression.
    Regex(Regex),
    /// Run only the benchmark whose ID is exactly this string.
    Exact(String),
    /// Do not run any benchmarks.
    RejectAll,
}
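/// The benchmark manager. A `Criterion` instance holds the configuration used to run
/// benchmarks (sample size, warm-up and measurement times, plotting backend, baseline
/// handling, and so on) and is used to define benchmarks, either directly or through a
/// [`BenchmarkGroup`].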
pub struct Criterion<M: Measurement = WallTime> {
config: BenchmarkConfig,
filter: BenchmarkFilter,
report: Reports,
output_directory: PathBuf,
baseline_directory: String,
baseline: Baseline,
load_baseline: Option<String>,
all_directories: HashSet<String>,
all_titles: HashSet<String>,
measurement: M,
profiler: Box<RefCell<dyn Profiler>>,
connection: Option<MutexGuard<'static, Connection>>,
mode: Mode,
}
fn cargo_target_directory() -> Option<PathBuf> {
#[derive(Deserialize)]
struct Metadata {
target_directory: PathBuf,
}
env::var_os("CARGO_TARGET_DIR")
.map(PathBuf::from)
.or_else(|| {
let output = Command::new(env::var_os("CARGO")?)
.args(["metadata", "--format-version", "1"])
.output()
.ok()?;
let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
Some(metadata.target_directory)
})
}
impl Default for Criterion {
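    /// Creates a benchmark manager with the default configuration: 100 samples, 3 s warm-up,
    /// 5 s measurement time, 100 000 bootstrap resamples, a noise threshold of 0.01, a
    /// confidence level of 0.95, a significance level of 0.05, automatic sampling mode,
    /// plots rendered with the default plotting backend, and no benchmark filter.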
fn default() -> Criterion {
let reports = Reports {
cli_enabled: true,
cli: CliReport::new(false, false, CliVerbosity::Normal),
bencher_enabled: false,
bencher: BencherReport,
html: default_plotting_backend().create_plotter().map(Html::new),
csv_enabled: cfg!(feature = "csv_output"),
};
let mut criterion = Criterion {
config: BenchmarkConfig {
confidence_level: 0.95,
measurement_time: Duration::from_secs(5),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
warm_up_time: Duration::from_secs(3),
sampling_mode: SamplingMode::Auto,
quick_mode: false,
},
filter: BenchmarkFilter::AcceptAll,
report: reports,
baseline_directory: "base".to_owned(),
baseline: Baseline::Save,
load_baseline: None,
output_directory: default_output_directory().clone(),
all_directories: HashSet::new(),
all_titles: HashSet::new(),
measurement: WallTime,
profiler: Box::new(RefCell::new(ExternalProfiler)),
connection: cargo_criterion_connection()
.as_ref()
.map(|mtx| mtx.lock().unwrap()),
mode: Mode::Benchmark,
};
if criterion.connection.is_some() {
criterion.report.cli_enabled = false;
criterion.report.bencher_enabled = false;
criterion.report.csv_enabled = false;
criterion.report.html = None;
}
criterion
}
}
impl<M: Measurement> Criterion<M> {
pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
Criterion {
config: self.config,
filter: self.filter,
report: self.report,
baseline_directory: self.baseline_directory,
baseline: self.baseline,
load_baseline: self.load_baseline,
output_directory: self.output_directory,
all_directories: self.all_directories,
all_titles: self.all_titles,
measurement: m,
profiler: self.profiler,
connection: self.connection,
mode: self.mode,
}
}
#[must_use]
pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
Criterion {
profiler: Box::new(RefCell::new(p)),
..self
}
}
#[must_use]
pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
if let PlottingBackend::Gnuplot = backend {
assert!(
            gnuplot_version().is_ok(),
"Gnuplot plotting backend was requested, but gnuplot is not available. \
To continue, either install Gnuplot or allow Criterion.rs to fall back \
to using plotters."
);
}
self.report.html = backend.create_plotter().map(Html::new);
self
}
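    /// Changes the number of samples (measurements) collected for each benchmark run with this
    /// `Criterion` instance.
    ///
    /// # Panics
    ///
    /// Panics if `n` is less than 10.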
#[must_use]
pub fn sample_size(mut self, n: usize) -> Criterion<M> {
assert!(n >= 10);
self.config.sample_size = n;
self
}
#[must_use]
pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
self
}
#[must_use]
pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
self
}
#[must_use]
pub fn nresamples(mut self, n: usize) -> Criterion<M> {
assert!(n > 0);
if n <= 1000 {
eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
}
self.config.nresamples = n;
self
}
#[must_use]
pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
assert!(threshold >= 0.0);
self.config.noise_threshold = threshold;
self
}
#[must_use]
pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
assert!(cl > 0.0 && cl < 1.0);
if cl < 0.5 {
eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
}
self.config.confidence_level = cl;
self
}
#[must_use]
pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
assert!(sl > 0.0 && sl < 1.0);
self.config.significance_level = sl;
self
}
#[must_use]
pub fn with_plots(mut self) -> Criterion<M> {
if self.connection.is_none() && self.report.html.is_none() {
let default_backend = default_plotting_backend().create_plotter();
if let Some(backend) = default_backend {
self.report.html = Some(Html::new(backend));
} else {
panic!("Cannot find a default plotting backend!");
}
}
self
}
#[must_use]
pub fn without_plots(mut self) -> Criterion<M> {
self.report.html = None;
self
}
#[must_use]
pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
self.baseline_directory = baseline;
self.baseline = Baseline::Save;
self
}
#[must_use]
pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
self.baseline_directory = baseline;
self.baseline = if strict {
Baseline::CompareStrict
} else {
Baseline::CompareLenient
};
self
}
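    /// Only run benchmarks whose IDs match the given regular expression.
    ///
    /// # Panics
    ///
    /// Panics if `filter` cannot be parsed as a regular expression.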
#[must_use]
pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
let filter_text = filter.into();
let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
panic!(
"Unable to parse '{}' as a regular expression: {}",
filter_text, err
)
});
self.filter = BenchmarkFilter::Regex(filter);
self
}
pub fn with_benchmark_filter(mut self, filter: BenchmarkFilter) -> Criterion<M> {
self.filter = filter;
self
}
#[must_use]
pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
self.report.cli.enable_text_coloring = enabled;
self
}
#[must_use]
#[doc(hidden)]
pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
path.clone_into(&mut self.output_directory);
self
}
#[must_use]
#[doc(hidden)]
pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
match profile_time {
Some(time) => self.mode = Mode::Profile(time),
None => self.mode = Mode::Benchmark,
}
self
}
#[doc(hidden)]
pub fn final_summary(&self) {
if !self.mode.is_benchmark() {
return;
}
let report_context = ReportContext {
output_directory: self.output_directory.clone(),
plot_config: PlotConfiguration::default(),
};
self.report.final_summary(&report_context);
}
#[must_use]
#[allow(clippy::cognitive_complexity)]
pub fn configure_from_args(mut self) -> Criterion<M> {
use clap::{value_parser, Arg, Command};
let matches = Command::new("Criterion Benchmark")
.arg(Arg::new("FILTER")
.help("Skip benchmarks whose names do not contain FILTER.")
.index(1))
.arg(Arg::new("color")
.short('c')
.long("color")
.alias("colour")
.value_parser(["auto", "always", "never"])
.default_value("auto")
.help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
.arg(Arg::new("verbose")
.short('v')
.long("verbose")
.num_args(0)
.help("Print additional statistical information."))
.arg(Arg::new("quiet")
.long("quiet")
.num_args(0)
.conflicts_with("verbose")
.help("Print only the benchmark results."))
.arg(Arg::new("noplot")
.short('n')
.long("noplot")
.num_args(0)
.help("Disable plot and HTML generation."))
.arg(Arg::new("save-baseline")
.short('s')
.long("save-baseline")
.default_value("base")
.help("Save results under a named baseline."))
.arg(Arg::new("discard-baseline")
.long("discard-baseline")
.num_args(0)
.conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"])
.help("Discard benchmark results."))
.arg(Arg::new("baseline")
.short('b')
.long("baseline")
.conflicts_with_all(["save-baseline", "baseline-lenient"])
.help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
.arg(Arg::new("baseline-lenient")
.long("baseline-lenient")
.conflicts_with_all(["save-baseline", "baseline"])
.help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
.arg(Arg::new("list")
.long("list")
.num_args(0)
.help("List all benchmarks")
.conflicts_with_all(["test", "profile-time"]))
.arg(Arg::new("format")
.long("format")
.value_parser(["pretty", "terse"])
.default_value("pretty")
.help("Output formatting"))
.arg(Arg::new("ignored")
.long("ignored")
.num_args(0)
.help("List or run ignored benchmarks (currently means skip all benchmarks)"))
.arg(Arg::new("exact")
.long("exact")
.num_args(0)
.help("Run benchmarks that exactly match the provided filter"))
.arg(Arg::new("profile-time")
.long("profile-time")
.value_parser(value_parser!(f64))
.help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
.conflicts_with_all(["test", "list"]))
.arg(Arg::new("load-baseline")
.long("load-baseline")
.conflicts_with("profile-time")
.requires("baseline")
.help("Load a previous baseline instead of sampling new data."))
.arg(Arg::new("sample-size")
.long("sample-size")
.value_parser(value_parser!(usize))
.help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
.arg(Arg::new("warm-up-time")
.long("warm-up-time")
.value_parser(value_parser!(f64))
.help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
.arg(Arg::new("measurement-time")
.long("measurement-time")
.value_parser(value_parser!(f64))
.help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
.arg(Arg::new("nresamples")
.long("nresamples")
.value_parser(value_parser!(usize))
.help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
.arg(Arg::new("noise-threshold")
.long("noise-threshold")
.value_parser(value_parser!(f64))
.help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
.arg(Arg::new("confidence-level")
.long("confidence-level")
.value_parser(value_parser!(f64))
.help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
.arg(Arg::new("significance-level")
.long("significance-level")
.value_parser(value_parser!(f64))
.help(format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
.arg(Arg::new("quick")
.long("quick")
.num_args(0)
.conflicts_with("sample-size")
.help(format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
.arg(Arg::new("test")
.hide(true)
.long("test")
.num_args(0)
.help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
.conflicts_with_all(["list", "profile-time"]))
.arg(Arg::new("bench")
.hide(true)
.long("bench")
.num_args(0))
.arg(Arg::new("plotting-backend")
.long("plotting-backend")
.value_parser(["gnuplot", "plotters"])
.help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
.arg(Arg::new("output-format")
.long("output-format")
.value_parser(["criterion", "bencher"])
.default_value("criterion")
.help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
.arg(Arg::new("nocapture")
.long("nocapture")
.num_args(0)
.hide(true)
.help("Ignored, but added for compatibility with libtest."))
.arg(Arg::new("show-output")
.long("show-output")
.num_args(0)
.hide(true)
.help("Ignored, but added for compatibility with libtest."))
.arg(Arg::new("include-ignored")
.long("include-ignored")
.num_args(0)
.hide(true)
.help("Ignored, but added for compatibility with libtest."))
.arg(Arg::new("version")
.hide(true)
.short('V')
.long("version")
.num_args(0))
.after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.
To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.
To test that the benchmarks work, run `cargo test --benches`
NOTE: If you see an 'unrecognized option' error using any of the options above, see:
https://bheisler.github.io/criterion.rs/book/faq.html
")
.get_matches();
if self.connection.is_some() {
if let Some(color) = matches.get_one::<String>("color") {
if color != "auto" {
eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
}
}
if matches.get_flag("verbose") {
eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
}
if matches.get_flag("noplot") {
eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
}
if let Some(backend) = matches.get_one::<String>("plotting-backend") {
eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
}
if let Some(format) = matches.get_one::<String>("output-format") {
if format != "criterion" {
eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
}
}
if matches.contains_id("baseline")
|| matches
.get_one::<String>("save-baseline")
.is_some_and(|base| base != "base")
|| matches.contains_id("load-baseline")
{
eprintln!("Error: baselines are not supported when running with cargo-criterion.");
std::process::exit(1);
}
}
let bench = matches.get_flag("bench");
let test = matches.get_flag("test");
        let test_mode = match (bench, test) {
            (true, true) => true,   // `cargo bench -- --test` should run the benchmarks as tests
            (true, false) => false, // `cargo bench` should run the benchmarks normally
            (false, _) => true,     // `cargo test --benches` should run the benchmarks as tests
        };
self.mode = if matches.get_flag("list") {
let list_format = match matches
.get_one::<String>("format")
.expect("a default value was provided for this")
.as_str()
{
"pretty" => ListFormat::Pretty,
"terse" => ListFormat::Terse,
other => unreachable!(
"unrecognized value for --format that isn't part of possible-values: {}",
other
),
};
Mode::List(list_format)
} else if test_mode {
Mode::Test
} else if let Some(&num_seconds) = matches.get_one("profile-time") {
if num_seconds < 1.0 {
eprintln!("Profile time must be at least one second.");
std::process::exit(1);
}
Mode::Profile(Duration::from_secs_f64(num_seconds))
} else {
Mode::Benchmark
};
if !self.mode.is_benchmark() {
self.connection = None;
}
let filter = if matches.get_flag("ignored") {
BenchmarkFilter::RejectAll
} else if let Some(filter) = matches.get_one::<String>("FILTER") {
if matches.get_flag("exact") {
BenchmarkFilter::Exact(filter.to_owned())
} else {
let regex = Regex::new(filter).unwrap_or_else(|err| {
panic!(
"Unable to parse '{}' as a regular expression: {}",
filter, err
)
});
BenchmarkFilter::Regex(regex)
}
} else {
BenchmarkFilter::AcceptAll
};
self = self.with_benchmark_filter(filter);
match matches.get_one("plotting-backend").map(String::as_str) {
Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
Some(val) => panic!("Unexpected plotting backend '{}'", val),
None => {}
}
if matches.get_flag("noplot") {
self = self.without_plots();
}
if let Some(dir) = matches.get_one::<String>("save-baseline") {
self.baseline = Baseline::Save;
dir.clone_into(&mut self.baseline_directory);
}
if matches.get_flag("discard-baseline") {
self.baseline = Baseline::Discard;
}
if let Some(dir) = matches.get_one::<String>("baseline") {
self.baseline = Baseline::CompareStrict;
dir.clone_into(&mut self.baseline_directory);
}
if let Some(dir) = matches.get_one::<String>("baseline-lenient") {
self.baseline = Baseline::CompareLenient;
dir.clone_into(&mut self.baseline_directory);
}
if self.connection.is_some() {
self.report.cli_enabled = false;
self.report.bencher_enabled = false;
self.report.csv_enabled = false;
self.report.html = None;
} else {
match matches.get_one("output-format").map(String::as_str) {
Some("bencher") => {
self.report.bencher_enabled = true;
self.report.cli_enabled = false;
}
_ => {
let verbose = matches.get_flag("verbose");
let verbosity = if verbose {
CliVerbosity::Verbose
} else if matches.get_flag("quiet") {
CliVerbosity::Quiet
} else {
CliVerbosity::Normal
};
let stdout_isatty = stdout().is_terminal();
let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
let enable_text_coloring;
match matches.get_one("color").map(String::as_str) {
Some("always") => {
enable_text_coloring = true;
}
Some("never") => {
enable_text_coloring = false;
enable_text_overwrite = false;
}
_ => enable_text_coloring = stdout_isatty,
};
self.report.bencher_enabled = false;
self.report.cli_enabled = true;
self.report.cli =
CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
}
};
}
if let Some(dir) = matches.get_one::<String>("load-baseline") {
self.load_baseline = Some(dir.to_owned());
}
if let Some(&num_size) = matches.get_one("sample-size") {
assert!(num_size >= 10);
self.config.sample_size = num_size;
}
if let Some(&num_seconds) = matches.get_one("warm-up-time") {
let dur = std::time::Duration::from_secs_f64(num_seconds);
assert!(dur.as_nanos() > 0);
self.config.warm_up_time = dur;
}
if let Some(&num_seconds) = matches.get_one("measurement-time") {
let dur = std::time::Duration::from_secs_f64(num_seconds);
assert!(dur.as_nanos() > 0);
self.config.measurement_time = dur;
}
if let Some(&num_resamples) = matches.get_one("nresamples") {
assert!(num_resamples > 0);
self.config.nresamples = num_resamples;
}
if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") {
assert!(num_noise_threshold > 0.0);
self.config.noise_threshold = num_noise_threshold;
}
if let Some(&num_confidence_level) = matches.get_one("confidence-level") {
assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);
self.config.confidence_level = num_confidence_level;
}
if let Some(&num_significance_level) = matches.get_one("significance-level") {
assert!(num_significance_level > 0.0 && num_significance_level < 1.0);
self.config.significance_level = num_significance_level;
}
if matches.get_flag("quick") {
self.config.quick_mode = true;
}
self
}
fn filter_matches(&self, id: &str) -> bool {
match &self.filter {
BenchmarkFilter::AcceptAll => true,
BenchmarkFilter::Regex(regex) => regex.is_match(id),
BenchmarkFilter::Exact(exact) => id == exact,
BenchmarkFilter::RejectAll => false,
}
}
fn should_save_baseline(&self) -> bool {
self.connection.is_none()
&& self.load_baseline.is_none()
&& !matches!(self.baseline, Baseline::Discard)
}
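    /// Creates a new [`BenchmarkGroup`] with the given name. Groups are the way to benchmark
    /// several related functions or inputs together and to set per-group options such as
    /// throughput or plot configuration.
    ///
    /// # Panics
    ///
    /// Panics if `group_name` is empty.
    ///
    /// A minimal usage sketch (the group name, sizes, and routine are placeholders):
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
    /// use std::hint::black_box;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let mut group = c.benchmark_group("summation");
    ///     for size in [1_000u64, 10_000, 100_000] {
    ///         group.bench_with_input(BenchmarkId::from_parameter(size), &size, |b, &n| {
    ///             b.iter(|| (0..black_box(n)).sum::<u64>())
    ///         });
    ///     }
    ///     group.finish();
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```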
pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
let group_name = group_name.into();
assert!(!group_name.is_empty(), "Group name must not be empty.");
if let Some(conn) = &self.connection {
conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
.unwrap();
}
BenchmarkGroup::new(self, group_name)
}
}
impl<M> Criterion<M>
where
M: Measurement + 'static,
{
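    /// Benchmarks a single function. For comparing several functions or inputs against each
    /// other, see [`Criterion::benchmark_group`].
    ///
    /// A minimal usage sketch (the benchmark name and summed range are placeholders):
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, Criterion};
    /// use std::hint::black_box;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     c.bench_function("sum 1..=100", |b| {
    ///         b.iter(|| (1u64..=100).map(black_box).sum::<u64>())
    ///     });
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```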
pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
where
F: FnMut(&mut Bencher<'_, M>),
{
self.benchmark_group(id)
.bench_function(BenchmarkId::no_function(), f);
self
}
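    /// Benchmarks a function that takes an input, recording the parameter as part of the
    /// benchmark ID.
    ///
    /// # Panics
    ///
    /// Panics if `id` was built with `BenchmarkId::from_parameter`; use [`BenchmarkId::new`]
    /// (or a [`BenchmarkGroup`]) so that a function name is available.
    ///
    /// A minimal usage sketch (the names and routine are placeholders):
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
    /// use std::hint::black_box;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let size = 1024u64;
    ///     c.bench_with_input(BenchmarkId::new("sum_range", size), &size, |b, &n| {
    ///         b.iter(|| (0..black_box(n)).sum::<u64>())
    ///     });
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```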
pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
where
F: FnMut(&mut Bencher<'_, M>, &I),
{
let group_name = id.function_name.expect(
"Cannot use BenchmarkId::from_parameter with Criterion::bench_with_input. \
Consider using a BenchmarkGroup or BenchmarkId::new instead.",
);
let parameter = id.parameter.unwrap();
self.benchmark_group(group_name).bench_with_input(
BenchmarkId::no_function_with_input(parameter),
input,
f,
);
self
}
}
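/// How throughput should be reported for a benchmark, typically set per group via
/// `BenchmarkGroup::throughput`. When a throughput is set, Criterion.rs reports results in
/// units per second in addition to the raw timing.
///
/// A minimal usage sketch (the buffer size and checksum routine are placeholders):
///
/// ```no_run
/// use criterion::{criterion_group, criterion_main, Criterion, Throughput};
/// use std::hint::black_box;
///
/// fn bench(c: &mut Criterion) {
///     let data = vec![0u8; 1024 * 1024];
///     let mut group = c.benchmark_group("checksum");
///     // Report results as bytes processed per second.
///     group.throughput(Throughput::Bytes(data.len() as u64));
///     group.bench_function("xor", |b| {
///         b.iter(|| black_box(&data).iter().fold(0u8, |acc, &x| acc ^ x))
///     });
///     group.finish();
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```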
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Throughput {
    /// The number of bytes processed per iteration, reported using binary multiples
    /// (KiB, MiB, ...).
    Bytes(u64),
    /// The number of bytes processed per iteration, reported using decimal multiples
    /// (KB, MB, ...).
    BytesDecimal(u64),
    /// The number of elements processed per iteration.
    Elements(u64),
}
/// The scale used for the axes of the summary plots.
#[derive(Debug, Default, Clone, Copy)]
pub enum AxisScale {
    /// Plot values on a linear scale (the default).
    #[default]
    Linear,
    /// Plot values on a logarithmic scale; useful when the plotted values span several orders
    /// of magnitude.
    Logarithmic,
}
/// Options controlling how plots are rendered; currently only the axis scale of the summary
/// plots can be configured.
#[derive(Debug, Default, Clone)]
pub struct PlotConfiguration {
    summary_scale: AxisScale,
}
impl PlotConfiguration {
#[must_use]
pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
self.summary_scale = new_scale;
self
}
}
/// Controls how Criterion.rs chooses the iteration counts for each sample.
#[derive(Debug, Default, Clone, Copy)]
pub enum SamplingMode {
    /// Pick between linear and flat sampling based on the estimated running time (the default).
    #[default]
    Auto,
    /// Scale the iteration count linearly across samples; preferred when the benchmark is fast
    /// enough to complete within the target time.
    Linear,
    /// Use the same iteration count for every sample; intended for long-running benchmarks.
    Flat,
}
impl SamplingMode {
pub(crate) fn choose_sampling_mode(
&self,
warmup_mean_execution_time: f64,
sample_count: u64,
target_time: f64,
) -> ActualSamplingMode {
match self {
SamplingMode::Linear => ActualSamplingMode::Linear,
SamplingMode::Flat => ActualSamplingMode::Flat,
SamplingMode::Auto => {
let total_runs = sample_count * (sample_count + 1) / 2;
let d =
(target_time / warmup_mean_execution_time / total_runs as f64).ceil() as u64;
let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time;
if expected_ns > (2.0 * target_time) {
ActualSamplingMode::Flat
} else {
ActualSamplingMode::Linear
}
}
}
}
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub(crate) enum ActualSamplingMode {
Linear,
Flat,
}
impl ActualSamplingMode {
pub(crate) fn iteration_counts(
&self,
warmup_mean_execution_time: f64,
sample_count: u64,
target_time: &Duration,
) -> Vec<u64> {
match self {
ActualSamplingMode::Linear => {
let n = sample_count;
let met = warmup_mean_execution_time;
let m_ns = target_time.as_nanos();
let total_runs = n * (n + 1) / 2;
let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
let expected_ns = total_runs as f64 * d as f64 * met;
if d == 1 {
let recommended_sample_size =
ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
eprintln!(
", enable flat sampling, or reduce sample count to {}.",
recommended_sample_size
);
} else {
eprintln!(" or enable flat sampling.");
}
}
(1..(n + 1)).map(|a| a * d).collect::<Vec<u64>>()
}
ActualSamplingMode::Flat => {
let n = sample_count;
let met = warmup_mean_execution_time;
let m_ns = target_time.as_nanos() as f64;
let time_per_sample = m_ns / (n as f64);
let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);
let expected_ns = met * (iterations_per_sample * n) as f64;
if iterations_per_sample == 1 {
let recommended_sample_size =
ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
let actual_time = Duration::from_nanos(expected_ns as u64);
eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
n, target_time, actual_time);
if recommended_sample_size != n {
eprintln!(", or reduce sample count to {}.", recommended_sample_size);
} else {
eprintln!(".");
}
}
vec![iterations_per_sample; n as usize]
}
}
}
fn is_linear(&self) -> bool {
matches!(self, ActualSamplingMode::Linear)
}
    fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 {
        // With linear sampling and d == 1, the total measurement time is roughly
        // n * (n + 1) / 2 * met. Approximately solve for the sample count n that fits within
        // `target_time`, then round down to a multiple of ten with a floor of ten.
        let c = target_time / met;
        let sample_size = (-1.0 + (4.0 * c).sqrt()) / 2.0;
        let sample_size = sample_size as u64;
        let sample_size = (sample_size / 10) * 10;
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }
    fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 {
        // With flat sampling and one iteration per sample, roughly target_time / met samples
        // fit in the target time; round down to a multiple of ten with a floor of ten.
        let sample_size = (target_time / met) as u64;
        let sample_size = (sample_size / 10) * 10;
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }
}
#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct SavedSample {
sampling_mode: ActualSamplingMode,
iters: Vec<f64>,
times: Vec<f64>,
}
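/// Runs each of the given benchmark functions in order, then builds a `Criterion` from the
/// command-line arguments and prints the final summary. This is `#[doc(hidden)]` and not
/// intended to be called directly.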
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
for bench in benches {
bench();
}
Criterion::default().configure_from_args().final_summary();
}