#![deny(missing_docs)]
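//! A statistics-driven micro-benchmarking library.
//!
//! Criterion runs each benchmark repeatedly, collects timing samples, and
//! analyzes them with bootstrap statistics so that performance changes can be
//! detected between runs.
//!
//! A minimal usage sketch (assuming this crate is built under the name
//! `criterion`; real benchmarks are normally wired up as Cargo `bench`
//! targets rather than a plain `main`):
//!
//! ```no_run
//! use criterion::{black_box, Criterion};
//!
//! fn main() {
//!     let mut criterion = Criterion::default().configure_from_args();
//!     criterion.bench_function("double", |b| b.iter(|| black_box(21) * 2));
//! }
//! ```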
#![cfg_attr(feature = "real_blackbox", feature(test))]
#![cfg_attr(not(feature = "html_reports"), allow(dead_code))]
#![cfg_attr(
feature = "cargo-clippy",
allow(
clippy::used_underscore_binding,
clippy::just_underscores_and_digits,
clippy::transmute_ptr_to_ptr
)
)]
#[cfg(test)]
#[macro_use]
extern crate approx;
#[cfg(test)]
#[macro_use]
extern crate quickcheck;
#[cfg(test)]
extern crate rand;
#[macro_use]
extern crate clap;
#[macro_use]
extern crate lazy_static;
extern crate atty;
extern crate cast;
extern crate csv;
extern crate itertools;
extern crate num_traits;
extern crate rand_core;
extern crate rand_os;
extern crate rand_xoshiro;
extern crate rayon;
extern crate serde;
extern crate serde_json;
extern crate walkdir;
#[cfg(feature = "html_reports")]
extern crate criterion_plot;
#[cfg(feature = "html_reports")]
extern crate tinytemplate;
#[cfg(feature = "real_blackbox")]
extern crate test;
#[macro_use]
extern crate serde_derive;
#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod macros;
mod program;
mod report;
mod routine;
mod stats;
#[cfg(feature = "html_reports")]
mod kde;
#[cfg(feature = "html_reports")]
mod plot;
#[cfg(feature = "html_reports")]
mod html;
use std::cell::RefCell;
use std::collections::BTreeMap;
use std::default::Default;
use std::fmt;
use std::iter::IntoIterator;
use std::process::Command;
use std::time::{Duration, Instant};
use benchmark::BenchmarkConfig;
use benchmark::NamedRoutine;
use csv_report::FileCsvReport;
use estimate::{Distributions, Estimates, Statistic};
use plotting::Plotting;
use report::{CliReport, Report, ReportContext, Reports};
use routine::Function;
#[cfg(feature = "html_reports")]
use html::Html;
pub use benchmark::{Benchmark, BenchmarkDefinition, ParameterizedBenchmark};
lazy_static! {
    static ref DEBUG_ENABLED: bool = std::env::vars().any(|(key, _)| key == "CRITERION_DEBUG");
}
fn debug_enabled() -> bool {
*DEBUG_ENABLED
}
#[cfg(not(feature = "html_reports"))]
#[doc(hidden)]
pub fn deprecation_warning() {
#[deprecated(
since = "0.2.6",
note = "The html_reports cargo feature is deprecated. As of 0.3.0, HTML reports will no longer be optional."
)]
fn deprecation_warning_inner() {}
deprecation_warning_inner()
}
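/// A function that is opaque to the optimizer, used to prevent the compiler
/// from optimizing away computations in a benchmark.
///
/// This variant simply forwards to `test::black_box` and is only available
/// with the nightly-only `real_blackbox` feature.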
#[cfg(feature = "real_blackbox")]
pub fn black_box<T>(dummy: T) -> T {
test::black_box(dummy)
}
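/// A function that is opaque to the optimizer, used to prevent the compiler
/// from optimizing away computations in a benchmark.
///
/// This stable fallback approximates `test::black_box` with a volatile read,
/// which adds a small amount of overhead that the nightly version avoids.
///
/// A minimal sketch of its use:
///
/// ```
/// use criterion::black_box;
///
/// // Without black_box, the compiler could fold this to a constant.
/// let sum = black_box(2u64) + black_box(2u64);
/// assert_eq!(sum, 4);
/// ```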
#[cfg(not(feature = "real_blackbox"))]
pub fn black_box<T>(dummy: T) -> T {
unsafe {
let ret = std::ptr::read_volatile(&dummy);
std::mem::forget(dummy);
ret
}
}
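/// A named benchmark routine, used with `Criterion::bench_functions` to
/// compare several implementations over the same input.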
pub struct Fun<I: fmt::Debug> {
f: NamedRoutine<I>,
}
impl<I> Fun<I>
where
I: fmt::Debug + 'static,
{
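    /// Creates a `Fun` from a display name and a closure that receives a
    /// `Bencher` and a shared reference to the input.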
pub fn new<F>(name: &str, f: F) -> Fun<I>
where
F: FnMut(&mut Bencher, &I) + 'static,
{
let routine = NamedRoutine {
id: name.to_owned(),
f: Box::new(RefCell::new(Function::new(f))),
};
Fun { f: routine }
}
}
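/// Controls the batching of setup calls in `Bencher::iter_batched` and
/// `Bencher::iter_batched_ref`. Batching is a trade-off: larger batches
/// amortize the measurement overhead over more iterations, but keep more
/// setup values alive in memory at once. `SmallInput` is a reasonable default
/// when in doubt.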
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// A batch of approximately `iters / 10` iterations. A good default when
    /// the setup value is cheap to create and small in memory.
    SmallInput,
    /// A batch of approximately `iters / 1000` iterations. Use when the setup
    /// value is large enough that holding many copies in memory is a concern.
    LargeInput,
    /// One iteration per batch, i.e. the setup function runs before every
    /// iteration. This has the highest overhead; use it only when the input
    /// cannot be reused at all.
    PerIteration,
    /// Divide the iterations into exactly this many batches.
    NumBatches(u64),
    /// Run exactly this many iterations per batch.
    NumIterations(u64),
#[doc(hidden)]
__NonExhaustive,
}
impl BatchSize {
    /// Converts the total iteration count into the number of iterations per
    /// batch, using ceiling division so that all `iters` iterations are covered.
    fn iters_per_batch(self, iters: u64) -> u64 {
match self {
BatchSize::SmallInput => (iters + 10 - 1) / 10,
BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
BatchSize::PerIteration => 1,
BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
BatchSize::NumIterations(size) => size,
BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
}
}
}
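/// The timer struct passed to benchmark closures; the closure must call one
/// of the `iter*` methods to perform the measurement, or the benchmark will
/// panic.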
#[derive(Clone, Copy)]
pub struct Bencher {
iterated: bool,
iters: u64,
elapsed: Duration,
}
impl Bencher {
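    /// Times `routine` by running it in a tight loop for the iteration count
    /// chosen by Criterion and recording the total elapsed time. The routine's
    /// return value is passed through `black_box` so the computation cannot be
    /// optimized away.
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use criterion::Criterion;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     c.bench_function("sum", |b| b.iter(|| (0u64..1000).sum::<u64>()));
    /// }
    /// ```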
#[inline(never)]
pub fn iter<O, R>(&mut self, mut routine: R)
where
R: FnMut() -> O,
{
self.iterated = true;
let start = Instant::now();
for _ in 0..self.iters {
black_box(routine());
}
self.elapsed = start.elapsed();
}
#[doc(hidden)]
pub fn iter_with_setup<I, O, S, R>(&mut self, setup: S, routine: R)
where
S: FnMut() -> I,
R: FnMut(I) -> O,
{
self.iter_batched(setup, routine, BatchSize::PerIteration);
}
#[doc(hidden)]
pub fn iter_with_large_drop<O, R>(&mut self, mut routine: R)
where
R: FnMut() -> O,
{
self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
}
#[doc(hidden)]
pub fn iter_with_large_setup<I, O, S, R>(&mut self, setup: S, routine: R)
where
S: FnMut() -> I,
R: FnMut(I) -> O,
{
self.iter_batched(setup, routine, BatchSize::NumBatches(1));
}
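    /// Times `routine` with a fresh input from `setup` for every call. Inputs
    /// are created in batches ahead of time so that only `routine` itself
    /// falls inside the timed region; `size` selects the batching strategy
    /// (see `BatchSize`).
    ///
    /// A sketch benchmarking an in-place sort, where the input has to be
    /// rebuilt for every call:
    ///
    /// ```
    /// use criterion::{BatchSize, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let data: Vec<u64> = (0..1000).rev().collect();
    ///     c.bench_function("sort", move |b| {
    ///         b.iter_batched(
    ///             || data.clone(),         // per-call setup, not timed
    ///             |mut v| { v.sort(); v }, // the timed routine
    ///             BatchSize::SmallInput,
    ///         )
    ///     });
    /// }
    /// ```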
#[inline(never)]
pub fn iter_batched<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
where
S: FnMut() -> I,
R: FnMut(I) -> O,
{
self.iterated = true;
let batch_size = size.iters_per_batch(self.iters);
assert!(batch_size != 0, "Batch size must not be zero.");
self.elapsed = Duration::from_secs(0);
if batch_size == 1 {
for _ in 0..self.iters {
                let input = black_box(setup());
let start = Instant::now();
let output = routine(input);
self.elapsed += start.elapsed();
drop(black_box(output));
}
} else {
let mut iteration_counter = 0;
while iteration_counter < self.iters {
                // Truncate the last batch so the total never exceeds self.iters.
                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
let mut outputs = Vec::with_capacity(batch_size as usize);
let start = Instant::now();
outputs.extend(inputs.into_iter().map(&mut routine));
self.elapsed += start.elapsed();
black_box(outputs);
iteration_counter += batch_size;
}
}
}
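    /// Like `iter_batched`, but passes the routine a mutable reference to the
    /// input instead of consuming it, so the time to drop the input stays
    /// outside the measurement.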
#[inline(never)]
pub fn iter_batched_ref<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
where
S: FnMut() -> I,
R: FnMut(&mut I) -> O,
{
self.iterated = true;
let batch_size = size.iters_per_batch(self.iters);
assert!(batch_size != 0, "Batch size must not be zero.");
self.elapsed = Duration::from_secs(0);
if batch_size == 1 {
for _ in 0..self.iters {
let mut input = black_box(setup());
let start = Instant::now();
let output = routine(&mut input);
self.elapsed += start.elapsed();
drop(black_box(output));
drop(black_box(input));
}
} else {
let mut iteration_counter = 0;
while iteration_counter < self.iters {
let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
let mut outputs = Vec::with_capacity(batch_size as usize);
let start = Instant::now();
outputs.extend(inputs.iter_mut().map(&mut routine));
self.elapsed += start.elapsed();
black_box(outputs);
iteration_counter += batch_size;
}
}
}
fn assert_iterated(&mut self) {
if !self.iterated {
panic!("Benchmark function must call Bencher::iter or related method.");
}
self.iterated = false;
}
}
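/// Selects whether this run's measurements are saved as a baseline or
/// compared against an existing one; see `Criterion::save_baseline` and
/// `Criterion::retain_baseline`.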
pub enum Baseline {
    /// Compare the new measurements against the named baseline without
    /// overwriting it.
    Compare,
    /// Save the new measurements as the named baseline, replacing any data
    /// previously stored under that name.
    Save,
}
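/// The benchmark manager. It holds the measurement configuration and report
/// setup and exposes the `bench*` methods that actually run benchmarks.
/// Construct one with `Criterion::default()` and chain the configuration
/// methods as needed.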
pub struct Criterion {
config: BenchmarkConfig,
plotting: Plotting,
filter: Option<String>,
report: Box<Report>,
output_directory: String,
baseline_directory: String,
baseline: Baseline,
profile_time: Option<Duration>,
test_mode: bool,
list_mode: bool,
}
impl Default for Criterion {
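    /// Creates a benchmark manager with the default configuration: 100
    /// samples, 3 s warm-up, 5 s measurement time, 100 000 resamples, a 0.01
    /// noise threshold, 0.95 confidence level, and 0.05 significance level.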
fn default() -> Criterion {
#[allow(unused_mut, unused_assignments)]
let mut plotting = Plotting::Unset;
let mut reports: Vec<Box<Report>> = vec![];
reports.push(Box::new(CliReport::new(false, false, false)));
reports.push(Box::new(FileCsvReport));
let output_directory =
match std::env::vars().find(|&(ref key, _)| key == "CARGO_TARGET_DIR") {
Some((_, value)) => format!("{}/criterion", value),
None => "target/criterion".to_owned(),
};
Criterion {
config: BenchmarkConfig {
confidence_level: 0.95,
measurement_time: Duration::new(5, 0),
noise_threshold: 0.01,
nresamples: 100_000,
sample_size: 100,
significance_level: 0.05,
warm_up_time: Duration::new(3, 0),
},
plotting,
filter: None,
report: Box::new(Reports::new(reports)),
baseline_directory: "base".to_owned(),
baseline: Baseline::Save,
profile_time: None,
test_mode: false,
list_mode: false,
output_directory,
}
}
}
impl Criterion {
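    /// Changes the number of timing samples collected per benchmark.
    ///
    /// # Panics
    ///
    /// Panics if `n` is less than 2. Values below 10 are deprecated and only
    /// print a warning.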
pub fn sample_size(mut self, n: usize) -> Criterion {
assert!(n >= 2);
if n < 10 {
println!("Warning: Sample sizes < 10 will be disallowed in Criterion.rs 0.3.0.");
}
self.config.sample_size = n;
self
}
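    /// Changes how long each routine is run before measurement begins, to let
    /// caches and frequency scaling settle.
    ///
    /// # Panics
    ///
    /// Panics if the given duration is zero.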
pub fn warm_up_time(mut self, dur: Duration) -> Criterion {
assert!(dur.to_nanos() > 0);
self.config.warm_up_time = dur;
self
}
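    /// Changes the target time over which each benchmark is measured. Longer
    /// times give more stable statistics at the cost of a slower run.
    ///
    /// # Panics
    ///
    /// Panics if the given duration is zero.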
pub fn measurement_time(mut self, dur: Duration) -> Criterion {
assert!(dur.to_nanos() > 0);
self.config.measurement_time = dur;
self
}
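    /// Changes the number of bootstrap resamples used for the statistical
    /// analysis.
    ///
    /// # Panics
    ///
    /// Panics if `n` is zero.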
pub fn nresamples(mut self, n: usize) -> Criterion {
assert!(n > 0);
self.config.nresamples = n;
self
}
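    /// Changes the noise threshold: relative changes below this value are
    /// treated as measurement noise rather than a real performance change.
    ///
    /// # Panics
    ///
    /// Panics if `threshold` is negative.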
pub fn noise_threshold(mut self, threshold: f64) -> Criterion {
assert!(threshold >= 0.0);
self.config.noise_threshold = threshold;
self
}
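    /// Changes the confidence level used for the confidence intervals in the
    /// reports.
    ///
    /// # Panics
    ///
    /// Panics unless `0.0 < cl < 1.0`.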
pub fn confidence_level(mut self, cl: f64) -> Criterion {
assert!(cl > 0.0 && cl < 1.0);
self.config.confidence_level = cl;
self
}
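    /// Changes the significance level used when testing whether performance
    /// has changed between runs.
    ///
    /// # Panics
    ///
    /// Panics unless `0.0 < sl < 1.0`.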
pub fn significance_level(mut self, sl: f64) -> Criterion {
assert!(sl > 0.0 && sl < 1.0);
self.config.significance_level = sl;
self
}
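    /// Enables plotting and the HTML report if a usable gnuplot installation
    /// is found; otherwise prints a message and leaves plotting unavailable.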
#[cfg(feature = "html_reports")]
pub fn with_plots(mut self) -> Criterion {
use criterion_plot::VersionError;
self.plotting = match criterion_plot::version() {
Ok(_) => {
let mut reports: Vec<Box<Report>> = vec![];
reports.push(Box::new(CliReport::new(false, false, false)));
reports.push(Box::new(FileCsvReport));
reports.push(Box::new(Html::new()));
self.report = Box::new(Reports::new(reports));
Plotting::Enabled
}
Err(e) => {
match e {
VersionError::Exec(_) => println!("Gnuplot not found, disabling plotting"),
                    e => println!("Gnuplot not usable, disabling plotting\n{}", e),
}
Plotting::NotAvailable
}
};
self
}
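    /// Enables plotting. Without the `html_reports` feature this is a no-op.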
#[cfg(not(feature = "html_reports"))]
pub fn with_plots(self) -> Criterion {
self
}
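    /// Disables plotting for this run.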
pub fn without_plots(mut self) -> Criterion {
self.plotting = Plotting::Disabled;
self
}
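    /// Returns true if a usable gnuplot installation is available.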
#[cfg(feature = "html_reports")]
pub fn can_plot(&self) -> bool {
match self.plotting {
Plotting::NotAvailable => false,
Plotting::Enabled => true,
_ => criterion_plot::version().is_ok(),
}
}
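    /// Returns true if plotting is possible; always false without the
    /// `html_reports` feature.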
#[cfg(not(feature = "html_reports"))]
pub fn can_plot(&self) -> bool {
false
}
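    /// Names the baseline under which this run's results will be saved,
    /// overwriting any results previously stored under that name.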
pub fn save_baseline(mut self, baseline: String) -> Criterion {
self.baseline_directory = baseline;
self.baseline = Baseline::Save;
self
}
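    /// Compares this run's results against the named baseline without
    /// overwriting it.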
pub fn retain_baseline(mut self, baseline: String) -> Criterion {
self.baseline_directory = baseline;
self.baseline = Baseline::Compare;
self
}
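    /// Runs only the benchmarks whose IDs contain the given string.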
pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion {
self.filter = Some(filter.into());
self
}
#[doc(hidden)]
pub fn output_directory(mut self, path: &std::path::Path) -> Criterion {
self.output_directory = path.to_string_lossy().into_owned();
self
}
#[doc(hidden)]
pub fn final_summary(&self) {
if self.profile_time.is_some() || self.test_mode {
return;
}
let report_context = ReportContext {
output_directory: self.output_directory.clone(),
plotting: self.plotting,
plot_config: PlotConfiguration::default(),
test_mode: self.test_mode,
};
self.report.final_summary(&report_context);
}
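    /// Applies command-line arguments (filter, color, baseline, plotting, and
    /// profiling flags) to this instance. Call this before running any
    /// benchmarks so the standard `cargo bench` flags are honored.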
pub fn configure_from_args(mut self) -> Criterion {
use clap::{App, Arg};
let matches = App::new("Criterion Benchmark")
.arg(Arg::with_name("FILTER")
.help("Skip benchmarks whose names do not contain FILTER.")
.index(1))
.arg(Arg::with_name("color")
.short("c")
.long("color")
.alias("colour")
.takes_value(true)
.possible_values(&["auto", "always", "never"])
.default_value("auto")
.help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
.arg(Arg::with_name("verbose")
.short("v")
.long("verbose")
.help("Print additional statistical information."))
.arg(Arg::with_name("noplot")
.short("n")
.long("noplot")
.help("Disable plot and HTML generation."))
.arg(Arg::with_name("save-baseline")
.short("s")
.long("save-baseline")
.default_value("base")
.help("Save results under a named baseline."))
.arg(Arg::with_name("baseline")
.short("b")
.long("baseline")
.takes_value(true)
.conflicts_with("save-baseline")
.help("Compare to a named baseline."))
.arg(Arg::with_name("list")
.long("list")
.help("List all benchmarks"))
.arg(Arg::with_name("measure-only")
.long("measure-only")
.hidden(true)
.help("Only perform measurements; do no analysis or storage of results. This is useful eg. when profiling the benchmarks, to reduce clutter in the profiling data."))
.arg(Arg::with_name("profile-time")
.long("profile-time")
.takes_value(true)
.help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler."))
.arg(Arg::with_name("test")
.long("test")
.help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results."))
.arg(Arg::with_name("bench")
.hidden(true)
.long("bench"))
.arg(Arg::with_name("version")
.hidden(true)
.short("V")
.long("version"))
.after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.
To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.
")
.get_matches();
if let Some(filter) = matches.value_of("FILTER") {
self = self.with_filter(filter);
}
let verbose = matches.is_present("verbose");
let stdout_isatty = atty::is(atty::Stream::Stdout);
let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
let enable_text_coloring;
match matches.value_of("color") {
Some("always") => {
enable_text_coloring = true;
}
Some("never") => {
enable_text_coloring = false;
enable_text_overwrite = false;
}
_ => enable_text_coloring = stdout_isatty,
}
if matches.is_present("noplot") || matches.is_present("test") {
self = self.without_plots();
} else {
self = self.with_plots();
}
if let Some(dir) = matches.value_of("save-baseline") {
self.baseline = Baseline::Save;
self.baseline_directory = dir.to_owned()
}
if let Some(dir) = matches.value_of("baseline") {
self.baseline = Baseline::Compare;
self.baseline_directory = dir.to_owned();
}
let mut reports: Vec<Box<Report>> = vec![];
reports.push(Box::new(CliReport::new(
enable_text_overwrite,
enable_text_coloring,
verbose,
)));
reports.push(Box::new(FileCsvReport));
if matches.is_present("measure-only") {
println!("Warning: The '--measure-only' argument is deprecated and will be removed in Criterion.rs 0.3.0. Use '--profile-time' instead.");
self.profile_time = Some(Duration::from_secs(5));
}
if matches.is_present("profile-time") {
let num_seconds = value_t!(matches.value_of("profile-time"), u64).unwrap_or_else(|e| {
println!("{}", e);
std::process::exit(1)
});
if num_seconds < 1 {
println!("Profile time must be at least one second.");
std::process::exit(1);
}
self.profile_time = Some(Duration::from_secs(num_seconds));
}
self.test_mode = matches.is_present("test");
if matches.is_present("list") {
self.test_mode = true;
self.list_mode = true;
}
#[cfg(feature = "html_reports")]
{
if self.profile_time.is_none() {
reports.push(Box::new(Html::new()));
}
}
self.report = Box::new(Reports::new(reports));
self
}
fn filter_matches(&self, id: &str) -> bool {
match self.filter {
Some(ref string) => id.contains(string),
None => true,
}
}
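    /// Benchmarks a single function under the given ID.
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use criterion::Criterion;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     c.bench_function("reverse", |b| {
    ///         b.iter(|| "hello, world".chars().rev().collect::<String>())
    ///     });
    /// }
    /// ```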
pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion
where
F: FnMut(&mut Bencher) + 'static,
{
self.bench(id, Benchmark::new(id, f))
}
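    /// Benchmarks several named functions over the same input, producing a
    /// combined comparison report.
    ///
    /// A sketch comparing two ways of summing a range; the closure argument
    /// types pin down the input type `I`:
    ///
    /// ```
    /// use criterion::{Bencher, Criterion, Fun};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let fold = Fun::new("fold", |b: &mut Bencher, n: &u64| {
    ///         b.iter(|| (0..*n).fold(0u64, |acc, i| acc + i))
    ///     });
    ///     let sum = Fun::new("sum", |b: &mut Bencher, n: &u64| {
    ///         b.iter(|| (0..*n).sum::<u64>())
    ///     });
    ///     c.bench_functions("sum_1000", vec![fold, sum], 1000u64);
    /// }
    /// ```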
pub fn bench_functions<I>(&mut self, id: &str, funs: Vec<Fun<I>>, input: I) -> &mut Criterion
where
I: fmt::Debug + 'static,
{
let benchmark = ParameterizedBenchmark::with_functions(
funs.into_iter().map(|fun| fun.f).collect(),
vec![input],
);
self.bench(id, benchmark)
}
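    /// Benchmarks a function over a collection of inputs, producing one
    /// measurement per input.
    ///
    /// A sketch measuring allocation at several sizes:
    ///
    /// ```
    /// use criterion::Criterion;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     c.bench_function_over_inputs(
    ///         "with_capacity",
    ///         |b, &size| b.iter(|| Vec::<u8>::with_capacity(size)),
    ///         vec![1024usize, 2048, 4096],
    ///     );
    /// }
    /// ```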
pub fn bench_function_over_inputs<I, F>(&mut self, id: &str, f: F, inputs: I) -> &mut Criterion
where
I: IntoIterator,
I::Item: fmt::Debug + 'static,
F: FnMut(&mut Bencher, &I::Item) + 'static,
{
self.bench(id, ParameterizedBenchmark::new(id, f, inputs))
}
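    /// Benchmarks an external program. Deprecated; scheduled for removal in
    /// 0.3.0 (see the attached deprecation note).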
#[deprecated(
since = "0.2.6",
note = "External program benchmarks were rarely used and are awkward to maintain, so they are scheduled for deletion in 0.3.0"
)]
#[allow(deprecated)]
pub fn bench_program(&mut self, id: &str, program: Command) -> &mut Criterion {
self.bench(id, Benchmark::new_external(id, program))
}
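    /// Benchmarks an external program over a collection of inputs, passing
    /// each input as a `{:?}`-formatted command-line argument. Deprecated;
    /// scheduled for removal in 0.3.0 (see the attached deprecation note).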
#[deprecated(
since = "0.2.6",
note = "External program benchmarks were rarely used and are awkward to maintain, so they are scheduled for deletion in 0.3.0"
)]
#[allow(deprecated)]
pub fn bench_program_over_inputs<I, F>(
&mut self,
id: &str,
mut program: F,
inputs: I,
) -> &mut Criterion
where
F: FnMut() -> Command + 'static,
I: IntoIterator,
I::Item: fmt::Debug + 'static,
{
self.bench(
id,
ParameterizedBenchmark::new_external(
id,
move |i| {
let mut command = program();
command.arg(format!("{:?}", i));
command
},
inputs,
),
)
}
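    /// Runs a benchmark definition (e.g. a `Benchmark` or
    /// `ParameterizedBenchmark`) under the given group ID. The other `bench*`
    /// methods are all conveniences that funnel into this one.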
pub fn bench<B: BenchmarkDefinition>(
&mut self,
group_id: &str,
benchmark: B,
) -> &mut Criterion {
benchmark.run(group_id, self);
self
}
}
mod plotting {
#[derive(Debug, Clone, Copy)]
pub enum Plotting {
Unset,
Disabled,
Enabled,
NotAvailable,
}
impl Plotting {
pub fn is_enabled(self) -> bool {
match self {
Plotting::Enabled => true,
_ => false,
}
}
}
}
trait DurationExt {
fn to_nanos(&self) -> u64;
}
const NANOS_PER_SEC: u64 = 1_000_000_000;
impl DurationExt for Duration {
fn to_nanos(&self) -> u64 {
self.as_secs() * NANOS_PER_SEC + u64::from(self.subsec_nanos())
}
}
#[derive(Clone, Copy, PartialEq, Deserialize, Serialize, Debug)]
struct ConfidenceInterval {
confidence_level: f64,
lower_bound: f64,
upper_bound: f64,
}
#[derive(Clone, Copy, PartialEq, Deserialize, Serialize, Debug)]
struct Estimate {
confidence_interval: ConfidenceInterval,
point_estimate: f64,
standard_error: f64,
}
// Pairs each bootstrap distribution with its point estimate and computes a
// confidence interval at the requested confidence level.
fn build_estimates(
distributions: &Distributions,
points: &BTreeMap<Statistic, f64>,
cl: f64,
) -> Estimates {
distributions
.iter()
.map(|(&statistic, distribution)| {
let point_estimate = points[&statistic];
let (lb, ub) = distribution.confidence_interval(cl);
(
statistic,
Estimate {
confidence_interval: ConfidenceInterval {
confidence_level: cl,
lower_bound: lb,
upper_bound: ub,
},
point_estimate,
standard_error: distribution.std_dev(None),
},
)
})
.collect()
}
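/// The throughput of a benchmark, used by the reports to express results in
/// bytes or elements per second instead of raw time per iteration.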
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Throughput {
    /// Throughput measured in bytes processed per iteration.
    Bytes(u32),
    /// Throughput measured in elements processed per iteration.
    Elements(u32),
}
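/// The scale of the axes in the summary plots.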
#[derive(Debug, Clone, Copy)]
pub enum AxisScale {
    /// A linear axis scale (the default).
    Linear,
    /// A logarithmic axis scale; useful when input sizes span several orders
    /// of magnitude.
    Logarithmic,
}
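/// Per-benchmark plot settings. Currently this only selects the axis scale
/// used for the summary plots.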
#[derive(Debug, Clone)]
pub struct PlotConfiguration {
summary_scale: AxisScale,
}
impl Default for PlotConfiguration {
fn default() -> PlotConfiguration {
PlotConfiguration {
summary_scale: AxisScale::Linear,
}
}
}
impl PlotConfiguration {
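    /// Sets the axis scale for the summary plots. A logarithmic scale helps
    /// when the benchmark inputs span several orders of magnitude.
    ///
    /// A sketch building the configuration value (attaching it to a benchmark
    /// happens through the benchmark types in the `benchmark` module, not
    /// shown here):
    ///
    /// ```
    /// use criterion::{AxisScale, PlotConfiguration};
    ///
    /// let config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
    /// ```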
pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
self.summary_scale = new_scale;
self
}
}