#![warn(clippy::doc_markdown, missing_docs)]
#![warn(bare_trait_objects)]
#![allow(
    clippy::just_underscores_and_digits,
    clippy::transmute_ptr_to_ptr,
)]

#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
compile_error!("Rayon cannot be used when targeting wasm32. Try disabling default features.");

use regex::Regex;
use serde::{Deserialize, Serialize};

#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
#[macro_use]
mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod html;
mod kde;
mod macros;
pub mod measurement;
mod plot;
pub mod profiler;
mod report;
mod routine;
mod stats;

use std::cell::RefCell;
use std::collections::HashSet;
use std::env;
use std::io::{stdout, IsTerminal};
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::{Mutex, MutexGuard, OnceLock};
use std::time::Duration;

use criterion_plot::{Version, VersionError};

use crate::benchmark::BenchmarkConfig;
use crate::connection::{Connection, OutgoingMessage};
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
#[cfg(feature = "plotters")]
use crate::plot::PlottersBackend;
use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};

#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};

fn gnuplot_version() -> &'static Result<Version, VersionError> {
    // Probe for gnuplot once and cache the result for the rest of the process.
    static GNUPLOT_VERSION: OnceLock<Result<Version, VersionError>> = OnceLock::new();

    GNUPLOT_VERSION.get_or_init(criterion_plot::version)
}

fn default_plotting_backend() -> &'static PlottingBackend {
    static DEFAULT_PLOTTING_BACKEND: OnceLock<PlottingBackend> = OnceLock::new();

    DEFAULT_PLOTTING_BACKEND.get_or_init(|| match gnuplot_version() {
        Ok(_) => PlottingBackend::Gnuplot,
        #[cfg(feature = "plotters")]
        Err(e) => {
            match e {
                VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
                e => eprintln!(
                    "Gnuplot not found or not usable, using plotters backend\n{}",
                    e
                ),
            };
            PlottingBackend::Plotters
        }
        #[cfg(not(feature = "plotters"))]
        Err(_) => PlottingBackend::None,
    })
}

fn cargo_criterion_connection() -> &'static Option<Mutex<Connection>> {
    // When run under cargo-criterion, CARGO_CRITERION_PORT carries the port of
    // the runner's TCP socket; connect once and share the connection.
    static CARGO_CRITERION_CONNECTION: OnceLock<Option<Mutex<Connection>>> = OnceLock::new();

    CARGO_CRITERION_CONNECTION.get_or_init(|| match std::env::var("CARGO_CRITERION_PORT") {
        Ok(port_str) => {
            let port: u16 = port_str.parse().ok()?;
            let stream = TcpStream::connect(("localhost", port)).ok()?;
            Some(Mutex::new(Connection::new(stream).ok()?))
        }
        Err(_) => None,
    })
}

fn default_output_directory() -> &'static PathBuf {
    static DEFAULT_OUTPUT_DIRECTORY: OnceLock<PathBuf> = OnceLock::new();

    DEFAULT_OUTPUT_DIRECTORY.get_or_init(|| {
        // Use CRITERION_HOME if set, falling back to Cargo's target directory,
        // and finally to a relative path.
        if let Some(value) = env::var_os("CRITERION_HOME") {
            PathBuf::from(value)
        } else if let Some(path) = cargo_target_directory() {
            path.join("criterion")
        } else {
            PathBuf::from("target/criterion")
        }
    })
}

fn debug_enabled() -> bool {
    static DEBUG_ENABLED: OnceLock<bool> = OnceLock::new();

    *DEBUG_ENABLED.get_or_init(|| std::env::var_os("CRITERION_DEBUG").is_some())
}

/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark.
#[deprecated(note = "use `std::hint::black_box()` instead")]
pub fn black_box<T>(dummy: T) -> T {
    std::hint::black_box(dummy)
}

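/// Argument to [`Bencher::iter_batched`] and [`Bencher::iter_batched_ref`] which
/// controls the batch size: how many setup-produced values are held in memory at
/// once while the measured routine consumes them.
///
/// A minimal sketch of batched iteration with a per-batch setup (the sort input
/// is illustrative):
///
/// ```no_run
/// use criterion::{criterion_group, criterion_main, BatchSize, Criterion};
///
/// fn bench(c: &mut Criterion) {
///     c.bench_function("sort", |b| {
///         b.iter_batched(
///             || (0..1000u32).rev().collect::<Vec<_>>(), // setup: build a fresh input
///             |mut data| data.sort(),                    // routine: consumes the input
///             BatchSize::SmallInput,
///         )
///     });
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```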
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// Indicates that the input is small enough that many values can safely be
    /// held in memory at once; batches of roughly one tenth of the iteration
    /// count are used.
    SmallInput,

    /// Indicates that the input is large enough that only a limited number of
    /// values should be held in memory at once; batches of roughly one
    /// thousandth of the iteration count are used.
    LargeInput,

    /// Indicates that the input is extremely large or holds a scarce resource,
    /// so only one value is created per iteration.
    PerIteration,

    /// Divide the iterations into the given number of batches.
    NumBatches(u64),

    /// Fix the number of iterations per batch to the given constant.
    NumIterations(u64),

    #[doc(hidden)]
    __NonExhaustive,
}
impl BatchSize {
    /// Convert to a number of iterations per batch.
    ///
    /// We aim for a roughly constant number of batches regardless of the number
    /// of iterations in this sample, so the per-batch overhead stays roughly
    /// constant and is easier to separate out during analysis.
    fn iters_per_batch(self, iters: u64) -> u64 {
        match self {
            BatchSize::SmallInput => (iters + 10 - 1) / 10,
            BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
            BatchSize::PerIteration => 1,
            BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
            BatchSize::NumIterations(size) => size,
            BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
        }
    }
}

/// Baseline selection: how the results of this run relate to a stored baseline.
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
    /// Compare against a stored baseline, silently skipping benchmarks that
    /// have no stored baseline.
    CompareLenient,
    /// Compare against a stored baseline, failing if a benchmark has no stored
    /// baseline.
    CompareStrict,
    /// Save the new results as the baseline, overwriting any stored baseline.
    Save,
    /// Discard the new results entirely.
    Discard,
}

/// Enum used to select the plotting backend.
#[derive(Debug, Clone, Copy)]
pub enum PlottingBackend {
    /// Plot using the gnuplot command-line tool.
    Gnuplot,
    /// Plot using the pure-Rust plotters library.
    Plotters,
    /// Do not plot.
    None,
}
impl PlottingBackend {
    fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
        match self {
            PlottingBackend::Gnuplot => Some(Box::<Gnuplot>::default()),
            #[cfg(feature = "plotters")]
            PlottingBackend::Plotters => Some(Box::<PlottersBackend>::default()),
            #[cfg(not(feature = "plotters"))]
            PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
            PlottingBackend::None => None,
        }
    }
}

#[derive(Debug, Clone)]
pub(crate) enum Mode {
    /// Run benchmarks normally.
    Benchmark,
    /// List all benchmarks and exit.
    List(ListFormat),
    /// Run the benchmarks once to verify that they execute, without measuring.
    Test,
    /// Iterate the benchmarks for a given time without analyzing, for profiling.
    Profile(Duration),
}
impl Mode {
    pub fn is_benchmark(&self) -> bool {
        matches!(self, Mode::Benchmark)
    }

    pub fn is_terse(&self) -> bool {
        matches!(self, Mode::List(ListFormat::Terse))
    }
}

#[derive(Debug, Default, Clone)]
pub(crate) enum ListFormat {
    /// The human-readable format, on by default.
    #[default]
    Pretty,
    /// The machine-readable format: one benchmark name per line.
    Terse,
}

/// Benchmark filtering support.
#[derive(Clone, Debug)]
pub enum BenchmarkFilter {
    /// Run all benchmarks.
    AcceptAll,
    /// Run benchmarks whose IDs match this regex.
    Regex(Regex),
    /// Run the benchmark whose ID is exactly this string.
    Exact(String),
    /// Do not run any benchmarks.
    RejectAll,
}

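/// The benchmark manager.
///
/// `Criterion` lets you configure and execute benchmarks.
///
/// Each benchmark consists of four phases:
///
/// - **Warm-up**: The routine is repeatedly executed, to let the CPU/OS/JIT/interpreter
///   adapt to the new load.
/// - **Measurement**: The routine is repeatedly executed, and timing information is
///   collected into a sample.
/// - **Analysis**: The sample is analyzed and distilled into meaningful statistics that
///   get reported to stdout, stored in files, and plotted.
/// - **Comparison**: The current sample is compared with the sample obtained in the
///   previous benchmark.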
pub struct Criterion<M: Measurement = WallTime> {
    config: BenchmarkConfig,
    filter: BenchmarkFilter,
    report: Reports,
    output_directory: PathBuf,
    baseline_directory: String,
    baseline: Baseline,
    load_baseline: Option<String>,
    all_directories: HashSet<String>,
    all_titles: HashSet<String>,
    measurement: M,
    profiler: Box<RefCell<dyn Profiler>>,
    connection: Option<MutexGuard<'static, Connection>>,
    mode: Mode,
}

/// Returns the Cargo target directory, possibly calling `cargo metadata` to
/// figure it out.
fn cargo_target_directory() -> Option<PathBuf> {
    #[derive(Deserialize)]
    struct Metadata {
        target_directory: PathBuf,
    }

    env::var_os("CARGO_TARGET_DIR")
        .map(PathBuf::from)
        .or_else(|| {
            let output = Command::new(env::var_os("CARGO")?)
                .args(["metadata", "--format-version", "1"])
                .output()
                .ok()?;
            let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
            Some(metadata.target_directory)
        })
}

impl Default for Criterion {
    /// Creates a benchmark manager with the following default settings:
    ///
    /// - Sample size: 100 measurements
    /// - Warm-up time: 3 s
    /// - Measurement time: 5 s
    /// - Bootstrap size: 100 000 resamples
    /// - Noise threshold: 0.01 (1%)
    /// - Confidence level: 95%
    /// - Significance level: 5%
    /// - Plotting: enabled, using gnuplot if available or plotters if it isn't
    /// - No filter
    fn default() -> Criterion {
        let reports = Reports {
            cli_enabled: true,
            cli: CliReport::new(false, false, CliVerbosity::Normal),
            bencher_enabled: false,
            bencher: BencherReport,
            html: default_plotting_backend().create_plotter().map(Html::new),
            csv_enabled: cfg!(feature = "csv_output"),
        };

        let mut criterion = Criterion {
            config: BenchmarkConfig {
                confidence_level: 0.95,
                measurement_time: Duration::from_secs(5),
                noise_threshold: 0.01,
                nresamples: 100_000,
                sample_size: 100,
                significance_level: 0.05,
                warm_up_time: Duration::from_secs(3),
                sampling_mode: SamplingMode::Auto,
                quick_mode: false,
            },
            filter: BenchmarkFilter::AcceptAll,
            report: reports,
            baseline_directory: "base".to_owned(),
            baseline: Baseline::Save,
            load_baseline: None,
            output_directory: default_output_directory().clone(),
            all_directories: HashSet::new(),
            all_titles: HashSet::new(),
            measurement: WallTime,
            profiler: Box::new(RefCell::new(ExternalProfiler)),
            connection: cargo_criterion_connection()
                .as_ref()
                .map(|mtx| mtx.lock().unwrap()),
            mode: Mode::Benchmark,
        };

        if criterion.connection.is_some() {
            // Disable all reports when connected to cargo-criterion; it does the reporting.
            criterion.report.cli_enabled = false;
            criterion.report.bencher_enabled = false;
            criterion.report.csv_enabled = false;
            criterion.report.html = None;
        }
        criterion
    }
}

impl<M: Measurement> Criterion<M> {
    /// Changes the measurement for the benchmarks run with this runner. See the
    /// [`measurement`] module for more details.
    pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
        // Struct update syntax can't change the type parameter, so move each
        // field across explicitly.
        Criterion {
            config: self.config,
            filter: self.filter,
            report: self.report,
            baseline_directory: self.baseline_directory,
            baseline: self.baseline,
            load_baseline: self.load_baseline,
            output_directory: self.output_directory,
            all_directories: self.all_directories,
            all_titles: self.all_titles,
            measurement: m,
            profiler: self.profiler,
            connection: self.connection,
            mode: self.mode,
        }
    }

    /// Changes the internal profiler for benchmarks run with this runner. See
    /// the [`profiler`] module for more details.
    #[must_use]
    pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
        Criterion {
            profiler: Box::new(RefCell::new(p)),
            ..self
        }
    }

    /// Set the plotting backend. By default, Criterion.rs will use the gnuplot
    /// backend if gnuplot is available, or the plotters backend if it isn't.
    ///
    /// # Panics
    ///
    /// Panics if `backend` is `PlottingBackend::Gnuplot` and gnuplot is not
    /// available.
    #[must_use]
    pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
        if let PlottingBackend::Gnuplot = backend {
            assert!(
                gnuplot_version().is_ok(),
                "Gnuplot plotting backend was requested, but gnuplot is not available. \
                 To continue, either install Gnuplot or allow Criterion.rs to fall back \
                 to using plotters."
            );
        }

        self.report.html = backend.create_plotter().map(Html::new);
        self
    }

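    /// Changes the default size of the sample for benchmarks run with this
    /// runner. A bigger sample should yield a more precise estimate, if paired
    /// with a sufficiently long measurement time.
    ///
    /// A minimal sketch of overriding defaults through a custom configuration
    /// function (the name `custom_criterion` is illustrative):
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, Criterion};
    ///
    /// fn custom_criterion() -> Criterion {
    ///     Criterion::default().sample_size(50)
    /// }
    ///
    /// fn bench(c: &mut Criterion) {
    ///     c.bench_function("example", |b| b.iter(|| 1 + 1));
    /// }
    ///
    /// criterion_group! {
    ///     name = benches;
    ///     config = custom_criterion();
    ///     targets = bench
    /// }
    /// criterion_main!(benches);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if n is less than 10.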
    #[must_use]
    pub fn sample_size(mut self, n: usize) -> Criterion<M> {
        assert!(n >= 10);

        self.config.sample_size = n;
        self
    }

    /// Changes the default warm-up time for benchmarks run with this runner.
    ///
    /// # Panics
    ///
    /// Panics if the given warm-up time is zero.
    #[must_use]
    pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.warm_up_time = dur;
        self
    }

    /// Changes the default measurement time for benchmarks run with this
    /// runner. With a longer time, the measurement becomes more resilient to
    /// transitory peak loads caused by external programs, at the cost of a
    /// longer run.
    ///
    /// # Panics
    ///
    /// Panics if the given measurement time is zero.
    #[must_use]
    pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.measurement_time = dur;
        self
    }

    /// Changes the default number of resamples used in the bootstrap for
    /// benchmarks run with this runner. More resamples reduce the noise in the
    /// statistics at the cost of a longer analysis.
    ///
    /// # Panics
    ///
    /// Panics if the number of resamples is zero.
    #[must_use]
    pub fn nresamples(mut self, n: usize) -> Criterion<M> {
        assert!(n > 0);
        if n <= 1000 {
            eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
        }

        self.config.nresamples = n;
        self
    }

    /// Changes the default noise threshold for benchmarks run with this runner.
    /// Performance changes smaller than this threshold are considered noise and
    /// ignored when reporting regressions or improvements, even if they are
    /// statistically significant.
    ///
    /// # Panics
    ///
    /// Panics if the threshold is negative.
    #[must_use]
    pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
        assert!(threshold >= 0.0);

        self.config.noise_threshold = threshold;
        self
    }

    /// Changes the default confidence level for benchmarks run with this
    /// runner. The confidence level is the desired probability that the true
    /// runtime lies within the reported confidence interval.
    ///
    /// # Panics
    ///
    /// Panics if the confidence level is not between 0 and 1 (exclusive).
    #[must_use]
    pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
        assert!(cl > 0.0 && cl < 1.0);
        if cl < 0.5 {
            eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
        }

        self.config.confidence_level = cl;
        self
    }

    /// Changes the default significance level for benchmarks run with this
    /// runner. This is the p-value threshold below which a measured change is
    /// reported as statistically significant.
    ///
    /// # Panics
    ///
    /// Panics if the significance level is not between 0 and 1 (exclusive).
    #[must_use]
    pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
        assert!(sl > 0.0 && sl < 1.0);

        self.config.significance_level = sl;
        self
    }

    /// Enables plotting. This is the default, unless plotting was disabled or
    /// no plotting backend is usable.
    ///
    /// # Panics
    ///
    /// Panics if no default plotting backend is available.
    #[must_use]
    pub fn with_plots(mut self) -> Criterion<M> {
        // If running under cargo-criterion, don't set a plotter: reporting is
        // handled by the runner.
        if self.connection.is_none() && self.report.html.is_none() {
            let default_backend = default_plotting_backend().create_plotter();
            if let Some(backend) = default_backend {
                self.report.html = Some(Html::new(backend));
            } else {
                panic!("Cannot find a default plotting backend!");
            }
        }
        self
    }

    /// Disables plotting.
    #[must_use]
    pub fn without_plots(mut self) -> Criterion<M> {
        self.report.html = None;
        self
    }

    /// Names an explicit baseline and enables overwriting the previous results.
    #[must_use]
    pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = Baseline::Save;
        self
    }

    /// Names an explicit baseline and disables overwriting the previous
    /// results. In strict mode, benchmarks missing from the baseline fail
    /// instead of being skipped.
    #[must_use]
    pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = if strict {
            Baseline::CompareStrict
        } else {
            Baseline::CompareLenient
        };
        self
    }

    /// Filters the benchmarks. Only benchmarks whose names contain a match for
    /// the given regular expression will be executed.
    ///
    /// # Panics
    ///
    /// Panics if the given string cannot be parsed as a regular expression.
    #[must_use]
    pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
        let filter_text = filter.into();
        let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
            panic!(
                "Unable to parse '{}' as a regular expression: {}",
                filter_text, err
            )
        });
        self.filter = BenchmarkFilter::Regex(filter);

        self
    }

    /// Only run benchmarks selected by the given [`BenchmarkFilter`].
    pub fn with_benchmark_filter(mut self, filter: BenchmarkFilter) -> Criterion<M> {
        self.filter = filter;

        self
    }

    /// Overrides whether the CLI output will be colored. Usually you would use
    /// the `--color` CLI argument, but this is available for programmatic use
    /// as well.
    #[must_use]
    pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
        self.report.cli.enable_text_coloring = enabled;
        self
    }

    /// Set the output directory (currently for testing only).
    #[must_use]
    #[doc(hidden)]
    pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
        path.clone_into(&mut self.output_directory);

        self
    }

    /// Set the profile time (currently for testing only).
    #[must_use]
    #[doc(hidden)]
    pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
        match profile_time {
            Some(time) => self.mode = Mode::Profile(time),
            None => self.mode = Mode::Benchmark,
        }

        self
    }

    /// Generates the final summary at the end of a run.
    #[doc(hidden)]
    pub fn final_summary(&self) {
        if !self.mode.is_benchmark() {
            return;
        }

        let report_context = ReportContext {
            output_directory: self.output_directory.clone(),
            plot_config: PlotConfiguration::default(),
        };

        self.report.final_summary(&report_context);
    }

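    /// Configures this `Criterion` struct based on the command-line arguments
    /// to this process.
    ///
    /// A minimal sketch of a hand-rolled `main`; normally the
    /// `criterion_group!`/`criterion_main!` macros generate this for you:
    ///
    /// ```no_run
    /// use criterion::Criterion;
    ///
    /// fn main() {
    ///     let mut c = Criterion::default().configure_from_args();
    ///     c.bench_function("example", |b| b.iter(|| 1 + 1));
    ///     c.final_summary();
    /// }
    /// ```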
    #[must_use]
    #[allow(clippy::cognitive_complexity)]
    pub fn configure_from_args(mut self) -> Criterion<M> {
        use clap::{value_parser, Arg, Command};
        let matches = Command::new("Criterion Benchmark")
            .arg(Arg::new("FILTER")
                .help("Skip benchmarks whose names do not contain FILTER.")
                .index(1))
            .arg(Arg::new("color")
                .short('c')
                .long("color")
                .alias("colour")
                .value_parser(["auto", "always", "never"])
                .default_value("auto")
                .help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
            .arg(Arg::new("verbose")
                .short('v')
                .long("verbose")
                .num_args(0)
                .help("Print additional statistical information."))
            .arg(Arg::new("quiet")
                .long("quiet")
                .num_args(0)
                .conflicts_with("verbose")
                .help("Print only the benchmark results."))
            .arg(Arg::new("noplot")
                .short('n')
                .long("noplot")
                .num_args(0)
                .help("Disable plot and HTML generation."))
            .arg(Arg::new("save-baseline")
                .short('s')
                .long("save-baseline")
                .default_value("base")
                .help("Save results under a named baseline."))
            .arg(Arg::new("discard-baseline")
                .long("discard-baseline")
                .num_args(0)
                .conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"])
                .help("Discard benchmark results."))
            .arg(Arg::new("baseline")
                .short('b')
                .long("baseline")
                .conflicts_with_all(["save-baseline", "baseline-lenient"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
            .arg(Arg::new("baseline-lenient")
                .long("baseline-lenient")
                .conflicts_with_all(["save-baseline", "baseline"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
            .arg(Arg::new("list")
                .long("list")
                .num_args(0)
                .help("List all benchmarks")
                .conflicts_with_all(["test", "profile-time"]))
            .arg(Arg::new("format")
                .long("format")
                .value_parser(["pretty", "terse"])
                .default_value("pretty")
                .help("Output formatting"))
            .arg(Arg::new("ignored")
                .long("ignored")
                .num_args(0)
                .help("List or run ignored benchmarks (currently means skip all benchmarks)"))
            .arg(Arg::new("exact")
                .long("exact")
                .num_args(0)
                .help("Run benchmarks that exactly match the provided filter"))
            .arg(Arg::new("profile-time")
                .long("profile-time")
                .value_parser(value_parser!(f64))
                .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
                .conflicts_with_all(["test", "list"]))
            .arg(Arg::new("load-baseline")
                .long("load-baseline")
                .conflicts_with("profile-time")
                .requires("baseline")
                .help("Load a previous baseline instead of sampling new data."))
            .arg(Arg::new("sample-size")
                .long("sample-size")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
            .arg(Arg::new("warm-up-time")
                .long("warm-up-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
            .arg(Arg::new("measurement-time")
                .long("measurement-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
            .arg(Arg::new("nresamples")
                .long("nresamples")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
            .arg(Arg::new("noise-threshold")
                .long("noise-threshold")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
            .arg(Arg::new("confidence-level")
                .long("confidence-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
            .arg(Arg::new("significance-level")
                .long("significance-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
            .arg(Arg::new("quick")
                .long("quick")
                .num_args(0)
                .conflicts_with("sample-size")
                .help(format!("Benchmark only until the significance level has been reached. [default: {}]", self.config.quick_mode)))
            .arg(Arg::new("test")
                .hide(true)
                .long("test")
                .num_args(0)
                .help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
                .conflicts_with_all(["list", "profile-time"]))
            .arg(Arg::new("bench")
                .hide(true)
                .long("bench")
                .num_args(0))
            .arg(Arg::new("plotting-backend")
                .long("plotting-backend")
                .value_parser(["gnuplot", "plotters"])
                .help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
            .arg(Arg::new("output-format")
                .long("output-format")
                .value_parser(["criterion", "bencher"])
                .default_value("criterion")
                .help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
            .arg(Arg::new("nocapture")
                .long("nocapture")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("show-output")
                .long("show-output")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("include-ignored")
                .long("include-ignored")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("version")
                .hide(true)
                .short('V')
                .long("version")
                .num_args(0))
            .after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.

To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.

To test that the benchmarks work, run `cargo test --benches`

NOTE: If you see an 'unrecognized option' error using any of the options above, see:
https://bheisler.github.io/criterion.rs/book/faq.html
")
            .get_matches();

        if self.connection.is_some() {
            if let Some(color) = matches.get_one::<String>("color") {
                if color != "auto" {
                    eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
                }
            }
            if matches.get_flag("verbose") {
                eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
            }
            if matches.get_flag("noplot") {
                eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
            }
            if let Some(backend) = matches.get_one::<String>("plotting-backend") {
                eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
            }
            if let Some(format) = matches.get_one::<String>("output-format") {
                if format != "criterion" {
                    eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
                }
            }

            if matches.contains_id("baseline")
                || matches
                    .get_one::<String>("save-baseline")
                    .is_some_and(|base| base != "base")
                || matches.contains_id("load-baseline")
            {
                eprintln!("Error: baselines are not supported when running with cargo-criterion.");
                std::process::exit(1);
            }
        }

        let bench = matches.get_flag("bench");
        let test = matches.get_flag("test");
        let test_mode = match (bench, test) {
            (true, true) => true,   // cargo bench -- --test should run tests
            (true, false) => false, // cargo bench should run benches
            (false, _) => true,     // cargo test --benches should run tests
        };

        self.mode = if matches.get_flag("list") {
            let list_format = match matches
                .get_one::<String>("format")
                .expect("a default value was provided for this")
                .as_str()
            {
                "pretty" => ListFormat::Pretty,
                "terse" => ListFormat::Terse,
                other => unreachable!(
                    "unrecognized value for --format that isn't part of possible-values: {}",
                    other
                ),
            };
            Mode::List(list_format)
        } else if test_mode {
            Mode::Test
        } else if let Some(&num_seconds) = matches.get_one("profile-time") {
            if num_seconds < 1.0 {
                eprintln!("Profile time must be at least one second.");
                std::process::exit(1);
            }

            Mode::Profile(Duration::from_secs_f64(num_seconds))
        } else {
            Mode::Benchmark
        };

        // If we're not actually going to run any benchmarks, disable the
        // connection to the cargo-criterion runner.
        if !self.mode.is_benchmark() {
            self.connection = None;
        }

        let filter = if matches.get_flag("ignored") {
            // --ignored overwrites any name-based filters passed in.
            BenchmarkFilter::RejectAll
        } else if let Some(filter) = matches.get_one::<String>("FILTER") {
            if matches.get_flag("exact") {
                BenchmarkFilter::Exact(filter.to_owned())
            } else {
                let regex = Regex::new(filter).unwrap_or_else(|err| {
                    panic!(
                        "Unable to parse '{}' as a regular expression: {}",
                        filter, err
                    )
                });
                BenchmarkFilter::Regex(regex)
            }
        } else {
            BenchmarkFilter::AcceptAll
        };
        self = self.with_benchmark_filter(filter);

        match matches.get_one("plotting-backend").map(String::as_str) {
            // Use plotting_backend() here to re-use its panic behavior if gnuplot is not available.
            Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
            Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
            Some(val) => panic!("Unexpected plotting backend '{}'", val),
            None => {}
        }

        if matches.get_flag("noplot") {
            self = self.without_plots();
        }

        if let Some(dir) = matches.get_one::<String>("save-baseline") {
            self.baseline = Baseline::Save;
            dir.clone_into(&mut self.baseline_directory);
        }
        if matches.get_flag("discard-baseline") {
            self.baseline = Baseline::Discard;
        }
        if let Some(dir) = matches.get_one::<String>("baseline") {
            self.baseline = Baseline::CompareStrict;
            dir.clone_into(&mut self.baseline_directory);
        }
        if let Some(dir) = matches.get_one::<String>("baseline-lenient") {
            self.baseline = Baseline::CompareLenient;
            dir.clone_into(&mut self.baseline_directory);
        }

        if self.connection.is_some() {
            // Disable all reports when connected to cargo-criterion; it does the reporting.
            self.report.cli_enabled = false;
            self.report.bencher_enabled = false;
            self.report.csv_enabled = false;
            self.report.html = None;
        } else {
            match matches.get_one("output-format").map(String::as_str) {
                Some("bencher") => {
                    self.report.bencher_enabled = true;
                    self.report.cli_enabled = false;
                }
                _ => {
                    let verbose = matches.get_flag("verbose");
                    let verbosity = if verbose {
                        CliVerbosity::Verbose
                    } else if matches.get_flag("quiet") {
                        CliVerbosity::Quiet
                    } else {
                        CliVerbosity::Normal
                    };
                    let stdout_isatty = stdout().is_terminal();
                    let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
                    let enable_text_coloring;
                    match matches.get_one("color").map(String::as_str) {
                        Some("always") => {
                            enable_text_coloring = true;
                        }
                        Some("never") => {
                            enable_text_coloring = false;
                            enable_text_overwrite = false;
                        }
                        _ => enable_text_coloring = stdout_isatty,
                    };
                    self.report.bencher_enabled = false;
                    self.report.cli_enabled = true;
                    self.report.cli =
                        CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
                }
            };
        }

        if let Some(dir) = matches.get_one::<String>("load-baseline") {
            self.load_baseline = Some(dir.to_owned());
        }

        if let Some(&num_size) = matches.get_one("sample-size") {
            assert!(num_size >= 10);
            self.config.sample_size = num_size;
        }
        if let Some(&num_seconds) = matches.get_one("warm-up-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.warm_up_time = dur;
        }
        if let Some(&num_seconds) = matches.get_one("measurement-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.measurement_time = dur;
        }
        if let Some(&num_resamples) = matches.get_one("nresamples") {
            assert!(num_resamples > 0);

            self.config.nresamples = num_resamples;
        }
        if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") {
            assert!(num_noise_threshold > 0.0);

            self.config.noise_threshold = num_noise_threshold;
        }
        if let Some(&num_confidence_level) = matches.get_one("confidence-level") {
            assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);

            self.config.confidence_level = num_confidence_level;
        }
        if let Some(&num_significance_level) = matches.get_one("significance-level") {
            assert!(num_significance_level > 0.0 && num_significance_level < 1.0);

            self.config.significance_level = num_significance_level;
        }

        if matches.get_flag("quick") {
            self.config.quick_mode = true;
        }

        self
    }

    fn filter_matches(&self, id: &str) -> bool {
        match &self.filter {
            BenchmarkFilter::AcceptAll => true,
            BenchmarkFilter::Regex(regex) => regex.is_match(id),
            BenchmarkFilter::Exact(exact) => id == exact,
            BenchmarkFilter::RejectAll => false,
        }
    }

    /// Returns true iff the results of this run should be saved as a baseline
    /// on disk.
    fn should_save_baseline(&self) -> bool {
        self.connection.is_none()
            && self.load_baseline.is_none()
            && !matches!(self.baseline, Baseline::Discard)
    }

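    /// Returns a benchmark group. All benchmarks performed using a group will
    /// be grouped together in the final report.
    ///
    /// A minimal sketch of typical usage (the benchmark bodies are illustrative):
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, Criterion};
    ///
    /// fn bench_simple(c: &mut Criterion) {
    ///     let mut group = c.benchmark_group("group-example");
    ///     group.bench_function("Bench 1", |b| b.iter(|| 1));
    ///     group.bench_function("Bench 2", |b| b.iter(|| 2));
    ///     group.finish();
    /// }
    ///
    /// criterion_group!(benches, bench_simple);
    /// criterion_main!(benches);
    /// ```
    ///
    /// # Panics
    ///
    /// Panics if the group name is empty.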
    pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
        let group_name = group_name.into();
        assert!(!group_name.is_empty(), "Group name must not be empty.");

        if let Some(conn) = &self.connection {
            conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
                .unwrap();
        }

        BenchmarkGroup::new(self, group_name)
    }
}
impl<M> Criterion<M>
where
    M: Measurement + 'static,
{
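    /// Benchmarks a function. For comparing multiple functions, see
    /// [`benchmark_group`](Self::benchmark_group).
    ///
    /// A minimal sketch (the closure body is illustrative):
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, Criterion};
    /// use std::hint::black_box;
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // The Bencher passed to the closure runs the routine many times per sample.
    ///     c.bench_function("add", |b| b.iter(|| black_box(2 + 2)));
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```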
    pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>),
    {
        self.benchmark_group(id)
            .bench_function(BenchmarkId::no_function(), f);
        self
    }

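    /// Benchmarks a function with an input. For comparing multiple functions or
    /// multiple inputs, see [`benchmark_group`](Self::benchmark_group).
    ///
    /// A minimal sketch (the input value and closure are illustrative):
    ///
    /// ```no_run
    /// use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     let input = 5u64;
    ///     c.bench_with_input(BenchmarkId::new("add_ten", input), &input, |b, &i| {
    ///         b.iter(|| i + 10)
    ///     });
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```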
    pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>, &I),
    {
        // It's possible to use BenchmarkId::from_parameter to create a benchmark ID with no
        // function name. That's intended for use with BenchmarkGroups where the function name
        // isn't necessary, but here it is.
        let group_name = id.function_name.expect(
            "Cannot use BenchmarkId::from_parameter with Criterion::bench_with_input. \
             Consider using a BenchmarkGroup or BenchmarkId::new instead.",
        );
        // Guaranteed safe because external callers can't create benchmark IDs without a parameter.
        let parameter = id.parameter.unwrap();
        self.benchmark_group(group_name).bench_with_input(
            BenchmarkId::no_function_with_input(parameter),
            input,
            f,
        );
        self
    }
}

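/// Enum representing different ways of measuring the throughput of benchmarked
/// code. If the throughput setting is configured for a benchmark then the
/// estimated throughput will be reported, as well as the time per iteration.
///
/// A minimal sketch of configuring a throughput on a benchmark group (the
/// checksum routine is illustrative):
///
/// ```no_run
/// use criterion::{criterion_group, criterion_main, Criterion, Throughput};
///
/// fn bench(c: &mut Criterion) {
///     let bytes = vec![1u8; 1024];
///     let mut group = c.benchmark_group("throughput-example");
///     // Report results in bytes/second, based on the input length.
///     group.throughput(Throughput::Bytes(bytes.len() as u64));
///     group.bench_function("sum", |b| {
///         b.iter(|| bytes.iter().map(|&x| x as u64).sum::<u64>())
///     });
///     group.finish();
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```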
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum Throughput {
    /// Measure throughput in terms of bytes/second. The value should be the
    /// number of bytes processed by one iteration of the benchmarked code.
    /// Reported using binary multiples (KiB = 1024 bytes).
    Bytes(u64),

    /// Equivalent to `Bytes`, but reported using decimal multiples
    /// (KB = 1000 bytes).
    BytesDecimal(u64),

    /// Measure throughput in terms of elements/second. The value should be the
    /// number of elements processed by one iteration of the benchmarked code.
    Elements(u64),
}

/// Axis scaling type.
#[derive(Debug, Default, Clone, Copy)]
pub enum AxisScale {
    /// Axes scale linearly (the default).
    #[default]
    Linear,

    /// Axes scale logarithmically. Useful when the benchmark values span
    /// several orders of magnitude.
    Logarithmic,
}

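/// Struct holding advanced plot configuration for a benchmark group.
///
/// A minimal sketch of applying a plot configuration to a group:
///
/// ```no_run
/// use criterion::{criterion_group, criterion_main, AxisScale, Criterion, PlotConfiguration};
///
/// fn bench(c: &mut Criterion) {
///     // Log-scale the summary axes; useful when times span orders of magnitude.
///     let plot_config = PlotConfiguration::default().summary_scale(AxisScale::Logarithmic);
///     let mut group = c.benchmark_group("log-scale-example");
///     group.plot_config(plot_config);
///     group.bench_function("bench", |b| b.iter(|| 1 + 1));
///     group.finish();
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```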
#[derive(Debug, Default, Clone)]
pub struct PlotConfiguration {
    summary_scale: AxisScale,
}

impl PlotConfiguration {
    /// Set the axis scale (linear or logarithmic) to use for the summary plots
    /// of a benchmark group. Defaults to linear.
    #[must_use]
    pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
        self.summary_scale = new_scale;
        self
    }
}

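/// The sampling mode controls how Criterion.rs chooses the iteration counts for
/// the samples of a benchmark. It does not affect the number of samples, only
/// how iterations are distributed among them.
///
/// A minimal sketch of selecting a sampling mode for a group of long-running
/// benchmarks (the sleep stands in for an expensive routine):
///
/// ```no_run
/// use criterion::{criterion_group, criterion_main, Criterion, SamplingMode};
/// use std::time::Duration;
///
/// fn bench(c: &mut Criterion) {
///     let mut group = c.benchmark_group("flat-sampling-example");
///     // Flat sampling uses the same iteration count for every sample.
///     group.sampling_mode(SamplingMode::Flat);
///     group.bench_function("slow", |b| {
///         b.iter(|| std::thread::sleep(Duration::from_millis(1)))
///     });
///     group.finish();
/// }
///
/// criterion_group!(benches, bench);
/// criterion_main!(benches);
/// ```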
#[derive(Debug, Default, Clone, Copy)]
pub enum SamplingMode {
    /// Criterion.rs should choose a sampling mode automatically (the default).
    #[default]
    Auto,

    /// Scale the iteration count in each sample linearly. Prefer this for
    /// faster benchmarks, as it produces a better statistical sample.
    Linear,

    /// Use the same iteration count for every sample. Intended for
    /// long-running benchmarks where linear sampling would take too long.
    Flat,
}

impl SamplingMode {
    pub(crate) fn choose_sampling_mode(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: f64,
    ) -> ActualSamplingMode {
        match self {
            SamplingMode::Linear => ActualSamplingMode::Linear,
            SamplingMode::Flat => ActualSamplingMode::Flat,
            SamplingMode::Auto => {
                // Estimate the total run time for linear sampling: samples of
                // 1*d, 2*d, ..., n*d iterations sum to n(n+1)/2 * d iterations.
                let total_runs = sample_count * (sample_count + 1) / 2;
                let d =
                    (target_time / warmup_mean_execution_time / total_runs as f64).ceil() as u64;
                let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time;

                // Use flat sampling if the expected run time would overshoot
                // the target time by more than 2x.
                if expected_ns > (2.0 * target_time) {
                    ActualSamplingMode::Flat
                } else {
                    ActualSamplingMode::Linear
                }
            }
        }
    }
}

#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub(crate) enum ActualSamplingMode {
    Linear,
    Flat,
}

impl ActualSamplingMode {
    pub(crate) fn iteration_counts(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: &Duration,
    ) -> Vec<u64> {
        match self {
            ActualSamplingMode::Linear => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos();
                // Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns for d.
                let total_runs = n * (n + 1) / 2;
                let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
                let expected_ns = total_runs as f64 * d as f64 * met;

                if d == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!(
                        "\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                        n, target_time, actual_time
                    );

                    if recommended_sample_size != n {
                        eprintln!(
                            ", enable flat sampling, or reduce sample count to {}.",
                            recommended_sample_size
                        );
                    } else {
                        eprintln!(" or enable flat sampling.");
                    }
                }

                (1..(n + 1)).map(|a| a * d).collect::<Vec<u64>>()
            }
            ActualSamplingMode::Flat => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos() as f64;
                // Split the target time evenly across samples and use the same
                // iteration count for each.
                let time_per_sample = m_ns / (n as f64);
                let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);

                let expected_ns = met * (iterations_per_sample * n) as f64;

                if iterations_per_sample == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!(
                        "\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                        n, target_time, actual_time
                    );

                    if recommended_sample_size != n {
                        eprintln!(", or reduce sample count to {}.", recommended_sample_size);
                    } else {
                        eprintln!(".");
                    }
                }

                vec![iterations_per_sample; n as usize]
            }
        }
    }

    fn is_linear(&self) -> bool {
        matches!(self, ActualSamplingMode::Linear)
    }

    fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 {
        // Estimate the largest sample count n whose n(n+1)/2 total iterations
        // (at d == 1) fit in the target time, via the quadratic formula.
        let c = target_time / met;
        let sample_size = (-1.0 + (4.0 * c).sqrt()) / 2.0;
        let sample_size = sample_size as u64;

        // Round down to the nearest ten to make the recommendation look nicer.
        let sample_size = (sample_size / 10) * 10;

        // Clamp to the minimum allowed sample size.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }

    fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 {
        let sample_size = (target_time / met) as u64;

        // Round down to the nearest ten to make the recommendation look nicer.
        let sample_size = (sample_size / 10) * 10;

        // Clamp to the minimum allowed sample size.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct SavedSample {
    sampling_mode: ActualSamplingMode,
    iters: Vec<f64>,
    times: Vec<f64>,
}

/// Custom test-framework runner. Should not be called directly.
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
    for bench in benches {
        bench();
    }
    Criterion::default().configure_from_args().final_summary();
}