criterion/lib.rs

//! A statistics-driven micro-benchmarking library written in Rust.
//!
//! This crate is a microbenchmarking library which aims to provide strong
//! statistical confidence in detecting and estimating the size of performance
//! improvements and regressions, while also being easy to use.
//!
//! See
//! [the user guide](https://bheisler.github.io/criterion.rs/book/index.html)
//! for examples as well as details on the measurement and analysis process,
//! and the output.
//!
//! ## Features:
//! * Collects detailed statistics, providing strong confidence that changes
//!   to performance are real, not measurement noise.
//! * Produces detailed charts, providing thorough understanding of your code's
//!   performance behavior.

#![warn(clippy::doc_markdown, missing_docs)]
#![warn(bare_trait_objects)]
#![allow(
    clippy::just_underscores_and_digits, // Used in the stats code
    clippy::transmute_ptr_to_ptr, // Used in the stats code
)]

#[cfg(all(feature = "rayon", target_arch = "wasm32"))]
compile_error!("Rayon cannot be used when targeting wasm32. Try disabling default features.");

use regex::Regex;
use serde::{Deserialize, Serialize};

// Needs to be declared before other modules
// in order to be usable there.
#[macro_use]
mod macros_private;
#[macro_use]
mod analysis;
mod benchmark;
#[macro_use]
mod benchmark_group;
pub mod async_executor;
mod bencher;
mod connection;
#[cfg(feature = "csv_output")]
mod csv_report;
mod error;
mod estimate;
mod format;
mod fs;
mod html;
mod kde;
mod macros;
pub mod measurement;
mod plot;
pub mod profiler;
mod report;
mod routine;
mod stats;

use std::cell::RefCell;
use std::collections::HashSet;
use std::env;
use std::io::{stdout, IsTerminal};
use std::net::TcpStream;
use std::path::{Path, PathBuf};
use std::process::Command;
use std::sync::{Mutex, MutexGuard};
use std::time::Duration;

use criterion_plot::{Version, VersionError};
use std::sync::OnceLock;

use crate::benchmark::BenchmarkConfig;
use crate::connection::Connection;
use crate::connection::OutgoingMessage;
use crate::html::Html;
use crate::measurement::{Measurement, WallTime};
#[cfg(feature = "plotters")]
use crate::plot::PlottersBackend;
use crate::plot::{Gnuplot, Plotter};
use crate::profiler::{ExternalProfiler, Profiler};
use crate::report::{BencherReport, CliReport, CliVerbosity, Report, ReportContext, Reports};

#[cfg(feature = "async")]
pub use crate::bencher::AsyncBencher;
pub use crate::bencher::Bencher;
pub use crate::benchmark_group::{BenchmarkGroup, BenchmarkId};

fn gnuplot_version() -> &'static Result<Version, VersionError> {
    static GNUPLOT_VERSION: OnceLock<Result<Version, VersionError>> = OnceLock::new();

    GNUPLOT_VERSION.get_or_init(criterion_plot::version)
}

fn default_plotting_backend() -> &'static PlottingBackend {
    static DEFAULT_PLOTTING_BACKEND: OnceLock<PlottingBackend> = OnceLock::new();

    DEFAULT_PLOTTING_BACKEND.get_or_init(|| match gnuplot_version() {
        Ok(_) => PlottingBackend::Gnuplot,
        #[cfg(feature = "plotters")]
        Err(e) => {
            match e {
                VersionError::Exec(_) => eprintln!("Gnuplot not found, using plotters backend"),
                e => eprintln!(
                    "Gnuplot not found or not usable, using plotters backend\n{}",
                    e
                ),
            };
            PlottingBackend::Plotters
        }
        #[cfg(not(feature = "plotters"))]
        Err(_) => PlottingBackend::None,
    })
}

fn cargo_criterion_connection() -> &'static Option<Mutex<Connection>> {
    static CARGO_CRITERION_CONNECTION: OnceLock<Option<Mutex<Connection>>> = OnceLock::new();

    CARGO_CRITERION_CONNECTION.get_or_init(|| match std::env::var("CARGO_CRITERION_PORT") {
        Ok(port_str) => {
            let port: u16 = port_str.parse().ok()?;
            let stream = TcpStream::connect(("localhost", port)).ok()?;
            Some(Mutex::new(Connection::new(stream).ok()?))
        }
        Err(_) => None,
    })
}

fn default_output_directory() -> &'static PathBuf {
    static DEFAULT_OUTPUT_DIRECTORY: OnceLock<PathBuf> = OnceLock::new();

    DEFAULT_OUTPUT_DIRECTORY.get_or_init(|| {
        // Set criterion home to (in descending order of preference):
        // - $CRITERION_HOME (cargo-criterion sets this, but other users could as well)
        // - $CARGO_TARGET_DIR/criterion
        // - the cargo target dir from `cargo metadata`
        // - ./target/criterion
        if let Some(value) = env::var_os("CRITERION_HOME") {
            PathBuf::from(value)
        } else if let Some(path) = cargo_target_directory() {
            path.join("criterion")
        } else {
            PathBuf::from("target/criterion")
        }
    })
}

fn debug_enabled() -> bool {
    static DEBUG_ENABLED: OnceLock<bool> = OnceLock::new();

    *DEBUG_ENABLED.get_or_init(|| std::env::var_os("CRITERION_DEBUG").is_some())
}

/// A function that is opaque to the optimizer, used to prevent the compiler from
/// optimizing away computations in a benchmark.
#[deprecated(note = "use `std::hint::black_box()` instead")]
pub fn black_box<T>(dummy: T) -> T {
    std::hint::black_box(dummy)
}

/// Argument to [`Bencher::iter_batched`] and [`Bencher::iter_batched_ref`] which controls the
/// batch size.
///
/// Generally speaking, almost all benchmarks should use `SmallInput`. If the input or the result
/// of the benchmark routine is large enough that `SmallInput` causes out-of-memory errors,
/// `LargeInput` can be used to reduce memory usage at the cost of increasing the measurement
/// overhead. If the input or the result is extremely large (or if it holds some
/// limited external resource like a file handle), `PerIteration` will set the number of iterations
/// per batch to exactly one. `PerIteration` can increase the measurement overhead substantially
/// and should be avoided wherever possible.
///
/// Each value lists an estimate of the measurement overhead. This is intended as a rough guide
/// to assist in choosing an option; it should not be relied upon. In particular, it is not valid
/// to subtract the listed overhead from the measurement and assume that the result represents the
/// true runtime of a function. The actual measurement overhead for your specific benchmark depends
/// on the details of the function you're benchmarking and the hardware and operating
/// system running the benchmark.
///
/// With that said, if the runtime of your function is small relative to the measurement overhead,
/// it will be difficult to take accurate measurements. In this situation, the best option is to use
/// [`Bencher::iter`] which has next-to-zero measurement overhead.
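///
/// As a rough sketch of typical usage (the setup closure and the sort routine
/// below are illustrative, not part of this crate):
///
/// ```rust
/// use criterion::{BatchSize, Criterion};
///
/// fn bench(c: &mut Criterion) {
///     c.bench_function("sort", |b| {
///         b.iter_batched(
///             || vec![3u64, 1, 2],     // setup: build a fresh input for each batch
///             |mut v| { v.sort(); v }, // routine: the code being measured
///             BatchSize::SmallInput,   // almost always the right choice
///         )
///     });
/// }
/// ```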
#[derive(Debug, Eq, PartialEq, Copy, Hash, Clone)]
pub enum BatchSize {
    /// `SmallInput` indicates that the input to the benchmark routine (the value returned from
    /// the setup routine) is small enough that millions of values can be safely held in memory.
    /// Always prefer `SmallInput` unless the benchmark is using too much memory.
    ///
    /// In testing, the maximum measurement overhead from benchmarking with `SmallInput` is on the
    /// order of 500 picoseconds. This is presented as a rough guide; your results may vary.
    SmallInput,

    /// `LargeInput` indicates that the input to the benchmark routine or the value returned from
    /// that routine is large. This will reduce the memory usage but increase the measurement
    /// overhead.
    ///
    /// In testing, the maximum measurement overhead from benchmarking with `LargeInput` is on the
    /// order of 750 picoseconds. This is presented as a rough guide; your results may vary.
    LargeInput,

    /// `PerIteration` indicates that the input to the benchmark routine or the value returned from
    /// that routine is extremely large or holds some limited resource, such that holding many values
    /// in memory at once is infeasible. This provides the worst measurement overhead, but the
    /// lowest memory usage.
    ///
    /// In testing, the maximum measurement overhead from benchmarking with `PerIteration` is on the
    /// order of 350 nanoseconds or 350,000 picoseconds. This is presented as a rough guide; your
    /// results may vary.
    PerIteration,

    /// `NumBatches` will attempt to divide the iterations up into a given number of batches.
    /// A larger number of batches (and thus smaller batches) will reduce memory usage but increase
    /// measurement overhead. This allows the user to choose their own tradeoff between memory usage
    /// and measurement overhead, but care must be taken in tuning the number of batches. Most
    /// benchmarks should use `SmallInput` or `LargeInput` instead.
    NumBatches(u64),

    /// `NumIterations` fixes the batch size to a constant number, specified by the user. This
    /// allows the user to choose their own tradeoff between overhead and memory usage, but care must
    /// be taken in tuning the batch size. In general, the measurement overhead of `NumIterations`
    /// will be larger than that of `NumBatches`. Most benchmarks should use `SmallInput` or
    /// `LargeInput` instead.
    NumIterations(u64),

    #[doc(hidden)]
    __NonExhaustive,
}
impl BatchSize {
    /// Convert to a number of iterations per batch.
    ///
    /// We try to do a constant number of batches regardless of the number of iterations in this
    /// sample. If the measurement overhead is roughly constant regardless of the number of
    /// iterations the analysis of the results later will have an easier time separating the
    /// measurement overhead from the benchmark time.
    fn iters_per_batch(self, iters: u64) -> u64 {
        match self {
            BatchSize::SmallInput => (iters + 10 - 1) / 10,
            BatchSize::LargeInput => (iters + 1000 - 1) / 1000,
            BatchSize::PerIteration => 1,
            BatchSize::NumBatches(batches) => (iters + batches - 1) / batches,
            BatchSize::NumIterations(size) => size,
            BatchSize::__NonExhaustive => panic!("__NonExhaustive is not a valid BatchSize."),
        }
    }
}

/// Baseline describes how the `baseline_directory` is handled.
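///
/// A minimal sketch of choosing baseline behaviour through the builder API
/// (the `"main"` baseline name is illustrative):
///
/// ```rust
/// use criterion::Criterion;
///
/// // Compare against a previously saved "main" baseline; `true` selects the
/// // strict mode (`CompareStrict`), `false` the lenient one (`CompareLenient`).
/// let c = Criterion::default().retain_baseline("main".to_string(), true);
/// ```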
#[derive(Debug, Clone, Copy)]
pub enum Baseline {
    /// `CompareLenient` compares against a previous saved version of the baseline.
    /// If a previous baseline does not exist, the benchmark is run as normal but no comparison occurs.
    CompareLenient,
    /// `CompareStrict` compares against a previous saved version of the baseline.
    /// If a previous baseline does not exist, a panic occurs.
    CompareStrict,
    /// `Save` writes the benchmark results to the baseline directory,
    /// overwriting any results that were previously there.
    Save,
    /// `Discard` benchmark results.
    Discard,
}

/// Enum used to select the plotting backend.
///
/// See [`Criterion::plotting_backend`].
#[derive(Debug, Clone, Copy)]
pub enum PlottingBackend {
    /// Plotting backend which uses the external `gnuplot` command to render plots. This is the
    /// default if the `gnuplot` command is installed.
    Gnuplot,
    /// Plotting backend which uses the Rust 'Plotters' library. This is the default if `gnuplot`
    /// is not installed.
    Plotters,
    /// Null plotting backend which outputs nothing.
    None,
}
impl PlottingBackend {
    fn create_plotter(&self) -> Option<Box<dyn Plotter>> {
        match self {
            PlottingBackend::Gnuplot => Some(Box::<Gnuplot>::default()),
            #[cfg(feature = "plotters")]
            PlottingBackend::Plotters => Some(Box::<PlottersBackend>::default()),
            #[cfg(not(feature = "plotters"))]
            PlottingBackend::Plotters => panic!("Criterion was built without plotters support."),
            PlottingBackend::None => None,
        }
    }
}

#[derive(Debug, Clone)]
/// Enum representing the execution mode.
pub(crate) enum Mode {
    /// Run benchmarks normally.
    Benchmark,
    /// List all benchmarks but do not run them.
    List(ListFormat),
    /// Run benchmarks once to verify that they work, but otherwise do not measure them.
    Test,
    /// Iterate benchmarks for a given length of time but do not analyze or report on them.
    Profile(Duration),
}
impl Mode {
    pub fn is_benchmark(&self) -> bool {
        matches!(self, Mode::Benchmark)
    }

    pub fn is_terse(&self) -> bool {
        matches!(self, Mode::List(ListFormat::Terse))
    }
}

#[derive(Debug, Default, Clone)]
/// Enum representing the list format.
pub(crate) enum ListFormat {
    /// The regular, default format.
    #[default]
    Pretty,
    /// The terse format, where nothing other than the name of the test and ": benchmark" at the end
    /// is printed out.
    Terse,
}

/// Benchmark filtering support.
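///
/// A minimal sketch of applying a filter programmatically rather than via the
/// CLI (the benchmark name is illustrative):
///
/// ```rust
/// use criterion::{BenchmarkFilter, Criterion};
///
/// // Only the benchmark whose id is exactly "fibonacci" will run.
/// let c = Criterion::default()
///     .with_benchmark_filter(BenchmarkFilter::Exact("fibonacci".to_owned()));
/// ```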
#[derive(Clone, Debug)]
pub enum BenchmarkFilter {
    /// Run all benchmarks.
    AcceptAll,
    /// Run benchmarks matching this regex.
    Regex(Regex),
    /// Run the benchmark matching this string exactly.
    Exact(String),
    /// Do not run any benchmarks.
    RejectAll,
}

/// The benchmark manager.
///
/// `Criterion` lets you configure and execute benchmarks.
///
/// Each benchmark consists of four phases:
///
/// - **Warm-up**: The routine is repeatedly executed, to let the CPU/OS/JIT/interpreter adapt to
///   the new load
/// - **Measurement**: The routine is repeatedly executed, and timing information is collected into
///   a sample
/// - **Analysis**: The sample is analyzed and distilled into meaningful statistics that get
///   reported to stdout, stored in files, and plotted
/// - **Comparison**: The current sample is compared with the sample obtained in the previous
///   benchmark.
pub struct Criterion<M: Measurement = WallTime> {
    config: BenchmarkConfig,
    filter: BenchmarkFilter,
    report: Reports,
    output_directory: PathBuf,
    baseline_directory: String,
    baseline: Baseline,
    load_baseline: Option<String>,
    all_directories: HashSet<String>,
    all_titles: HashSet<String>,
    measurement: M,
    profiler: Box<RefCell<dyn Profiler>>,
    connection: Option<MutexGuard<'static, Connection>>,
    mode: Mode,
}

/// Returns the Cargo target directory, possibly calling `cargo metadata` to
/// figure it out.
fn cargo_target_directory() -> Option<PathBuf> {
    #[derive(Deserialize)]
    struct Metadata {
        target_directory: PathBuf,
    }

    env::var_os("CARGO_TARGET_DIR")
        .map(PathBuf::from)
        .or_else(|| {
            let output = Command::new(env::var_os("CARGO")?)
                .args(["metadata", "--format-version", "1"])
                .output()
                .ok()?;
            let metadata: Metadata = serde_json::from_slice(&output.stdout).ok()?;
            Some(metadata.target_directory)
        })
}

impl Default for Criterion {
    /// Creates a benchmark manager with the following default settings:
    ///
    /// - Sample size: 100 measurements
    /// - Warm-up time: 3 s
    /// - Measurement time: 5 s
    /// - Bootstrap size: 100 000 resamples
    /// - Noise threshold: 0.01 (1%)
    /// - Confidence level: 0.95
    /// - Significance level: 0.05
    /// - Plotting: enabled, using gnuplot if available or plotters if gnuplot is not available
    /// - No filter
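    ///
    /// A minimal sketch of overriding a couple of these defaults through the
    /// builder methods (the values shown are illustrative):
    ///
    /// ```rust
    /// use criterion::Criterion;
    /// use std::time::Duration;
    ///
    /// let c = Criterion::default()
    ///     .warm_up_time(Duration::from_secs(1))
    ///     .measurement_time(Duration::from_secs(10));
    /// ```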
    fn default() -> Criterion {
        let reports = Reports {
            cli_enabled: true,
            cli: CliReport::new(false, false, CliVerbosity::Normal),
            bencher_enabled: false,
            bencher: BencherReport,
            html: default_plotting_backend().create_plotter().map(Html::new),
            csv_enabled: cfg!(feature = "csv_output"),
        };

        let mut criterion = Criterion {
            config: BenchmarkConfig {
                confidence_level: 0.95,
                measurement_time: Duration::from_secs(5),
                noise_threshold: 0.01,
                nresamples: 100_000,
                sample_size: 100,
                significance_level: 0.05,
                warm_up_time: Duration::from_secs(3),
                sampling_mode: SamplingMode::Auto,
                quick_mode: false,
            },
            filter: BenchmarkFilter::AcceptAll,
            report: reports,
            baseline_directory: "base".to_owned(),
            baseline: Baseline::Save,
            load_baseline: None,
            output_directory: default_output_directory().clone(),
            all_directories: HashSet::new(),
            all_titles: HashSet::new(),
            measurement: WallTime,
            profiler: Box::new(RefCell::new(ExternalProfiler)),
            connection: cargo_criterion_connection()
                .as_ref()
                .map(|mtx| mtx.lock().unwrap()),
            mode: Mode::Benchmark,
        };

        if criterion.connection.is_some() {
            // disable all reports when connected to cargo-criterion; it will do the reporting.
            criterion.report.cli_enabled = false;
            criterion.report.bencher_enabled = false;
            criterion.report.csv_enabled = false;
            criterion.report.html = None;
        }
        criterion
    }
}

impl<M: Measurement> Criterion<M> {
    /// Changes the measurement for the benchmarks run with this runner. See the
    /// [`Measurement`] trait for more details.
    pub fn with_measurement<M2: Measurement>(self, m: M2) -> Criterion<M2> {
        // Can't use struct update syntax here because they're technically different types.
        Criterion {
            config: self.config,
            filter: self.filter,
            report: self.report,
            baseline_directory: self.baseline_directory,
            baseline: self.baseline,
            load_baseline: self.load_baseline,
            output_directory: self.output_directory,
            all_directories: self.all_directories,
            all_titles: self.all_titles,
            measurement: m,
            profiler: self.profiler,
            connection: self.connection,
            mode: self.mode,
        }
    }

    #[must_use]
    /// Changes the internal profiler for benchmarks run with this runner. See
    /// the [`Profiler`] trait for more details.
    pub fn with_profiler<P: Profiler + 'static>(self, p: P) -> Criterion<M> {
        Criterion {
            profiler: Box::new(RefCell::new(p)),
            ..self
        }
    }

    #[must_use]
    /// Set the [plotting backend]. By default, Criterion will use `gnuplot` if available,
    /// or `plotters` if not.
    ///
    /// Panics if `backend` is [`PlottingBackend::Gnuplot`] and `gnuplot` is not available.
    ///
    /// [plotting backend]: PlottingBackend
    pub fn plotting_backend(mut self, backend: PlottingBackend) -> Criterion<M> {
        if let PlottingBackend::Gnuplot = backend {
            assert!(
                gnuplot_version().is_ok(),
                "Gnuplot plotting backend was requested, but gnuplot is not available. \
                To continue, either install Gnuplot or allow Criterion.rs to fall back \
                to using plotters."
            );
        }

        self.report.html = backend.create_plotter().map(Html::new);
        self
    }

    #[must_use]
    /// Changes the default size of the sample for benchmarks run with this runner.
    ///
    /// A bigger sample should yield more accurate results if paired with a sufficiently large
    /// measurement time.
    ///
    /// Sample size must be at least 10.
    ///
    /// # Panics
    ///
    /// Panics if `n` < 10
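    ///
    /// A minimal sketch (the value 200 is illustrative):
    ///
    /// ```rust
    /// use criterion::Criterion;
    ///
    /// // Collect 200 measurements per benchmark instead of the default 100.
    /// let c = Criterion::default().sample_size(200);
    /// ```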
    pub fn sample_size(mut self, n: usize) -> Criterion<M> {
        assert!(n >= 10);

        self.config.sample_size = n;
        self
    }

    #[must_use]
    /// Changes the default warm up time for benchmarks run with this runner.
    ///
    /// # Panics
    ///
    /// Panics if the input duration is zero
    pub fn warm_up_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.warm_up_time = dur;
        self
    }

    #[must_use]
    /// Changes the default measurement time for benchmarks run with this runner.
    ///
    /// With a longer time, the measurement will become more resilient to transitory peak loads
    /// caused by external programs.
    ///
    /// **Note**: If the measurement time is too "low", Criterion will automatically increase it.
    ///
    /// # Panics
    ///
    /// Panics if the input duration is zero
    pub fn measurement_time(mut self, dur: Duration) -> Criterion<M> {
        assert!(dur.as_nanos() > 0);

        self.config.measurement_time = dur;
        self
    }

    #[must_use]
    /// Changes the default number of resamples for benchmarks run with this runner.
    ///
    /// Number of resamples to use for the
    /// [bootstrap](http://en.wikipedia.org/wiki/Bootstrapping_(statistics)#Case_resampling)
    ///
    /// A larger number of resamples reduces the random sampling errors, which are inherent to the
    /// bootstrap method, but also increases the analysis time.
    ///
    /// # Panics
    ///
    /// Panics if the number of resamples is set to zero
    pub fn nresamples(mut self, n: usize) -> Criterion<M> {
        assert!(n > 0);
        if n <= 1000 {
            eprintln!("\nWarning: It is not recommended to reduce nresamples below 1000.");
        }

        self.config.nresamples = n;
        self
    }

    #[must_use]
    /// Changes the default noise threshold for benchmarks run with this runner. The noise threshold
    /// is used to filter out small changes in performance, even if they are statistically
    /// significant. Sometimes benchmarking the same code twice will result in small but
    /// statistically significant differences solely because of noise. This provides a way to filter
    /// out some of these false positives at the cost of making it harder to detect small changes
    /// to the true performance of the benchmark.
    ///
    /// The default is 0.01, meaning that changes smaller than 1% will be ignored.
    ///
    /// # Panics
    ///
    /// Panics if the threshold is set to a negative value
    pub fn noise_threshold(mut self, threshold: f64) -> Criterion<M> {
        assert!(threshold >= 0.0);

        self.config.noise_threshold = threshold;
        self
    }

    #[must_use]
    /// Changes the default confidence level for benchmarks run with this runner. The confidence
    /// level is the desired probability that the true runtime lies within the estimated
    /// [confidence interval](https://en.wikipedia.org/wiki/Confidence_interval). The default is
    /// 0.95, meaning that the confidence interval should capture the true value 95% of the time.
    ///
    /// # Panics
    ///
    /// Panics if the confidence level is set to a value outside the `(0, 1)` range
    pub fn confidence_level(mut self, cl: f64) -> Criterion<M> {
        assert!(cl > 0.0 && cl < 1.0);
        if cl < 0.5 {
            eprintln!("\nWarning: It is not recommended to reduce confidence level below 0.5.");
        }

        self.config.confidence_level = cl;
        self
    }

    #[must_use]
    /// Changes the default [significance level](https://en.wikipedia.org/wiki/Statistical_significance)
    /// for benchmarks run with this runner. This is used to perform a
    /// [hypothesis test](https://en.wikipedia.org/wiki/Statistical_hypothesis_testing) to see if
    /// the measurements from this run are different from the measured performance of the last run.
    /// The significance level is the desired probability that two measurements of identical code
    /// will be considered 'different' due to noise in the measurements. The default value is 0.05,
    /// meaning that approximately 5% of identical benchmarks will register as different due to
    /// noise.
    ///
    /// This presents a trade-off. By setting the significance level closer to 0.0, you can increase
    /// the statistical robustness against noise, but it also weakens Criterion.rs' ability to
    /// detect small but real changes in the performance. By setting the significance level
    /// closer to 1.0, Criterion.rs will be more able to detect small true changes, but will also
    /// report more spurious differences.
    ///
    /// See also the noise threshold setting.
    ///
    /// # Panics
    ///
    /// Panics if the significance level is set to a value outside the `(0, 1)` range
    pub fn significance_level(mut self, sl: f64) -> Criterion<M> {
        assert!(sl > 0.0 && sl < 1.0);

        self.config.significance_level = sl;
        self
    }

    #[must_use]
    /// Enables plotting
    pub fn with_plots(mut self) -> Criterion<M> {
        // If running under cargo-criterion then don't re-enable the reports; let it do the reporting.
        if self.connection.is_none() && self.report.html.is_none() {
            let default_backend = default_plotting_backend().create_plotter();
            if let Some(backend) = default_backend {
                self.report.html = Some(Html::new(backend));
            } else {
                panic!("Cannot find a default plotting backend!");
            }
        }
        self
    }

    #[must_use]
    /// Disables plotting
    pub fn without_plots(mut self) -> Criterion<M> {
        self.report.html = None;
        self
    }

    #[must_use]
    /// Names an explicit baseline and enables overwriting the previous results.
    pub fn save_baseline(mut self, baseline: String) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = Baseline::Save;
        self
    }

    #[must_use]
    /// Names an explicit baseline and disables overwriting the previous results.
    pub fn retain_baseline(mut self, baseline: String, strict: bool) -> Criterion<M> {
        self.baseline_directory = baseline;
        self.baseline = if strict {
            Baseline::CompareStrict
        } else {
            Baseline::CompareLenient
        };
        self
    }

    #[must_use]
    /// Filters the benchmarks. Only benchmarks with names that contain the
    /// given string will be executed.
    ///
    /// This overwrites [`Self::with_benchmark_filter`].
    pub fn with_filter<S: Into<String>>(mut self, filter: S) -> Criterion<M> {
        let filter_text = filter.into();
        let filter = Regex::new(&filter_text).unwrap_or_else(|err| {
            panic!(
                "Unable to parse '{}' as a regular expression: {}",
                filter_text, err
            )
        });
        self.filter = BenchmarkFilter::Regex(filter);

        self
    }

    /// Only run benchmarks specified by the given filter.
    ///
    /// This overwrites [`Self::with_filter`].
    pub fn with_benchmark_filter(mut self, filter: BenchmarkFilter) -> Criterion<M> {
        self.filter = filter;

        self
    }

    #[must_use]
    /// Override whether the CLI output will be colored or not. Usually you would use the `--color`
    /// CLI argument, but this is available for programmatic use as well.
    pub fn with_output_color(mut self, enabled: bool) -> Criterion<M> {
        self.report.cli.enable_text_coloring = enabled;
        self
    }

    /// Set the output directory (currently for testing only)
    #[must_use]
    #[doc(hidden)]
    pub fn output_directory(mut self, path: &Path) -> Criterion<M> {
        path.clone_into(&mut self.output_directory);

        self
    }

    /// Set the profile time (currently for testing only)
    #[must_use]
    #[doc(hidden)]
    pub fn profile_time(mut self, profile_time: Option<Duration>) -> Criterion<M> {
        match profile_time {
            Some(time) => self.mode = Mode::Profile(time),
            None => self.mode = Mode::Benchmark,
        }

        self
    }

    /// Generate the final summary at the end of a run.
    #[doc(hidden)]
    pub fn final_summary(&self) {
        if !self.mode.is_benchmark() {
            return;
        }

        let report_context = ReportContext {
            output_directory: self.output_directory.clone(),
            plot_config: PlotConfiguration::default(),
        };

        self.report.final_summary(&report_context);
    }

    /// Configure this criterion struct based on the command-line arguments to
    /// this process.
    #[must_use]
    #[allow(clippy::cognitive_complexity)]
    pub fn configure_from_args(mut self) -> Criterion<M> {
        use clap::{value_parser, Arg, Command};
        let matches = Command::new("Criterion Benchmark")
            .arg(Arg::new("FILTER")
                .help("Skip benchmarks whose names do not contain FILTER.")
                .index(1))
            .arg(Arg::new("color")
                .short('c')
                .long("color")
                .alias("colour")
                .value_parser(["auto", "always", "never"])
                .default_value("auto")
                .help("Configure coloring of output. always = always colorize output, never = never colorize output, auto = colorize output if output is a tty and compiled for unix."))
            .arg(Arg::new("verbose")
                .short('v')
                .long("verbose")
                .num_args(0)
                .help("Print additional statistical information."))
            .arg(Arg::new("quiet")
                .long("quiet")
                .num_args(0)
                .conflicts_with("verbose")
                .help("Print only the benchmark results."))
            .arg(Arg::new("noplot")
                .short('n')
                .long("noplot")
                .num_args(0)
                .help("Disable plot and HTML generation."))
            .arg(Arg::new("save-baseline")
                .short('s')
                .long("save-baseline")
                .default_value("base")
                .help("Save results under a named baseline."))
            .arg(Arg::new("discard-baseline")
                .long("discard-baseline")
                .num_args(0)
                .conflicts_with_all(["save-baseline", "baseline", "baseline-lenient"])
                .help("Discard benchmark results."))
            .arg(Arg::new("baseline")
                .short('b')
                .long("baseline")
                .conflicts_with_all(["save-baseline", "baseline-lenient"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline this command fails."))
            .arg(Arg::new("baseline-lenient")
                .long("baseline-lenient")
                .conflicts_with_all(["save-baseline", "baseline"])
                .help("Compare to a named baseline. If any benchmarks do not have the specified baseline then just those benchmarks are not compared against the baseline while every other benchmark is compared against the baseline."))
            .arg(Arg::new("list")
                .long("list")
                .num_args(0)
                .help("List all benchmarks")
                .conflicts_with_all(["test", "profile-time"]))
            .arg(Arg::new("format")
                .long("format")
                .value_parser(["pretty", "terse"])
                .default_value("pretty")
                // Note that libtest's --format also works during test execution, but criterion
                // doesn't support that at the moment.
                .help("Output formatting"))
            .arg(Arg::new("ignored")
                .long("ignored")
                .num_args(0)
                .help("List or run ignored benchmarks (currently means skip all benchmarks)"))
            .arg(Arg::new("exact")
                .long("exact")
                .num_args(0)
                .help("Run benchmarks that exactly match the provided filter"))
            .arg(Arg::new("profile-time")
                .long("profile-time")
                .value_parser(value_parser!(f64))
                .help("Iterate each benchmark for approximately the given number of seconds, doing no analysis and without storing the results. Useful for running the benchmarks in a profiler.")
                .conflicts_with_all(["test", "list"]))
            .arg(Arg::new("load-baseline")
                .long("load-baseline")
                .conflicts_with("profile-time")
                .requires("baseline")
                .help("Load a previous baseline instead of sampling new data."))
            .arg(Arg::new("sample-size")
                .long("sample-size")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default size of the sample for this run. [default: {}]", self.config.sample_size)))
            .arg(Arg::new("warm-up-time")
                .long("warm-up-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default warm up time for this run. [default: {}]", self.config.warm_up_time.as_secs())))
            .arg(Arg::new("measurement-time")
                .long("measurement-time")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default measurement time for this run. [default: {}]", self.config.measurement_time.as_secs())))
            .arg(Arg::new("nresamples")
                .long("nresamples")
                .value_parser(value_parser!(usize))
                .help(format!("Changes the default number of resamples for this run. [default: {}]", self.config.nresamples)))
            .arg(Arg::new("noise-threshold")
                .long("noise-threshold")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default noise threshold for this run. [default: {}]", self.config.noise_threshold)))
            .arg(Arg::new("confidence-level")
                .long("confidence-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default confidence level for this run. [default: {}]", self.config.confidence_level)))
            .arg(Arg::new("significance-level")
                .long("significance-level")
                .value_parser(value_parser!(f64))
                .help(format!("Changes the default significance level for this run. [default: {}]", self.config.significance_level)))
            .arg(Arg::new("quick")
                .long("quick")
                .num_args(0)
                .conflicts_with("sample-size")
                .help(format!("Benchmark only until the significance level has been reached [default: {}]", self.config.quick_mode)))
            .arg(Arg::new("test")
                .hide(true)
                .long("test")
                .num_args(0)
                .help("Run the benchmarks once, to verify that they execute successfully, but do not measure or report the results.")
                .conflicts_with_all(["list", "profile-time"]))
            .arg(Arg::new("bench")
                .hide(true)
                .long("bench")
                .num_args(0))
            .arg(Arg::new("plotting-backend")
                .long("plotting-backend")
                .value_parser(["gnuplot", "plotters"])
                .help("Set the plotting backend. By default, Criterion.rs will use the gnuplot backend if gnuplot is available, or the plotters backend if it isn't."))
            .arg(Arg::new("output-format")
                .long("output-format")
                .value_parser(["criterion", "bencher"])
                .default_value("criterion")
                .help("Change the CLI output format. By default, Criterion.rs will use its own format. If output format is set to 'bencher', Criterion.rs will print output in a format that resembles the 'bencher' crate."))
            .arg(Arg::new("nocapture")
                .long("nocapture")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("show-output")
                .long("show-output")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("include-ignored")
                .long("include-ignored")
                .num_args(0)
                .hide(true)
                .help("Ignored, but added for compatibility with libtest."))
            .arg(Arg::new("version")
                .hide(true)
                .short('V')
                .long("version")
                .num_args(0))
            .after_help("
This executable is a Criterion.rs benchmark.
See https://github.com/bheisler/criterion.rs for more details.

To enable debug output, define the environment variable CRITERION_DEBUG.
Criterion.rs will output more debug information and will save the gnuplot
scripts alongside the generated plots.

To test that the benchmarks work, run `cargo test --benches`

NOTE: If you see an 'unrecognized option' error using any of the options above, see:
https://bheisler.github.io/criterion.rs/book/faq.html
")
            .get_matches();

        if self.connection.is_some() {
            if let Some(color) = matches.get_one::<String>("color") {
                if color != "auto" {
                    eprintln!("Warning: --color will be ignored when running with cargo-criterion. Use `cargo criterion --color {} -- <args>` instead.", color);
                }
            }
            if matches.get_flag("verbose") {
                eprintln!("Warning: --verbose will be ignored when running with cargo-criterion. Use `cargo criterion --output-format verbose -- <args>` instead.");
            }
            if matches.get_flag("noplot") {
                eprintln!("Warning: --noplot will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend disabled -- <args>` instead.");
            }
            if let Some(backend) = matches.get_one::<String>("plotting-backend") {
                eprintln!("Warning: --plotting-backend will be ignored when running with cargo-criterion. Use `cargo criterion --plotting-backend {} -- <args>` instead.", backend);
            }
            if let Some(format) = matches.get_one::<String>("output-format") {
                if format != "criterion" {
                    eprintln!("Warning: --output-format will be ignored when running with cargo-criterion. Use `cargo criterion --output-format {} -- <args>` instead.", format);
                }
            }

            if matches.contains_id("baseline")
                || matches
                    .get_one::<String>("save-baseline")
                    .is_some_and(|base| base != "base")
                || matches.contains_id("load-baseline")
            {
                eprintln!("Error: baselines are not supported when running with cargo-criterion.");
                std::process::exit(1);
            }
        }

        let bench = matches.get_flag("bench");
        let test = matches.get_flag("test");
        let test_mode = match (bench, test) {
            (true, true) => true,   // cargo bench -- --test should run tests
            (true, false) => false, // cargo bench should run benchmarks
            (false, _) => true,     // cargo test --benches should run tests
        };

        self.mode = if matches.get_flag("list") {
            let list_format = match matches
                .get_one::<String>("format")
                .expect("a default value was provided for this")
                .as_str()
            {
                "pretty" => ListFormat::Pretty,
                "terse" => ListFormat::Terse,
                other => unreachable!(
                    "unrecognized value for --format that isn't part of possible-values: {}",
                    other
                ),
            };
            Mode::List(list_format)
        } else if test_mode {
            Mode::Test
        } else if let Some(&num_seconds) = matches.get_one("profile-time") {
            if num_seconds < 1.0 {
                eprintln!("Profile time must be at least one second.");
                std::process::exit(1);
            }

            Mode::Profile(Duration::from_secs_f64(num_seconds))
        } else {
            Mode::Benchmark
        };

        // This is kind of a hack, but disable the connection to the runner if we're not benchmarking.
        if !self.mode.is_benchmark() {
            self.connection = None;
        }

        let filter = if matches.get_flag("ignored") {
            // --ignored overwrites any name-based filters passed in.
            BenchmarkFilter::RejectAll
        } else if let Some(filter) = matches.get_one::<String>("FILTER") {
            if matches.get_flag("exact") {
                BenchmarkFilter::Exact(filter.to_owned())
            } else {
                let regex = Regex::new(filter).unwrap_or_else(|err| {
                    panic!(
                        "Unable to parse '{}' as a regular expression: {}",
                        filter, err
                    )
                });
                BenchmarkFilter::Regex(regex)
            }
        } else {
            BenchmarkFilter::AcceptAll
        };
        self = self.with_benchmark_filter(filter);

        match matches.get_one("plotting-backend").map(String::as_str) {
            // Use plotting_backend() here to re-use the panic behavior if Gnuplot is not available.
            Some("gnuplot") => self = self.plotting_backend(PlottingBackend::Gnuplot),
            Some("plotters") => self = self.plotting_backend(PlottingBackend::Plotters),
            Some(val) => panic!("Unexpected plotting backend '{}'", val),
            None => {}
        }

        if matches.get_flag("noplot") {
            self = self.without_plots();
        }

        if let Some(dir) = matches.get_one::<String>("save-baseline") {
            self.baseline = Baseline::Save;
            dir.clone_into(&mut self.baseline_directory);
        }
        if matches.get_flag("discard-baseline") {
            self.baseline = Baseline::Discard;
        }
        if let Some(dir) = matches.get_one::<String>("baseline") {
            self.baseline = Baseline::CompareStrict;
            dir.clone_into(&mut self.baseline_directory);
        }
        if let Some(dir) = matches.get_one::<String>("baseline-lenient") {
            self.baseline = Baseline::CompareLenient;
            dir.clone_into(&mut self.baseline_directory);
        }

        if self.connection.is_some() {
            // disable all reports when connected to cargo-criterion; it will do the reporting.
            self.report.cli_enabled = false;
            self.report.bencher_enabled = false;
            self.report.csv_enabled = false;
            self.report.html = None;
        } else {
            match matches.get_one("output-format").map(String::as_str) {
                Some("bencher") => {
                    self.report.bencher_enabled = true;
                    self.report.cli_enabled = false;
                }
                _ => {
                    let verbose = matches.get_flag("verbose");
                    let verbosity = if verbose {
                        CliVerbosity::Verbose
                    } else if matches.get_flag("quiet") {
                        CliVerbosity::Quiet
                    } else {
                        CliVerbosity::Normal
                    };
                    let stdout_isatty = stdout().is_terminal();
                    let mut enable_text_overwrite = stdout_isatty && !verbose && !debug_enabled();
                    let enable_text_coloring;
                    match matches.get_one("color").map(String::as_str) {
                        Some("always") => {
                            enable_text_coloring = true;
                        }
                        Some("never") => {
                            enable_text_coloring = false;
                            enable_text_overwrite = false;
                        }
                        _ => enable_text_coloring = stdout_isatty,
                    };
                    self.report.bencher_enabled = false;
                    self.report.cli_enabled = true;
                    self.report.cli =
                        CliReport::new(enable_text_overwrite, enable_text_coloring, verbosity);
                }
            };
        }

        if let Some(dir) = matches.get_one::<String>("load-baseline") {
            self.load_baseline = Some(dir.to_owned());
        }

        if let Some(&num_size) = matches.get_one("sample-size") {
            assert!(num_size >= 10);
            self.config.sample_size = num_size;
        }
        if let Some(&num_seconds) = matches.get_one("warm-up-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.warm_up_time = dur;
        }
        if let Some(&num_seconds) = matches.get_one("measurement-time") {
            let dur = std::time::Duration::from_secs_f64(num_seconds);
            assert!(dur.as_nanos() > 0);

            self.config.measurement_time = dur;
        }
        if let Some(&num_resamples) = matches.get_one("nresamples") {
            assert!(num_resamples > 0);

            self.config.nresamples = num_resamples;
        }
        if let Some(&num_noise_threshold) = matches.get_one("noise-threshold") {
            assert!(num_noise_threshold > 0.0);

            self.config.noise_threshold = num_noise_threshold;
        }
        if let Some(&num_confidence_level) = matches.get_one("confidence-level") {
            assert!(num_confidence_level > 0.0 && num_confidence_level < 1.0);

            self.config.confidence_level = num_confidence_level;
        }
        if let Some(&num_significance_level) = matches.get_one("significance-level") {
            assert!(num_significance_level > 0.0 && num_significance_level < 1.0);

            self.config.significance_level = num_significance_level;
        }

        if matches.get_flag("quick") {
            self.config.quick_mode = true;
        }

        self
    }

    fn filter_matches(&self, id: &str) -> bool {
        match &self.filter {
            BenchmarkFilter::AcceptAll => true,
            BenchmarkFilter::Regex(regex) => regex.is_match(id),
            BenchmarkFilter::Exact(exact) => id == exact,
            BenchmarkFilter::RejectAll => false,
        }
    }

    /// Returns true iff we should save the benchmark results in
    /// JSON files on the local disk.
    fn should_save_baseline(&self) -> bool {
        self.connection.is_none()
            && self.load_baseline.is_none()
            && !matches!(self.baseline, Baseline::Discard)
    }

    /// Return a benchmark group. All benchmarks performed using a benchmark group will be
    /// grouped together in the final report.
    ///
    /// # Examples:
    ///
    /// ```rust
    /// use criterion::{criterion_group, criterion_main, Criterion};
    ///
    /// fn bench_simple(c: &mut Criterion) {
    ///     let mut group = c.benchmark_group("My Group");
    ///
    ///     // Now we can perform benchmarks with this group
    ///     group.bench_function("Bench 1", |b| b.iter(|| 1 ));
    ///     group.bench_function("Bench 2", |b| b.iter(|| 2 ));
    ///
    ///     group.finish();
    /// }
    /// criterion_group!(benches, bench_simple);
    /// criterion_main!(benches);
    /// ```
    /// # Panics:
    /// Panics if the group name is empty
    pub fn benchmark_group<S: Into<String>>(&mut self, group_name: S) -> BenchmarkGroup<'_, M> {
        let group_name = group_name.into();
        assert!(!group_name.is_empty(), "Group name must not be empty.");

        if let Some(conn) = &self.connection {
            conn.send(&OutgoingMessage::BeginningBenchmarkGroup { group: &group_name })
                .unwrap();
        }

        BenchmarkGroup::new(self, group_name)
    }
}
impl<M> Criterion<M>
where
    M: Measurement + 'static,
{
    /// Benchmarks a function. For comparing multiple functions, see
    /// [`benchmark_group`](Self::benchmark_group).
    ///
    /// # Example
    ///
    /// ```rust
    /// use criterion::{criterion_group, criterion_main, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // Setup (construct data, allocate memory, etc)
    ///     c.bench_function(
    ///         "function_name",
    ///         |b| b.iter(|| {
    ///             // Code to benchmark goes here
    ///         }),
    ///     );
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    pub fn bench_function<F>(&mut self, id: &str, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>),
    {
        self.benchmark_group(id)
            .bench_function(BenchmarkId::no_function(), f);
        self
    }

    /// Benchmarks a function with an input. For comparing multiple functions or multiple inputs,
    /// see [`benchmark_group`](Self::benchmark_group).
    ///
    /// # Example
    ///
    /// ```rust
    /// use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
    ///
    /// fn bench(c: &mut Criterion) {
    ///     // Setup (construct data, allocate memory, etc)
    ///     let input = 5u64;
    ///     c.bench_with_input(
    ///         BenchmarkId::new("function_name", input), &input,
    ///         |b, i| b.iter(|| {
    ///             // Code to benchmark using input `i` goes here
    ///         }),
    ///     );
    /// }
    ///
    /// criterion_group!(benches, bench);
    /// criterion_main!(benches);
    /// ```
    pub fn bench_with_input<F, I>(&mut self, id: BenchmarkId, input: &I, f: F) -> &mut Criterion<M>
    where
        F: FnMut(&mut Bencher<'_, M>, &I),
    {
        // It's possible to use BenchmarkId::from_parameter to create a benchmark ID with no function
        // name. That's intended for use with BenchmarkGroups where the function name isn't necessary,
        // but here it is.
        let group_name = id.function_name.expect(
            "Cannot use BenchmarkId::from_parameter with Criterion::bench_with_input. \
                 Consider using a BenchmarkGroup or BenchmarkId::new instead.",
        );
        // Guaranteed safe because external callers can't create benchmark IDs without a parameter
        let parameter = id.parameter.unwrap();
        self.benchmark_group(group_name).bench_with_input(
            BenchmarkId::no_function_with_input(parameter),
            input,
            f,
        );
        self
    }
}

/// Enum representing different ways of measuring the throughput of benchmarked code.
/// If the throughput setting is configured for a benchmark then the estimated throughput will
/// be reported as well as the time per iteration.
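///
/// A rough sketch of attaching a throughput to a benchmark group (the group
/// name, benchmark name, and summing routine are illustrative):
///
/// ```rust
/// use criterion::{Criterion, Throughput};
///
/// fn bench(c: &mut Criterion) {
///     let data = vec![0u8; 1024];
///     let mut group = c.benchmark_group("checksum");
///     // Report bytes/second based on the size of the processed input.
///     group.throughput(Throughput::Bytes(data.len() as u64));
///     group.bench_function("sum", |b| {
///         b.iter(|| data.iter().map(|&byte| byte as u64).sum::<u64>())
///     });
///     group.finish();
/// }
/// ```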
1259// TODO: Remove serialize/deserialize from the public API.
1260#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
1261pub enum Throughput {
1262    /// Measure throughput in terms of bytes/second. The value should be the number of bytes
1263    /// processed by one iteration of the benchmarked code. Typically, this would be the length of
1264    /// an input string or `&[u8]`.
1265    Bytes(u64),
1266
1267    /// Equivalent to Bytes, but the value will be reported in terms of
1268    /// kilobytes (1000 bytes) per second instead of kibibytes (1024 bytes) per
1269    /// second, megabytes instead of mibibytes, and gigabytes instead of gibibytes.
1270    BytesDecimal(u64),
1271
1272    /// Measure throughput in terms of elements/second. The value should be the number of elements
1273    /// processed by one iteration of the benchmarked code. Typically, this would be the size of a
1274    /// collection, but could also be the number of lines of input text or the number of values to
1275    /// parse.
1276    Elements(u64),
1277}
1278
1279/// Axis scaling type. Specified via [`PlotConfiguration::summary_scale`].
1280#[derive(Debug, Default, Clone, Copy)]
1281pub enum AxisScale {
1282    /// Axes scale linearly
1283    #[default]
1284    Linear,
1285
1286    /// Axes scale logarithmically
1287    Logarithmic,
1288}
1289
1290/// Contains the configuration options for the plots generated by a particular benchmark
1291/// or benchmark group.
1292///
1293/// ```rust
1294/// use self::criterion::{Bencher, Criterion, PlotConfiguration, AxisScale};
1295///
1296/// let plot_config = PlotConfiguration::default()
1297///     .summary_scale(AxisScale::Logarithmic);
1298///
1299/// // Using Criterion::default() for simplicity; normally you'd use the macros.
1300/// let mut criterion = Criterion::default();
1301/// let mut benchmark_group = criterion.benchmark_group("Group name");
1302/// benchmark_group.plot_config(plot_config);
1303/// // Use benchmark group
1304/// ```
#[derive(Debug, Default, Clone)]
pub struct PlotConfiguration {
    summary_scale: AxisScale,
}

impl PlotConfiguration {
    #[must_use]
    /// Set the axis scale ([linear] or [logarithmic]) for the summary plots.
    ///
    /// Typically, you would set this to logarithmic if benchmarking over a
    /// range of inputs which scale exponentially. Defaults to [`AxisScale::Linear`].
    ///
    /// [linear]: AxisScale::Linear
    /// [logarithmic]: AxisScale::Logarithmic
    pub fn summary_scale(mut self, new_scale: AxisScale) -> PlotConfiguration {
        self.summary_scale = new_scale;
        self
    }
}

/// This enum allows the user to control how Criterion.rs chooses the iteration count when sampling.
/// The default is `Auto`, which will choose a method automatically based on the iteration time during
/// the warm-up phase.
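///
/// For example, a minimal sketch of opting into flat sampling for a long-running
/// benchmark (the group name and the benchmarked closure are illustrative only):
///
/// ```rust,no_run
/// use criterion::{Criterion, SamplingMode};
/// use std::time::Duration;
///
/// let mut criterion = Criterion::default();
/// let mut group = criterion.benchmark_group("long-running");
/// // Flat sampling keeps the iteration count constant across samples, which keeps
/// // total benchmark time manageable when each iteration is expensive.
/// group.sampling_mode(SamplingMode::Flat);
/// group.bench_function("slow-op", |b| {
///     b.iter(|| std::thread::sleep(Duration::from_millis(10)))
/// });
/// group.finish();
/// ```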
#[derive(Debug, Default, Clone, Copy)]
pub enum SamplingMode {
    /// Criterion.rs should choose a sampling method automatically. This is the default, and is
    /// recommended for most users and most benchmarks.
    #[default]
    Auto,

    /// Scale the iteration count in each sample linearly. This is suitable for most benchmarks,
    /// but it tends to require many iterations which can make it very slow for very long benchmarks.
    Linear,

    /// Keep the iteration count the same for all samples. This is not recommended, as it affects
    /// the statistics that Criterion.rs can compute. However, it requires fewer iterations than
    /// the `Linear` method and therefore is more suitable for very long-running benchmarks where
    /// benchmark execution time is more of a problem and statistical precision is less important.
    Flat,
}

impl SamplingMode {
    pub(crate) fn choose_sampling_mode(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: f64,
    ) -> ActualSamplingMode {
        match self {
            SamplingMode::Linear => ActualSamplingMode::Linear,
            SamplingMode::Flat => ActualSamplingMode::Flat,
            SamplingMode::Auto => {
                // Estimate execution time with linear sampling
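                // Work out the smallest per-sample scaling factor `d` that linear sampling
                // could use, then project the total measurement time it would need. If that
                // projection exceeds twice the target time, fall back to flat sampling,
                // which needs far fewer total iterations.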
                let total_runs = sample_count * (sample_count + 1) / 2;
                let d =
                    (target_time / warmup_mean_execution_time / total_runs as f64).ceil() as u64;
                let expected_ns = total_runs as f64 * d as f64 * warmup_mean_execution_time;

                if expected_ns > (2.0 * target_time) {
                    ActualSamplingMode::Flat
                } else {
                    ActualSamplingMode::Linear
                }
            }
        }
    }
}

/// Enum to represent the sampling mode without Auto.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub(crate) enum ActualSamplingMode {
    Linear,
    Flat,
}

impl ActualSamplingMode {
    pub(crate) fn iteration_counts(
        &self,
        warmup_mean_execution_time: f64,
        sample_count: u64,
        target_time: &Duration,
    ) -> Vec<u64> {
        match self {
            ActualSamplingMode::Linear => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos();
                // Solve: [d + 2*d + 3*d + ... + n*d] * met = m_ns
                let total_runs = n * (n + 1) / 2;
                let d = ((m_ns as f64 / met / total_runs as f64).ceil() as u64).max(1);
                let expected_ns = total_runs as f64 * d as f64 * met;

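                // d == 1 means that even with the smallest possible scaling factor the
                // linear plan takes at least the full target time, so the requested samples
                // can't comfortably fit; warn and suggest a larger target time, flat
                // sampling, or a smaller sample count.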
                if d == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_linear_sample_size(m_ns as f64, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        eprintln!(
                            ", enable flat sampling, or reduce sample count to {}.",
                            recommended_sample_size
                        );
                    } else {
                        eprintln!(" or enable flat sampling.");
                    }
                }

                (1..(n + 1)).map(|a| a * d).collect::<Vec<u64>>()
            }
            ActualSamplingMode::Flat => {
                let n = sample_count;
                let met = warmup_mean_execution_time;
                let m_ns = target_time.as_nanos() as f64;
                let time_per_sample = m_ns / (n as f64);
                // This is pretty simplistic; we could do something smarter to fit into the allotted time.
                let iterations_per_sample = ((time_per_sample / met).ceil() as u64).max(1);

                let expected_ns = met * (iterations_per_sample * n) as f64;

                if iterations_per_sample == 1 {
                    let recommended_sample_size =
                        ActualSamplingMode::recommend_flat_sample_size(m_ns, met);
                    let actual_time = Duration::from_nanos(expected_ns as u64);
                    eprint!("\nWarning: Unable to complete {} samples in {:.1?}. You may wish to increase target time to {:.1?}",
                            n, target_time, actual_time);

                    if recommended_sample_size != n {
                        eprintln!(", or reduce sample count to {}.", recommended_sample_size);
                    } else {
                        eprintln!(".");
                    }
                }

                vec![iterations_per_sample; n as usize]
            }
        }
    }

    fn is_linear(&self) -> bool {
        matches!(self, ActualSamplingMode::Linear)
    }

    fn recommend_linear_sample_size(target_time: f64, met: f64) -> u64 {
        // Some math shows that n(n+1)/2 * d * met = target_time. Here d = 1, so it can be ignored.
        // This leaves n(n+1) = (2*target_time)/met, i.e. n^2 + n - (2*target_time)/met = 0,
        // which can be solved with the quadratic formula. Since a and b are both 1,
        // this simplifies to sample_size = (-1 + sqrt(1 + 4*C))/2, where C = (2*target_time)/met;
        // the negative root is discarded. Experimentation shows that this tends to result in
        // roughly twice the desired execution time (probably because of the ceil used to calculate
        // d), so the code below uses c = target_time/met instead and drops the 1 under the root.
        let c = target_time / met;
        let sample_size = (-1.0 + (4.0 * c).sqrt()) / 2.0;
        let sample_size = sample_size as u64;

        // Round down to the nearest 10 to give a margin and avoid excessive precision
        let sample_size = (sample_size / 10) * 10;

        // Clamp it to be at least 10, since criterion.rs doesn't allow sample sizes smaller than 10.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }

    fn recommend_flat_sample_size(target_time: f64, met: f64) -> u64 {
        let sample_size = (target_time / met) as u64;

        // Round down to the nearest 10 to give a margin and avoid excessive precision
        let sample_size = (sample_size / 10) * 10;

        // Clamp it to be at least 10, since criterion.rs doesn't allow sample sizes smaller than 10.
        if sample_size < 10 {
            10
        } else {
            sample_size
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub(crate) struct SavedSample {
    sampling_mode: ActualSamplingMode,
    iters: Vec<f64>,
    times: Vec<f64>,
}

/// Custom-test-framework runner. Should not be called directly.
#[doc(hidden)]
pub fn runner(benches: &[&dyn Fn()]) {
    for bench in benches {
        bench();
    }
    Criterion::default().configure_from_args().final_summary();
}