
criterion 0.1.2

Statistics-driven micro-benchmarking library
use stats::bivariate::Data;
use stats::bivariate::regression::Slope;
use stats::univariate::outliers::tukey::LabeledSample;

use format;
use stats::univariate::Sample;
use estimate::{Distributions, Estimates, Statistic};
use Estimate;
use std::io::stdout;
use std::io::Write;
use std::cell::Cell;

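/// The outcome of comparing this run against a previous one: the
/// t-test results plus the estimated relative change, and the
/// thresholds used to judge significance.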
pub(crate) struct ComparisonData {
    pub p_value: f64,
    pub t_value: f64,
    pub relative_estimates: Estimates,
    pub significance_threshold: f64,
    pub noise_threshold: f64,
}

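/// Everything gathered while measuring one benchmark: the raw sample
/// data, the per-sample average times with outliers labeled, the
/// estimated statistics and their distributions, and the comparison
/// against a previous run, if one exists.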
pub(crate) struct MeasurementData<'a> {
    pub iter_counts: &'a Sample<f64>,
    pub sample_times: &'a Sample<f64>,
    pub avg_times: LabeledSample<'a, f64>,
    pub absolute_estimates: Estimates,
    pub distributions: Distributions,
    pub comparison: Option<ComparisonData>,
}

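/// Progress callbacks fired at each stage of a benchmark run, so a
/// reporter can show status and, at the end, the results.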
pub(crate) trait Report {
    fn benchmark_start(&self, id: &str);
    fn warmup(&self, id: &str, warmup_ns: f64);
    fn analysis(&self, id: &str);
    fn measurement_start(&self, id: &str, sample_count: u64, estimate_ns: f64, iter_count: u64);
    fn measurement_complete(&self, id: &str, measurements: &MeasurementData);
}

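/// A `Report` implementation that writes to the terminal, optionally
/// overwriting the in-progress status line and coloring output with
/// ANSI escape sequences.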
pub(crate) struct CliReport {
    pub enable_text_overwrite: bool,
    pub enable_text_coloring: bool,
    pub verbose: bool,

    last_line_len: Cell<usize>,
}
impl CliReport {
    pub fn new(enable_text_overwrite: bool, enable_text_coloring: bool, verbose: bool) -> CliReport {
        CliReport {
            enable_text_overwrite,
            enable_text_coloring,
            verbose,

            last_line_len: Cell::new(0),
        }
    }

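    /// Erases the previously printed status line (when overwriting is
    /// enabled) so the next message can take its place.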
    fn text_overwrite(&self) {
        if self.enable_text_overwrite {
            print!("\r{}\r", " ".repeat(self.last_line_len.get()));
        }
    }

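    /// Prints a status message. With overwriting enabled the line is
    /// left unterminated and its length recorded so it can be erased
    /// later; otherwise it is printed as an ordinary line.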
    fn print_overwritable(&self, s: String) {
        if self.enable_text_overwrite {
            self.last_line_len.set(s.len());
            print!("{}", s);
            stdout().flush().unwrap();
        } else {
            println!("{}", s);
        }
    }

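    // The helpers below wrap a string in ANSI SGR escape sequences:
    // 31/32/33 set the foreground to red/green/yellow and 39 restores
    // the default; 1/2 set bold/faint intensity and 22 restores normal.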
    fn green(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[32m{}\x1B[39m", s)
        } else {
            s
        }
    }

    fn yellow(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[33m{}\x1B[39m", s)
        } else {
            s
        }
    }

    fn red(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[31m{}\x1B[39m", s)
        } else {
            s
        }
    }

    fn bold(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[1m{}\x1B[22m", s)
        } else {
            s
        }
    }

    fn faint(&self, s: String) -> String {
        if self.enable_text_coloring {
            format!("\x1B[2m{}\x1B[22m", s)
        } else {
            s
        }
    }
    
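    /// Summarizes the outliers that Tukey's method labeled in the
    /// sample, broken down by direction and severity.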
    pub fn outliers(&self, sample: &LabeledSample<f64>) {
        let (los, lom, _, him, his) = sample.count();
        let noutliers = los + lom + him + his;
        let sample_size = sample.as_slice().len();

        if noutliers == 0 {
            return;
        }

        let percent = |n: usize| { 100. * n as f64 / sample_size as f64 };

        println!("{}", 
            self.yellow(
                format!("Found {} outliers among {} measurements ({:.2}%)",
                    noutliers,
                    sample_size,
                    percent(noutliers))));

        let print = |n, label| {
            if n != 0 {
                println!("  {} ({:.2}%) {}", n, percent(n), label);
            }
        };

        print(los, "low severe");
        print(lom, "low mild");
        print(him, "high mild");
        print(his, "high severe");
    }
}
impl Report for CliReport {
    fn benchmark_start(&self, id: &str) {
        self.print_overwritable(format!("Benchmarking {}", id));
    }

    fn warmup(&self, id: &str, warmup_ns: f64) {
        self.text_overwrite();
        self.print_overwritable(
            format!("Benchmarking {}: Warming up for {}", id, format::time(warmup_ns)));
    }

    fn analysis(&self, id: &str) {
        self.text_overwrite();
        self.print_overwritable(format!("Benchmarking {}: Analyzing", id));
    }

    fn measurement_start(&self, id: &str, sample_count: u64,
        estimate_ns: f64, iter_count: u64) {
        self.text_overwrite();
        self.print_overwritable(
            format!("Benchmarking {}: Collecting {} samples in estimated {} ({} iterations)",
                id, sample_count, format::time(estimate_ns), iter_count));
    }

    fn measurement_complete(&self, id: &str, meas: &MeasurementData) {
        self.text_overwrite();
        
        let slope_estimate = meas.absolute_estimates.get(&Statistic::Slope).unwrap();

        {
            let mut id = String::from(id);
            id.truncate(23);
            let id_len = id.len();
            println!("{}{}time:   [{} {} {}]", 
                self.green(id), 
                " ".repeat(24 - id_len),
                self.faint(format::time(slope_estimate.confidence_interval.lower_bound)),
                self.bold(format::time(slope_estimate.point_estimate)),
                self.faint(format::time(slope_estimate.confidence_interval.upper_bound))
            );
        }

        if let Some(ref comp) = meas.comparison {
            let different_mean = comp.p_value < comp.significance_threshold;
            let mean_est = comp.relative_estimates.get(&Statistic::Mean).unwrap();
            let point_estimate = mean_est.point_estimate;
            let mut point_estimate_str = format::change(point_estimate, true);
            let explanation_str: String;

            if !different_mean {
                explanation_str = "No change in performance detected.".to_owned();
            }
            else {
                let comparison = compare_to_threshold(mean_est, comp.noise_threshold);
                match comparison {
                    ComparisonResult::Improved => {
                        point_estimate_str = self.green(self.bold(point_estimate_str));
                        explanation_str = format!("Performance has {}.",
                            self.green("improved".to_owned()));
                    }
                    ComparisonResult::Regressed => {
                        point_estimate_str = self.red(self.bold(point_estimate_str));
                        explanation_str = format!("Performance has {}.",
                            self.red("regressed".to_owned()));
                    }
                    ComparisonResult::NonSignificant => {
                        explanation_str = "Change within noise threshold.".to_owned();
                    }
                }
            }

            println!("{}change: [{} {} {}] (p = {:.2} {} {:.2})", 
                " ".repeat(24),
                self.faint(format::change(mean_est.confidence_interval.lower_bound, true)),
                point_estimate_str,
                self.faint(format::change(mean_est.confidence_interval.upper_bound, true)),
                comp.p_value,
                if different_mean {"<"} else {">"},
                comp.significance_threshold
            );
            println!("{}{}", " ".repeat(24), explanation_str);
        }

        self.outliers(&meas.avg_times);

        if self.verbose {
            let data = Data::new(meas.iter_counts.as_slice(), meas.sample_times.as_slice());

            fn format_short_estimate(estimate: &Estimate) -> String {
                format!("[{} {}]", 
                    format::time(estimate.confidence_interval.lower_bound),
                    format::time(estimate.confidence_interval.upper_bound))
            }

            println!("{:<7}{} {:<15}[{:0.7} {:0.7}]",
                "slope",
                format_short_estimate(slope_estimate),
                "R^2",
                Slope(slope_estimate.confidence_interval.lower_bound).r_squared(data),
                Slope(slope_estimate.confidence_interval.upper_bound).r_squared(data),
            );
            println!("{:<7}{} {:<15}{}",
                "mean",
                format_short_estimate(meas.absolute_estimates.get(&Statistic::Mean).unwrap()),
                "std. dev.",
                format_short_estimate(meas.absolute_estimates.get(&Statistic::StdDev).unwrap()),
            );
            println!("{:<7}{} {:<15}{}",
                "median",
                format_short_estimate(meas.absolute_estimates.get(&Statistic::Median).unwrap()),
                "med. abs. dev.",
                format_short_estimate(meas.absolute_estimates.get(&Statistic::MedianAbsDev).unwrap()),
            );
        }
    }
}

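/// Verdict on a relative change once the noise threshold is applied.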
enum ComparisonResult {
    Improved,
    Regressed,
    NonSignificant,
}

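/// Classifies the estimated change: it only counts as an improvement
/// or a regression when the entire confidence interval lies beyond the
/// noise threshold.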
fn compare_to_threshold(estimate: &Estimate, noise: f64) -> ComparisonResult {
    let ci = estimate.confidence_interval;
    let lb = ci.lower_bound;
    let ub = ci.upper_bound;

    if lb < -noise && ub < -noise {
        ComparisonResult::Improved
    } else if lb > noise && ub > noise {
        ComparisonResult::Regressed
    } else {
        ComparisonResult::NonSignificant
    }
}
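
// A minimal sketch of how a driver might exercise `CliReport`. The
// benchmark id and timing values below are illustrative only, and the
// real call sequence lives elsewhere in criterion.
#[cfg(test)]
mod report_sketch {
    use super::*;

    #[test]
    fn cli_report_progress_messages() {
        // Plain, non-overwriting, non-colored output keeps test logs readable.
        let report = CliReport::new(false, false, false);
        report.benchmark_start("fib 20");
        report.warmup("fib 20", 3e9); // 3 s warm-up, expressed in nanoseconds
        report.measurement_start("fib 20", 100, 5e9, 4_950_000);
        report.analysis("fib 20");
        // `measurement_complete` needs a fully populated `MeasurementData`,
        // which is built by the analysis code and is not constructed here.
    }
}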