use std::hint::black_box;
use std::time::Duration;
use std::time::Instant;
use crate::measurement::{Measurement, WallTime};
use crate::BatchSize;
#[cfg(feature = "async")]
use std::future::Future;
#[cfg(feature = "async")]
use crate::async_executor::AsyncExecutor;
/// Timer struct used to iterate a benchmarked function and measure the time it
/// takes to run. Each `iter*` timing loop stores its result through the
/// attached [`Measurement`].
pub struct Bencher<'a, M: Measurement = WallTime> {
    // Set to true by every `iter*` timing loop; checked (and reset) by `assert_iterated`.
    pub(crate) iterated: bool,
    // Number of times the benchmarked routine must be executed by the next timing loop.
    pub(crate) iters: u64,
    // Measured value accumulated by the last timing loop.
    pub(crate) value: M::Value,
    // Measurement backend (wall-clock time by default).
    pub(crate) measurement: &'a M,
    // Total wall-clock time spent inside the last timing loop, including
    // setup and drops that are excluded from `value`.
    pub(crate) elapsed_time: Duration,
}
impl<'a, M: Measurement> Bencher<'a, M> {
    /// Times `routine` by executing it `self.iters` times in a tight loop.
    ///
    /// The routine's output is passed through `black_box` so the optimizer
    /// cannot discard the computed value.
    #[inline(never)]
    pub fn iter<O, R>(&mut self, mut routine: R)
    where
        R: FnMut() -> O,
    {
        self.iterated = true;
        let time_start = Instant::now();
        let start = self.measurement.start();
        for _ in 0..self.iters {
            black_box(routine());
        }
        self.value = self.measurement.end(start);
        self.elapsed_time = time_start.elapsed();
    }

    /// Times `routine` with caller-supplied measurement logic: the routine
    /// receives the iteration count and must itself return the measured
    /// value for executing that many iterations.
    #[inline(never)]
    pub fn iter_custom<R>(&mut self, mut routine: R)
    where
        R: FnMut(u64) -> M::Value,
    {
        self.iterated = true;
        let time_start = Instant::now();
        self.value = routine(self.iters);
        self.elapsed_time = time_start.elapsed();
    }

    /// Hidden legacy wrapper: equivalent to `iter_batched` with
    /// `BatchSize::PerIteration` (fresh `setup` before every routine call,
    /// excluded from the measurement).
    #[doc(hidden)]
    pub fn iter_with_setup<I, O, S, R>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> O,
    {
        self.iter_batched(setup, routine, BatchSize::PerIteration);
    }

    /// Times `routine` while keeping its outputs alive until after the
    /// measured region ends, so expensive drops are not charged to the
    /// measurement (delegates to `iter_batched`, which collects outputs
    /// per batch and drops them after `measurement.end`).
    pub fn iter_with_large_drop<O, R>(&mut self, mut routine: R)
    where
        R: FnMut() -> O,
    {
        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
    }

    /// Batched timing loop: `setup` creates each input outside the measured
    /// region, `routine` consumes it inside the measured region, and outputs
    /// are dropped after the measurement ends. `size` controls how many
    /// iterations share one measurement.
    ///
    /// Panics if the computed batch size is zero.
    #[inline(never)]
    pub fn iter_batched<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> O,
    {
        self.iterated = true;
        let batch_size = size.iters_per_batch(self.iters);
        assert!(batch_size != 0, "Batch size must not be zero.");
        let time_start = Instant::now();
        self.value = self.measurement.zero();
        if batch_size == 1 {
            // One measurement per iteration; setup and the output drop both
            // happen outside the start/end window.
            for _ in 0..self.iters {
                let input = black_box(setup());
                let start = self.measurement.start();
                let output = routine(input);
                let end = self.measurement.end(start);
                self.value = self.measurement.add(&self.value, &end);
                drop(black_box(output));
            }
        } else {
            // One measurement per batch; the final batch is truncated so the
            // total routine calls equal exactly `self.iters`.
            let mut iteration_counter = 0;
            while iteration_counter < self.iters {
                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
                let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
                let mut outputs = Vec::with_capacity(batch_size as usize);
                let start = self.measurement.start();
                outputs.extend(inputs.into_iter().map(&mut routine));
                let end = self.measurement.end(start);
                self.value = self.measurement.add(&self.value, &end);
                // Dropped here, after `end`, so output destructors are not measured.
                black_box(outputs);
                iteration_counter += batch_size;
            }
        }
        self.elapsed_time = time_start.elapsed();
    }

    /// Like `iter_batched`, but `routine` borrows the input mutably instead of
    /// consuming it; both input and output drops are excluded from the
    /// measured region.
    ///
    /// Panics if the computed batch size is zero.
    #[inline(never)]
    pub fn iter_batched_ref<I, O, S, R>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(&mut I) -> O,
    {
        self.iterated = true;
        let batch_size = size.iters_per_batch(self.iters);
        assert!(batch_size != 0, "Batch size must not be zero.");
        let time_start = Instant::now();
        self.value = self.measurement.zero();
        if batch_size == 1 {
            for _ in 0..self.iters {
                let mut input = black_box(setup());
                let start = self.measurement.start();
                let output = routine(&mut input);
                let end = self.measurement.end(start);
                self.value = self.measurement.add(&self.value, &end);
                // Output and input are dropped only after the measurement ends.
                drop(black_box(output));
                drop(black_box(input));
            }
        } else {
            let mut iteration_counter = 0;
            while iteration_counter < self.iters {
                let batch_size = ::std::cmp::min(batch_size, self.iters - iteration_counter);
                let mut inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
                let mut outputs = Vec::with_capacity(batch_size as usize);
                let start = self.measurement.start();
                // `iter_mut` keeps `inputs` alive past `end`, so input drops
                // are not charged to the measurement.
                outputs.extend(inputs.iter_mut().map(&mut routine));
                let end = self.measurement.end(start);
                self.value = self.measurement.add(&self.value, &end);
                black_box(outputs);
                iteration_counter += batch_size;
            }
        }
        self.elapsed_time = time_start.elapsed();
    }

    /// Panics unless some `iter*` timing loop was called since the last
    /// check, then clears the flag for the next run. Guards against benchmark
    /// functions that forget to call `Bencher::iter`.
    pub(crate) fn assert_iterated(&mut self) {
        assert!(
            self.iterated,
            "Benchmark function must call Bencher::iter or related method."
        );
        self.iterated = false;
    }

    /// Wraps this bencher together with an async executor, yielding an
    /// [`AsyncBencher`] whose timing loops accept `Future`-returning routines.
    #[cfg(feature = "async")]
    pub fn to_async<'b, A: AsyncExecutor>(&'b mut self, runner: A) -> AsyncBencher<'a, 'b, A, M> {
        AsyncBencher { b: self, runner }
    }
}
#[cfg(feature = "async")]
/// Async variant of [`Bencher`]: pairs a mutable `Bencher` with an
/// [`AsyncExecutor`] used to block on the benchmarked futures.
pub struct AsyncBencher<'a, 'b, A: AsyncExecutor, M: Measurement = WallTime> {
    // Underlying synchronous bencher holding iteration state and results.
    b: &'b mut Bencher<'a, M>,
    // Executor used to drive the async routines to completion.
    runner: A,
}
#[cfg(feature = "async")]
impl<'a, 'b, A: AsyncExecutor, M: Measurement> AsyncBencher<'a, 'b, A, M> {
    /// Times `routine` by awaiting the future it returns `iters` times inside
    /// a single `block_on` call; outputs are `black_box`ed so the computed
    /// values are not optimized away.
    #[inline(never)]
    pub fn iter<O, R, F>(&mut self, mut routine: R)
    where
        R: FnMut() -> F,
        F: Future<Output = O>,
    {
        let AsyncBencher { b, runner } = self;
        runner.block_on(async {
            b.iterated = true;
            let time_start = Instant::now();
            let start = b.measurement.start();
            for _ in 0..b.iters {
                black_box(routine().await);
            }
            b.value = b.measurement.end(start);
            b.elapsed_time = time_start.elapsed();
        });
    }

    /// Async counterpart of `Bencher::iter_custom`: the routine receives the
    /// iteration count and must itself produce the measured value.
    #[inline(never)]
    pub fn iter_custom<R, F>(&mut self, mut routine: R)
    where
        R: FnMut(u64) -> F,
        F: Future<Output = M::Value>,
    {
        let AsyncBencher { b, runner } = self;
        runner.block_on(async {
            b.iterated = true;
            let time_start = Instant::now();
            b.value = routine(b.iters).await;
            b.elapsed_time = time_start.elapsed();
        })
    }

    /// Hidden legacy wrapper: equivalent to `iter_batched` with
    /// `BatchSize::PerIteration`.
    #[doc(hidden)]
    pub fn iter_with_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> F,
        F: Future<Output = O>,
    {
        self.iter_batched(setup, routine, BatchSize::PerIteration);
    }

    /// Times `routine` while keeping its outputs alive until after the
    /// measured region ends, so expensive drops are not charged to the
    /// measurement.
    pub fn iter_with_large_drop<O, R, F>(&mut self, mut routine: R)
    where
        R: FnMut() -> F,
        F: Future<Output = O>,
    {
        self.iter_batched(|| (), |_| routine(), BatchSize::SmallInput);
    }

    /// Hidden legacy wrapper: equivalent to `iter_batched` with
    /// `BatchSize::NumBatches(1)` (one big setup for all iterations).
    #[doc(hidden)]
    pub fn iter_with_large_setup<I, O, S, R, F>(&mut self, setup: S, routine: R)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> F,
        F: Future<Output = O>,
    {
        self.iter_batched(setup, routine, BatchSize::NumBatches(1));
    }

    /// Async batched timing loop: `setup` creates each input outside the
    /// measured region, `routine` consumes it inside, and outputs are dropped
    /// after the measurement ends. `size` controls how many iterations share
    /// one measurement.
    ///
    /// Panics if the computed batch size is zero.
    #[inline(never)]
    pub fn iter_batched<I, O, S, R, F>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(I) -> F,
        F: Future<Output = O>,
    {
        let AsyncBencher { b, runner } = self;
        runner.block_on(async {
            b.iterated = true;
            let batch_size = size.iters_per_batch(b.iters);
            assert!(batch_size != 0, "Batch size must not be zero.");
            let time_start = Instant::now();
            b.value = b.measurement.zero();
            if batch_size == 1 {
                // One measurement per iteration; setup and output drop stay
                // outside the start/end window.
                for _ in 0..b.iters {
                    let input = black_box(setup());
                    let start = b.measurement.start();
                    let output = routine(input).await;
                    let end = b.measurement.end(start);
                    b.value = b.measurement.add(&b.value, &end);
                    drop(black_box(output));
                }
            } else {
                // One measurement per batch; the final batch is truncated so
                // the total routine calls equal exactly `b.iters`.
                let mut iteration_counter = 0;
                while iteration_counter < b.iters {
                    let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter);
                    let inputs = black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
                    let mut outputs = Vec::with_capacity(batch_size as usize);
                    let start = b.measurement.start();
                    // Can't use `extend` like the sync version because of the `.await`.
                    for input in inputs {
                        outputs.push(routine(input).await);
                    }
                    let end = b.measurement.end(start);
                    b.value = b.measurement.add(&b.value, &end);
                    // Dropped here, after `end`, so output destructors are not measured.
                    black_box(outputs);
                    iteration_counter += batch_size;
                }
            }
            b.elapsed_time = time_start.elapsed();
        })
    }

    /// Like `iter_batched`, but `routine` borrows the input mutably instead of
    /// consuming it; input and output drops are excluded from the measured
    /// region, matching the sync `Bencher::iter_batched_ref`.
    ///
    /// Panics if the computed batch size is zero.
    #[inline(never)]
    pub fn iter_batched_ref<I, O, S, R, F>(&mut self, mut setup: S, mut routine: R, size: BatchSize)
    where
        S: FnMut() -> I,
        R: FnMut(&mut I) -> F,
        F: Future<Output = O>,
    {
        let AsyncBencher { b, runner } = self;
        runner.block_on(async {
            b.iterated = true;
            let batch_size = size.iters_per_batch(b.iters);
            assert!(batch_size != 0, "Batch size must not be zero.");
            let time_start = Instant::now();
            b.value = b.measurement.zero();
            if batch_size == 1 {
                for _ in 0..b.iters {
                    let mut input = black_box(setup());
                    let start = b.measurement.start();
                    let output = routine(&mut input).await;
                    let end = b.measurement.end(start);
                    b.value = b.measurement.add(&b.value, &end);
                    // Output and input dropped only after the measurement ends.
                    drop(black_box(output));
                    drop(black_box(input));
                }
            } else {
                let mut iteration_counter = 0;
                while iteration_counter < b.iters {
                    let batch_size = ::std::cmp::min(batch_size, b.iters - iteration_counter);
                    let mut inputs =
                        black_box((0..batch_size).map(|_| setup()).collect::<Vec<_>>());
                    let mut outputs = Vec::with_capacity(batch_size as usize);
                    let start = b.measurement.start();
                    // BUGFIX: iterate by `&mut` so `inputs` stays alive past
                    // `measurement.end`. The previous `for mut input in inputs`
                    // consumed the Vec and ran each input's destructor inside
                    // the timed region, inflating measurements relative to the
                    // sync `Bencher::iter_batched_ref` (which uses `iter_mut`).
                    for input in inputs.iter_mut() {
                        outputs.push(routine(input).await);
                    }
                    let end = b.measurement.end(start);
                    b.value = b.measurement.add(&b.value, &end);
                    black_box(outputs);
                    iteration_counter += batch_size;
                }
            }
            b.elapsed_time = time_start.elapsed();
        });
    }
}