//! Defines physical expressions for the VAR, VAR_SAMP and VAR_POP aggregates,
//! evaluated at runtime during query execution.
use std::any::Any;
use std::sync::Arc;
use crate::error::{DataFusionError, Result};
use crate::physical_plan::{Accumulator, AggregateExpr, PhysicalExpr};
use crate::scalar::ScalarValue;
use arrow::{
    array::{ArrayRef, Float64Array, UInt64Array},
    compute::cast,
    datatypes::{DataType, Field},
};
use super::{format_state_name, StatsType};
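/// VAR and VAR_SAMP (sample variance) aggregate expression.
///
/// A minimal usage sketch, assuming a `Float64` column named "a" in `schema` and an
/// already-evaluated input array `array` (illustrative only, so not run as a doc test):
///
/// ```ignore
/// let var = Variance::new(col("a", &schema)?, "var_a", DataType::Float64);
/// let mut acc = var.create_accumulator()?;
/// acc.update_batch(&[array])?; // `array: ArrayRef` holds the column values
/// let result = acc.evaluate()?; // ScalarValue::Float64 with the sample variance
/// ```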
#[derive(Debug)]
pub struct Variance {
name: String,
expr: Arc<dyn PhysicalExpr>,
}
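/// VAR_POP (population variance) aggregate expression.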
#[derive(Debug)]
pub struct VariancePop {
name: String,
expr: Arc<dyn PhysicalExpr>,
}
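/// Returns the result type of the variance aggregates for the given input type:
/// `Float64` for all supported numeric inputs.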
pub(crate) fn variance_return_type(arg_type: &DataType) -> Result<DataType> {
match arg_type {
DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::Int64
| DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Float32
| DataType::Float64 => Ok(DataType::Float64),
other => Err(DataFusionError::Plan(format!(
"VAR does not support {:?}",
other
))),
}
}
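/// Returns true if the variance aggregates support the given input type.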
pub(crate) fn is_variance_support_arg_type(arg_type: &DataType) -> bool {
matches!(
arg_type,
DataType::UInt8
| DataType::UInt16
| DataType::UInt32
| DataType::UInt64
| DataType::Int8
| DataType::Int16
| DataType::Int32
| DataType::Int64
| DataType::Float32
| DataType::Float64
)
}
impl Variance {
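    /// Create a new VAR/VAR_SAMP aggregate function; `data_type` must be
    /// `Float64` (enforced by the assertion below).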
pub fn new(
expr: Arc<dyn PhysicalExpr>,
name: impl Into<String>,
data_type: DataType,
) -> Self {
assert!(matches!(data_type, DataType::Float64));
Self {
name: name.into(),
expr,
}
}
}
impl AggregateExpr for Variance {
fn as_any(&self) -> &dyn Any {
self
}
fn field(&self) -> Result<Field> {
Ok(Field::new(&self.name, DataType::Float64, true))
}
fn create_accumulator(&self) -> Result<Box<dyn Accumulator>> {
Ok(Box::new(VarianceAccumulator::try_new(StatsType::Sample)?))
}
fn state_fields(&self) -> Result<Vec<Field>> {
Ok(vec![
Field::new(
&format_state_name(&self.name, "count"),
DataType::UInt64,
true,
),
Field::new(
&format_state_name(&self.name, "mean"),
DataType::Float64,
true,
),
Field::new(
&format_state_name(&self.name, "m2"),
DataType::Float64,
true,
),
])
}
fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
vec![self.expr.clone()]
}
fn name(&self) -> &str {
&self.name
}
}
impl VariancePop {
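    /// Create a new VAR_POP aggregate function; `data_type` must be
    /// `Float64` (enforced by the assertion below).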
pub fn new(
expr: Arc<dyn PhysicalExpr>,
name: impl Into<String>,
data_type: DataType,
) -> Self {
assert!(matches!(data_type, DataType::Float64));
Self {
name: name.into(),
expr,
}
}
}
impl AggregateExpr for VariancePop {
fn as_any(&self) -> &dyn Any {
self
}
fn field(&self) -> Result<Field> {
Ok(Field::new(&self.name, DataType::Float64, true))
}
fn create_accumulator(&self) -> Result<Box<dyn Accumulator>> {
Ok(Box::new(VarianceAccumulator::try_new(
StatsType::Population,
)?))
}
fn state_fields(&self) -> Result<Vec<Field>> {
Ok(vec![
Field::new(
&format_state_name(&self.name, "count"),
DataType::UInt64,
true,
),
Field::new(
&format_state_name(&self.name, "mean"),
DataType::Float64,
true,
),
Field::new(
&format_state_name(&self.name, "m2"),
DataType::Float64,
true,
),
])
}
fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
vec![self.expr.clone()]
}
fn name(&self) -> &str {
&self.name
}
}
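/// An accumulator that computes variance incrementally with Welford's online
/// algorithm, tracking the running count, mean, and sum of squared deviations (m2).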
#[derive(Debug)]
pub struct VarianceAccumulator {
m2: f64,
mean: f64,
count: u64,
stats_type: StatsType,
}
impl VarianceAccumulator {
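    /// Creates a new `VarianceAccumulator` for either the sample or population statistic.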
pub fn try_new(s_type: StatsType) -> Result<Self> {
Ok(Self {
m2: 0_f64,
mean: 0_f64,
count: 0_u64,
stats_type: s_type,
})
}
pub fn get_count(&self) -> u64 {
self.count
}
pub fn get_mean(&self) -> f64 {
self.mean
}
pub fn get_m2(&self) -> f64 {
self.m2
}
}
impl Accumulator for VarianceAccumulator {
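    // The serialized intermediate state is (count, mean, m2), in the same order
    // as the fields declared by `state_fields`.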
fn state(&self) -> Result<Vec<ScalarValue>> {
Ok(vec![
ScalarValue::from(self.count),
ScalarValue::from(self.mean),
ScalarValue::from(self.m2),
])
}
fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
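        // Cast the input column to Float64 and skip nulls, then fold each value
        // into the running (count, mean, m2) state with Welford's online algorithm.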
let values = &cast(&values[0], &DataType::Float64)?;
let arr = values
.as_any()
.downcast_ref::<Float64Array>()
.unwrap()
.iter()
.flatten();
for value in arr {
let new_count = self.count + 1;
let delta1 = value - self.mean;
let new_mean = delta1 / new_count as f64 + self.mean;
let delta2 = value - new_mean;
let new_m2 = self.m2 + delta1 * delta2;
self.count += 1;
self.mean = new_mean;
self.m2 = new_m2;
}
Ok(())
}
fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
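        // Combine partial (count, mean, m2) states using the parallel form of
        // Welford's algorithm (Chan et al.): the merged mean is the count-weighted
        // mean, and m2 gains a delta^2 * n_a * n_b / (n_a + n_b) correction term.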
let counts = states[0].as_any().downcast_ref::<UInt64Array>().unwrap();
let means = states[1].as_any().downcast_ref::<Float64Array>().unwrap();
let m2s = states[2].as_any().downcast_ref::<Float64Array>().unwrap();
for i in 0..counts.len() {
let c = counts.value(i);
if c == 0_u64 {
continue;
}
let new_count = self.count + c;
let new_mean = self.mean * self.count as f64 / new_count as f64
+ means.value(i) * c as f64 / new_count as f64;
let delta = self.mean - means.value(i);
let new_m2 = self.m2
+ m2s.value(i)
+ delta * delta * self.count as f64 * c as f64 / new_count as f64;
self.count = new_count;
self.mean = new_mean;
self.m2 = new_m2;
}
Ok(())
}
    fn evaluate(&self) -> Result<ScalarValue> {
        // Sample variance uses Bessel's correction (divide by n - 1), while
        // population variance divides by n.
        let count = match self.stats_type {
            StatsType::Population => self.count,
            StatsType::Sample => {
                if self.count > 0 {
                    self.count - 1
                } else {
                    self.count
                }
            }
        };
        // Variance needs at least two non-null input values; this also keeps the
        // sample divisor (n - 1) from being zero.
        if self.count < 2 {
            return Err(DataFusionError::Internal(
                "At least two values are needed to calculate variance".to_string(),
            ));
        }
        Ok(ScalarValue::Float64(Some(self.m2 / count as f64)))
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::from_slice::FromSlice;
use crate::physical_plan::expressions::col;
use crate::{error::Result, generic_test_op};
use arrow::record_batch::RecordBatch;
use arrow::{array::*, datatypes::*};
#[test]
fn variance_f64_1() -> Result<()> {
let a: ArrayRef = Arc::new(Float64Array::from_slice(&[1_f64, 2_f64]));
generic_test_op!(
a,
DataType::Float64,
VariancePop,
ScalarValue::from(0.25_f64),
DataType::Float64
)
}
#[test]
fn variance_f64_2() -> Result<()> {
let a: ArrayRef = Arc::new(Float64Array::from_slice(&[
1_f64, 2_f64, 3_f64, 4_f64, 5_f64,
]));
generic_test_op!(
a,
DataType::Float64,
VariancePop,
ScalarValue::from(2_f64),
DataType::Float64
)
}
#[test]
fn variance_f64_3() -> Result<()> {
let a: ArrayRef = Arc::new(Float64Array::from_slice(&[
1_f64, 2_f64, 3_f64, 4_f64, 5_f64,
]));
generic_test_op!(
a,
DataType::Float64,
Variance,
ScalarValue::from(2.5_f64),
DataType::Float64
)
}
#[test]
fn variance_f64_4() -> Result<()> {
let a: ArrayRef = Arc::new(Float64Array::from_slice(&[1.1_f64, 2_f64, 3_f64]));
generic_test_op!(
a,
DataType::Float64,
Variance,
ScalarValue::from(0.9033333333333333_f64),
DataType::Float64
)
}
#[test]
fn variance_i32() -> Result<()> {
let a: ArrayRef = Arc::new(Int32Array::from_slice(&[1, 2, 3, 4, 5]));
generic_test_op!(
a,
DataType::Int32,
VariancePop,
ScalarValue::from(2_f64),
DataType::Float64
)
}
#[test]
fn variance_u32() -> Result<()> {
let a: ArrayRef = Arc::new(UInt32Array::from_slice(&[
1_u32, 2_u32, 3_u32, 4_u32, 5_u32,
]));
generic_test_op!(
a,
DataType::UInt32,
VariancePop,
ScalarValue::from(2.0f64),
DataType::Float64
)
}
#[test]
fn variance_f32() -> Result<()> {
let a: ArrayRef = Arc::new(Float32Array::from_slice(&[
1_f32, 2_f32, 3_f32, 4_f32, 5_f32,
]));
generic_test_op!(
a,
DataType::Float32,
VariancePop,
ScalarValue::from(2_f64),
DataType::Float64
)
}
#[test]
fn test_variance_return_data_type() -> Result<()> {
let data_type = DataType::Float64;
let result_type = variance_return_type(&data_type)?;
assert_eq!(DataType::Float64, result_type);
let data_type = DataType::Decimal(36, 10);
assert!(variance_return_type(&data_type).is_err());
Ok(())
}
#[test]
fn test_variance_1_input() -> Result<()> {
let a: ArrayRef = Arc::new(Float64Array::from_slice(&[1_f64]));
let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
let batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![a])?;
let agg = Arc::new(Variance::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let actual = aggregate(&batch, agg);
assert!(actual.is_err());
Ok(())
}
#[test]
fn variance_i32_with_nulls() -> Result<()> {
let a: ArrayRef = Arc::new(Int32Array::from(vec![
Some(1),
None,
Some(3),
Some(4),
Some(5),
]));
generic_test_op!(
a,
DataType::Int32,
VariancePop,
ScalarValue::from(2.1875f64),
DataType::Float64
)
}
#[test]
fn variance_i32_all_nulls() -> Result<()> {
let a: ArrayRef = Arc::new(Int32Array::from(vec![None, None]));
let schema = Schema::new(vec![Field::new("a", DataType::Int32, false)]);
let batch = RecordBatch::try_new(Arc::new(schema.clone()), vec![a])?;
let agg = Arc::new(Variance::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let actual = aggregate(&batch, agg);
assert!(actual.is_err());
Ok(())
}
#[test]
fn variance_f64_merge_1() -> Result<()> {
let a = Arc::new(Float64Array::from_slice(&[1_f64, 2_f64, 3_f64]));
let b = Arc::new(Float64Array::from_slice(&[4_f64, 5_f64]));
let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
let batch1 = RecordBatch::try_new(Arc::new(schema.clone()), vec![a])?;
let batch2 = RecordBatch::try_new(Arc::new(schema.clone()), vec![b])?;
let agg1 = Arc::new(VariancePop::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let agg2 = Arc::new(VariancePop::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let actual = merge(&batch1, &batch2, agg1, agg2)?;
        assert_eq!(actual, ScalarValue::from(2_f64));
Ok(())
}
#[test]
fn variance_f64_merge_2() -> Result<()> {
let a = Arc::new(Float64Array::from_slice(&[
1_f64, 2_f64, 3_f64, 4_f64, 5_f64,
]));
let b = Arc::new(Float64Array::from(vec![None]));
let schema = Schema::new(vec![Field::new("a", DataType::Float64, false)]);
let batch1 = RecordBatch::try_new(Arc::new(schema.clone()), vec![a])?;
let batch2 = RecordBatch::try_new(Arc::new(schema.clone()), vec![b])?;
let agg1 = Arc::new(VariancePop::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let agg2 = Arc::new(VariancePop::new(
col("a", &schema)?,
"bla".to_string(),
DataType::Float64,
));
let actual = merge(&batch1, &batch2, agg1, agg2)?;
        assert_eq!(actual, ScalarValue::from(2_f64));
Ok(())
}
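    // Test helper: evaluates the aggregate's input expression against a single
    // batch, feeds the result to a fresh accumulator, and returns the final value.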
fn aggregate(
batch: &RecordBatch,
agg: Arc<dyn AggregateExpr>,
) -> Result<ScalarValue> {
let mut accum = agg.create_accumulator()?;
let expr = agg.expressions();
let values = expr
.iter()
.map(|e| e.evaluate(batch))
.map(|r| r.map(|v| v.into_array(batch.num_rows())))
.collect::<Result<Vec<_>>>()?;
accum.update_batch(&values)?;
accum.evaluate()
}
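    // Test helper: updates two accumulators on separate batches, converts the second
    // accumulator's state back into arrays, merges it into the first, and evaluates.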
fn merge(
batch1: &RecordBatch,
batch2: &RecordBatch,
agg1: Arc<dyn AggregateExpr>,
agg2: Arc<dyn AggregateExpr>,
) -> Result<ScalarValue> {
let mut accum1 = agg1.create_accumulator()?;
let mut accum2 = agg2.create_accumulator()?;
let expr1 = agg1.expressions();
let expr2 = agg2.expressions();
let values1 = expr1
.iter()
.map(|e| e.evaluate(batch1))
.map(|r| r.map(|v| v.into_array(batch1.num_rows())))
.collect::<Result<Vec<_>>>()?;
let values2 = expr2
.iter()
.map(|e| e.evaluate(batch2))
.map(|r| r.map(|v| v.into_array(batch2.num_rows())))
.collect::<Result<Vec<_>>>()?;
accum1.update_batch(&values1)?;
accum2.update_batch(&values2)?;
let state2 = accum2
.state()?
.iter()
.map(|v| vec![v.clone()])
.map(|x| ScalarValue::iter_to_array(x).unwrap())
.collect::<Vec<_>>();
accum1.merge_batch(&state2)?;
accum1.evaluate()
}
}