// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Benchmarks for the `NormalizeNaNAndZero` expression.
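//!
//! `NormalizeNaNAndZero` mirrors Spark's floating-point normalization: NaN values
//! are replaced with a single canonical NaN and -0.0 is rewritten as 0.0 so that
//! grouping, joins, and comparisons treat these values consistently.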

use arrow::array::Float64Array;
use arrow::datatypes::{DataType, Field, Schema};
use arrow::record_batch::RecordBatch;
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use datafusion::physical_expr::expressions::Column;
use datafusion::physical_expr::PhysicalExpr;
use datafusion_comet_spark_expr::NormalizeNaNAndZero;
use std::hint::black_box;
use std::sync::Arc;

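// 8192 rows matches the batch size that DataFusion and Comet typically produce,
// so the timings below reflect a realistic per-batch cost.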
const BATCH_SIZE: usize = 8192;

fn make_col(name: &str, index: usize) -> Arc<dyn PhysicalExpr> {
    Arc::new(Column::new(name, index))
}

/// Create a batch with a single nullable Float64 column containing ordinary values
/// mixed with NaN, -0.0, and nulls at the requested percentages.
fn create_float_batch(nan_pct: usize, neg_zero_pct: usize, null_pct: usize) -> RecordBatch {
    let mut values: Vec<Option<f64>> = Vec::with_capacity(BATCH_SIZE);

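    // Deterministically interleave special values: a row whose index is
    // 0 modulo (100 / null_pct) becomes null, 1 modulo (100 / nan_pct) becomes NaN,
    // and 2 modulo (100 / neg_zero_pct) becomes -0.0; every other row gets an
    // ordinary increasing value.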
    for i in 0..BATCH_SIZE {
        if null_pct > 0 && i % (100 / null_pct.max(1)) == 0 {
            values.push(None);
        } else if nan_pct > 0 && i % (100 / nan_pct.max(1)) == 1 {
            values.push(Some(f64::NAN));
        } else if neg_zero_pct > 0 && i % (100 / neg_zero_pct.max(1)) == 2 {
            values.push(Some(-0.0));
        } else {
            values.push(Some(i as f64 * 1.5));
        }
    }

    let array = Float64Array::from(values);
    let schema = Schema::new(vec![Field::new("c1", DataType::Float64, true)]);

    RecordBatch::try_new(Arc::new(schema), vec![Arc::new(array)]).unwrap()
}

fn bench_normalize_nan_and_zero(c: &mut Criterion) {
    let mut group = c.benchmark_group("normalize_nan_and_zero");

    // Test with different percentages of special values
    let test_cases = [
        ("no_special", 0, 0, 0),
        ("10pct_nan", 10, 0, 0),
        ("10pct_neg_zero", 0, 10, 0),
        ("10pct_null", 0, 0, 10),
        ("mixed_5pct_each", 5, 5, 5),
    ];

    for (name, nan_pct, neg_zero_pct, null_pct) in test_cases {
        let batch = create_float_batch(nan_pct, neg_zero_pct, null_pct);

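        // Build the expression once per case: the value's data type plus the child
        // column to normalize. Only the `evaluate` call in the timing loop below is
        // measured.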
        let normalize_expr = Arc::new(NormalizeNaNAndZero::new(
            DataType::Float64,
            make_col("c1", 0),
        ));

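        // Each iteration evaluates the expression against the full batch;
        // `black_box` keeps the compiler from optimizing away the input or the result.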
        group.bench_with_input(BenchmarkId::new("float64", name), &batch, |b, batch| {
            b.iter(|| black_box(normalize_expr.evaluate(black_box(batch)).unwrap()));
        });
    }

    group.finish();
}

criterion_group!(benches, bench_normalize_nan_and_zero);
criterion_main!(benches);