
Commit 4f1830d (parent: af22ff9)

chore(cubestore): Upgrade DF: Fix various problems with compaction.

2 files changed: 4 additions, 3 deletions


rust/cubestore/cubestore/src/store/compaction.rs (3 additions, 2 deletions)

@@ -1395,12 +1395,13 @@ pub async fn merge_chunks(
             .iter()
             .map(|aggr_col| aggr_col.aggregate_expr(&res.schema()))
             .collect::<Result<Vec<_>, _>>()?;
+        let aggregates_len = aggregates.len();
 
         res = Arc::new(AggregateExec::try_new(
             AggregateMode::Final,
-            PhysicalGroupBy::new(groups, Vec::new(), Vec::new()),
+            PhysicalGroupBy::new_single(groups),
             aggregates,
-            Vec::new(),
+            vec![None; aggregates_len],
             res.clone(),
             schema,
         )?);
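Some context on this hunk for reviewers who have not followed the DataFusion upgrade: in the newer DataFusion API, `AggregateExec::try_new` takes a list of optional per-aggregate filter expressions (backing SQL's `FILTER (WHERE ...)` clause) that must contain one entry per aggregate, so an empty `Vec` no longer passes, and `PhysicalGroupBy::new_single` is the constructor for a plain `GROUP BY` without grouping sets. The hunk also captures `aggregates.len()` up front because `aggregates` is moved into `try_new`, after which its length can no longer be read. A minimal sketch of that ownership pattern, with hypothetical names (`consume_plan` stands in for the real constructor and is not a DataFusion API):

```rust
// Hypothetical stand-in for a constructor like AggregateExec::try_new
// that takes ownership of its inputs.
fn consume_plan(aggregates: Vec<String>, filters: Vec<Option<String>>) {
    // One optional filter per aggregate, mirroring what the real
    // constructor expects.
    assert_eq!(aggregates.len(), filters.len());
}

fn main() {
    let aggregates = vec!["sum(x)".to_string(), "count(y)".to_string()];
    // Capture the length before `aggregates` is moved into the call below;
    // reading `aggregates.len()` after the move would not compile.
    let aggregates_len = aggregates.len();
    consume_plan(aggregates, vec![None; aggregates_len]);
}
```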

rust/cubestore/cubestore/src/table/parquet.rs (1 addition, 1 deletion)

@@ -90,7 +90,7 @@ pub struct ParquetTableStore {
 
 impl ParquetTableStore {
     pub fn read_columns(&self, path: &str) -> Result<Vec<RecordBatch>, CubeError> {
-        let builder = ParquetRecordBatchReaderBuilder::try_new(File::create_new(path)?)?;
+        let builder = ParquetRecordBatchReaderBuilder::try_new(File::open(path)?)?;
         let mut r = builder.with_batch_size(self.row_group_size).build()?;
         let mut batches = Vec::new();
         for b in r {
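The bug here is the file-open mode: `File::create_new` (stabilized in Rust 1.77) opens a path write-only and fails with `AlreadyExists` when the file is already there, so a read path built on it either errors out or hands the Parquet reader a freshly created, empty, write-only file. `File::open` opens the existing file read-only, which is what the reader needs. A self-contained, std-only sketch of the difference, using a hypothetical scratch path:

```rust
use std::fs::File;
use std::io::Write;

fn main() -> std::io::Result<()> {
    // Hypothetical scratch path, for illustration only.
    let path = "scratch.bin";

    // `File::create_new` creates a brand-new, empty, write-only file and
    // errors if the path already exists -- wrong on both counts for reading.
    let mut f = File::create_new(path)?;
    f.write_all(b"some bytes")?;
    drop(f);

    // Now that the file exists, a second `create_new` fails...
    assert!(File::create_new(path).is_err());

    // ...while `File::open` opens it read-only, which is what a
    // ParquetRecordBatchReaderBuilder needs.
    let f = File::open(path)?;
    drop(f);

    std::fs::remove_file(path)
}
```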
