Commit 237a915

Separate chunker from batcher

The chunker was part of the batcher and responsible for transforming input data into the batcher's chain format. Hence, the batcher needed to be aware of its input types, although it would not otherwise use this information.

With this change, the chunker is separate from the batcher. This simplifies the logic within the chunker slightly, but most importantly it moves the responsibility for forming chunks to whoever holds the batcher. In Differential, this is `arrange_core`: it now learns about an input container type and a chunker, and uses the chunker to convert input data into chunks of sorted and consolidated data.

Signed-off-by: Moritz Hoffmann <[email protected]>
1 parent 4147948

13 files changed: +126 -125 lines
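
As a rough orientation for the per-file diffs below, here is a minimal, self-contained sketch of the division of labor the commit message describes. All names (`ToyChunker`, `ToyBatcher`, `toy_arrange_core`) are hypothetical stand-ins, not the actual timely or differential-dataflow APIs: the chunker alone knows the raw input type and turns it into sorted, consolidated chunks, the batcher only ever receives finished chunks, and the operator holding both does the forwarding, as `arrange_core` now does.

// Stand-in chunker: absorbs raw (data, diff) pairs and emits sorted,
// consolidated chunks. The real chunkers are timely `ContainerBuilder`s.
#[derive(Default)]
struct ToyChunker {
    pending: Vec<(String, i64)>,
}

impl ToyChunker {
    /// Absorb a raw input container.
    fn push(&mut self, input: &mut Vec<(String, i64)>) {
        self.pending.append(input);
    }
    /// Sort, consolidate, and hand back a finished chunk, if any data remains.
    fn finish(&mut self) -> Option<Vec<(String, i64)>> {
        if self.pending.is_empty() {
            return None;
        }
        self.pending.sort();
        let mut chunk: Vec<(String, i64)> = Vec::with_capacity(self.pending.len());
        for (data, diff) in self.pending.drain(..) {
            if let Some((last, d)) = chunk.last_mut() {
                if *last == data {
                    *d += diff;
                    continue;
                }
            }
            chunk.push((data, diff));
        }
        chunk.retain(|(_, d)| *d != 0);
        Some(chunk)
    }
}

// Stand-in batcher: stores chains of finished chunks and never sees raw input.
#[derive(Default)]
struct ToyBatcher {
    chains: Vec<Vec<(String, i64)>>,
}

impl ToyBatcher {
    fn push_chunk(&mut self, chunk: Vec<(String, i64)>) {
        self.chains.push(chunk);
    }
}

// The operator owns both and forwards chunks; only the chunker depends on the
// raw input container type.
fn toy_arrange_core(mut raw: Vec<(String, i64)>) -> ToyBatcher {
    let mut chunker = ToyChunker::default();
    let mut batcher = ToyBatcher::default();
    chunker.push(&mut raw);
    while let Some(chunk) = chunker.finish() {
        batcher.push_chunk(chunk);
    }
    batcher
}

fn main() {
    let raw = vec![("b".to_string(), 1), ("a".to_string(), 1), ("b".to_string(), -1)];
    let batcher = toy_arrange_core(raw);
    // ("b", 1) and ("b", -1) cancel; only ("a", 1) survives consolidation.
    assert_eq!(batcher.chains, vec![vec![("a".to_string(), 1)]]);
}

The real implementation differs in the details: the chunkers hand out mutable references to reusable containers from `extract`/`finish`, and the batcher organizes incoming chunks into geometrically sized chains rather than a flat list.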

differential-dataflow/examples/columnar.rs

Lines changed: 8 additions & 6 deletions
@@ -44,8 +44,9 @@ fn main() {
     let data_pact = ExchangeCore::<ColumnBuilder<((String,()),u64,i64)>,_>::new_core(|x: &((&str,()),&u64,&i64)| (x.0).0.as_bytes().iter().map(|x| *x as u64).sum::<u64>() as u64);
     let keys_pact = ExchangeCore::<ColumnBuilder<((String,()),u64,i64)>,_>::new_core(|x: &((&str,()),&u64,&i64)| (x.0).0.as_bytes().iter().map(|x| *x as u64).sum::<u64>() as u64);

-    let data = arrange_core::<_,_,Col2KeyBatcher<_,_,_>, ColKeyBuilder<_,_,_>, ColKeySpine<_,_,_>>(&data, data_pact, "Data");
-    let keys = arrange_core::<_,_,Col2KeyBatcher<_,_,_>, ColKeyBuilder<_,_,_>, ColKeySpine<_,_,_>>(&keys, keys_pact, "Keys");
+    use crate::batcher::Col2ValChunker;
+    let data = arrange_core::<_,_,_,Col2ValChunker<WordCount>,Col2KeyBatcher<_,_,_>, ColKeyBuilder<_,_,_>, ColKeySpine<_,_,_>>(&data, data_pact, "Data");
+    let keys = arrange_core::<_,_,_,Col2ValChunker<WordCount>,Col2KeyBatcher<_,_,_>, ColKeyBuilder<_,_,_>, ColKeySpine<_,_,_>>(&keys, keys_pact, "Keys");

     keys.join_core(&data, |_k, &(), &()| Option::<()>::None)
         .probe_with(&mut probe);
@@ -373,7 +374,8 @@ pub mod batcher {
     use differential_dataflow::trace::implementations::merge_batcher::MergeBatcher;

     /// A batcher for columnar storage.
-    pub type Col2ValBatcher<K, V, T, R> = MergeBatcher<Column<((K,V),T,R)>, Chunker<Column<((K,V),T,R)>>, merger::ColumnMerger<(K,V),T,R>>;
+    pub type Col2ValChunker<T> = Chunker<Column<T>>;
+    pub type Col2ValBatcher<K, V, T, R> = MergeBatcher<merger::ColumnMerger<(K,V),T,R>>;
     pub type Col2KeyBatcher<K, T, R> = Col2ValBatcher<K, (), T, R>;

     // First draft: build a "chunker" and a "merger".
@@ -408,11 +410,11 @@ pub mod batcher {

     impl<'a, D, T, R, C2> PushInto<&'a mut Column<(D, T, R)>> for Chunker<C2>
     where
-        D: for<'b> Columnar,
+        D: Columnar,
         for<'b> columnar::Ref<'b, D>: Ord,
-        T: for<'b> Columnar,
+        T: Columnar,
         for<'b> columnar::Ref<'b, T>: Ord,
-        R: for<'b> Columnar + for<'b> Semigroup<columnar::Ref<'b, R>>,
+        R: Columnar + for<'b> Semigroup<columnar::Ref<'b, R>>,
         for<'b> columnar::Ref<'b, R>: Ord,
         C2: Container + for<'b, 'c> PushInto<(columnar::Ref<'b, D>, columnar::Ref<'b, T>, &'c R)>,
     {

differential-dataflow/examples/spines.rs

Lines changed: 9 additions & 6 deletions
@@ -28,23 +28,26 @@ fn main() {

     match mode.as_str() {
         "new" => {
+            use differential_dataflow::trace::implementations::ColumnationChunker;
             use differential_dataflow::trace::implementations::ord_neu::{ColKeyBatcher, ColKeyBuilder, ColKeySpine};
-            let data = data.arrange::<ColKeyBatcher<_,_,_>, ColKeyBuilder<_,_,_>, ColKeySpine<_,_,_>>();
-            let keys = keys.arrange::<ColKeyBatcher<_,_,_>, ColKeyBuilder<_,_,_>, ColKeySpine<_,_,_>>();
+            let data = data.arrange::<ColumnationChunker<_>,ColKeyBatcher<_,_,_>, ColKeyBuilder<_,_,_>, ColKeySpine<_,_,_>>();
+            let keys = keys.arrange::<ColumnationChunker<_>,ColKeyBatcher<_,_,_>, ColKeyBuilder<_,_,_>, ColKeySpine<_,_,_>>();
             keys.join_core(&data, |_k, &(), &()| Option::<()>::None)
                 .probe_with(&mut probe);
         },
         "old" => {
+            use differential_dataflow::trace::implementations::VecChunker;
             use differential_dataflow::trace::implementations::ord_neu::{OrdKeyBatcher, RcOrdKeyBuilder, OrdKeySpine};
-            let data = data.arrange::<OrdKeyBatcher<_,_,_>, RcOrdKeyBuilder<_,_,_>, OrdKeySpine<_,_,_>>();
-            let keys = keys.arrange::<OrdKeyBatcher<_,_,_>, RcOrdKeyBuilder<_,_,_>, OrdKeySpine<_,_,_>>();
+            let data = data.arrange::<VecChunker<_>,OrdKeyBatcher<_,_,_>, RcOrdKeyBuilder<_,_,_>, OrdKeySpine<_,_,_>>();
+            let keys = keys.arrange::<VecChunker<_>,OrdKeyBatcher<_,_,_>, RcOrdKeyBuilder<_,_,_>, OrdKeySpine<_,_,_>>();
             keys.join_core(&data, |_k, &(), &()| Option::<()>::None)
                 .probe_with(&mut probe);
         },
         "rhh" => {
+            use differential_dataflow::trace::implementations::VecChunker;
             use differential_dataflow::trace::implementations::rhh::{HashWrapper, VecBatcher, VecBuilder, VecSpine};
-            let data = data.map(|x| HashWrapper { inner: x }).arrange::<VecBatcher<_,(),_,_>, VecBuilder<_,(),_,_>, VecSpine<_,(),_,_>>();
-            let keys = keys.map(|x| HashWrapper { inner: x }).arrange::<VecBatcher<_,(),_,_>, VecBuilder<_,(),_,_>, VecSpine<_,(),_,_>>();
+            let data = data.map(|x| HashWrapper { inner: x }).arrange::<VecChunker<_>,VecBatcher<_,(),_,_>, VecBuilder<_,(),_,_>, VecSpine<_,(),_,_>>();
+            let keys = keys.map(|x| HashWrapper { inner: x }).arrange::<VecChunker<_>,VecBatcher<_,(),_,_>, VecBuilder<_,(),_,_>, VecSpine<_,(),_,_>>();
             keys.join_core(&data, |_k, &(), &()| Option::<()>::None)
                 .probe_with(&mut probe);
         },

differential-dataflow/src/operators/arrange/arrangement.rs

Lines changed: 41 additions & 24 deletions
@@ -30,7 +30,7 @@ use crate::{Data, ExchangeData, Collection, AsCollection, Hashable};
 use crate::difference::Semigroup;
 use crate::lattice::Lattice;
 use crate::trace::{self, Trace, TraceReader, BatchReader, Batcher, Builder, Cursor};
-use crate::trace::implementations::{KeyBatcher, KeyBuilder, KeySpine, ValBatcher, ValBuilder, ValSpine};
+use crate::trace::implementations::{KeyBatcher, KeyBuilder, KeySpine, ValBatcher, ValBuilder, ValSpine, VecChunker};

 use trace::wrappers::enter::{TraceEnter, BatchEnter,};
 use trace::wrappers::enter_at::TraceEnter as TraceEnterAt;
@@ -76,7 +76,7 @@ where
 use ::timely::dataflow::scopes::Child;
 use ::timely::progress::timestamp::Refines;
 use timely::Container;
-use timely::container::PushInto;
+use timely::container::{ContainerBuilder, PushInto};

 impl<G, Tr> Arranged<G, Tr>
 where
@@ -348,20 +348,22 @@ where
     G: Scope<Timestamp: Lattice>,
 {
     /// Arranges updates into a shared trace.
-    fn arrange<Ba, Bu, Tr>(&self) -> Arranged<G, TraceAgent<Tr>>
+    fn arrange<Chu, Ba, Bu, Tr>(&self) -> Arranged<G, TraceAgent<Tr>>
     where
-        Ba: Batcher<Input=C, Time=G::Timestamp> + 'static,
-        Bu: Builder<Time=G::Timestamp, Input=Ba::Output, Output = Tr::Batch>,
+        Chu: ContainerBuilder<Container=Ba::Container> + for<'a> PushInto<&'a mut C>,
+        Ba: Batcher<Time=G::Timestamp> + 'static,
+        Bu: Builder<Time=G::Timestamp, Input=Ba::Container, Output = Tr::Batch>,
         Tr: Trace<Time=G::Timestamp> + 'static,
     {
-        self.arrange_named::<Ba, Bu, Tr>("Arrange")
+        self.arrange_named::<Chu, Ba, Bu, Tr>("Arrange")
     }

     /// Arranges updates into a shared trace, with a supplied name.
-    fn arrange_named<Ba, Bu, Tr>(&self, name: &str) -> Arranged<G, TraceAgent<Tr>>
+    fn arrange_named<Chu, Ba, Bu, Tr>(&self, name: &str) -> Arranged<G, TraceAgent<Tr>>
     where
-        Ba: Batcher<Input=C, Time=G::Timestamp> + 'static,
-        Bu: Builder<Time=G::Timestamp, Input=Ba::Output, Output = Tr::Batch>,
+        Chu: ContainerBuilder<Container=Ba::Container> + for<'a> PushInto<&'a mut C>,
+        Ba: Batcher<Time=G::Timestamp> + 'static,
+        Bu: Builder<Time=G::Timestamp, Input=Ba::Container, Output = Tr::Batch>,
         Tr: Trace<Time=G::Timestamp> + 'static,
     ;
 }
@@ -373,14 +375,15 @@ where
     V: ExchangeData,
     R: ExchangeData + Semigroup,
 {
-    fn arrange_named<Ba, Bu, Tr>(&self, name: &str) -> Arranged<G, TraceAgent<Tr>>
+    fn arrange_named<Chu, Ba, Bu, Tr>(&self, name: &str) -> Arranged<G, TraceAgent<Tr>>
     where
-        Ba: Batcher<Input=Vec<((K, V), G::Timestamp, R)>, Time=G::Timestamp> + 'static,
-        Bu: Builder<Time=G::Timestamp, Input=Ba::Output, Output = Tr::Batch>,
+        Chu: ContainerBuilder<Container=Ba::Container> + for<'a> PushInto<&'a mut Vec<((K, V), G::Timestamp, R)>>,
+        Ba: Batcher<Time=G::Timestamp> + 'static,
+        Bu: Builder<Time=G::Timestamp, Input=Ba::Container, Output = Tr::Batch>,
         Tr: Trace<Time=G::Timestamp> + 'static,
     {
         let exchange = Exchange::new(move |update: &((K,V),G::Timestamp,R)| (update.0).0.hashed().into());
-        arrange_core::<_, _, Ba, Bu, _>(&self.inner, exchange, name)
+        arrange_core::<_, _, _, Chu, Ba, Bu, _>(&self.inner, exchange, name)
     }
 }

@@ -389,12 +392,14 @@ where
 /// This operator arranges a stream of values into a shared trace, whose contents it maintains.
 /// It uses the supplied parallelization contract to distribute the data, which does not need to
 /// be consistently by key (though this is the most common).
-pub fn arrange_core<G, P, Ba, Bu, Tr>(stream: &StreamCore<G, Ba::Input>, pact: P, name: &str) -> Arranged<G, TraceAgent<Tr>>
+pub fn arrange_core<G, P, C, Chu, Ba, Bu, Tr>(stream: &StreamCore<G, C>, pact: P, name: &str) -> Arranged<G, TraceAgent<Tr>>
 where
     G: Scope<Timestamp: Lattice>,
-    P: ParallelizationContract<G::Timestamp, Ba::Input>,
-    Ba: Batcher<Time=G::Timestamp,Input: Container + Clone + 'static> + 'static,
-    Bu: Builder<Time=G::Timestamp, Input=Ba::Output, Output = Tr::Batch>,
+    P: ParallelizationContract<G::Timestamp, C>,
+    C: Container + Clone + 'static,
+    Chu: ContainerBuilder<Container=Ba::Container> + for<'a> PushInto<&'a mut C>,
+    Ba: Batcher<Time=G::Timestamp> + 'static,
+    Bu: Builder<Time=G::Timestamp, Input=Ba::Container, Output = Tr::Batch>,
     Tr: Trace<Time=G::Timestamp>+'static,
 {
     // The `Arrange` operator is tasked with reacting to an advancing input
@@ -443,6 +448,8 @@ where
         // Initialize to the minimal input frontier.
         let mut prev_frontier = Antichain::from_elem(<G::Timestamp as Timestamp>::minimum());

+        let mut chunker = Chu::default();
+
         move |input, output| {

             // As we receive data, we need to (i) stash the data and (ii) keep *enough* capabilities.
@@ -451,7 +458,11 @@

             input.for_each(|cap, data| {
                 capabilities.insert(cap.retain());
-                batcher.push_container(data);
+                chunker.push_into(data);
+                while let Some(chunk) = chunker.extract() {
+                    let chunk = std::mem::take(chunk);
+                    batcher.push_into(chunk);
+                }
             });

             // The frontier may have advanced by multiple elements, which is an issue because
@@ -481,6 +492,11 @@ where
             // If there is at least one capability not in advance of the input frontier ...
             if capabilities.elements().iter().any(|c| !input.frontier().less_equal(c.time())) {

+                while let Some(chunk) = chunker.finish() {
+                    let chunk = std::mem::take(chunk);
+                    batcher.push_into(chunk);
+                }
+
                 let mut upper = Antichain::new(); // re-used allocation for sealing batches.

                 // For each capability not in advance of the input frontier ...
@@ -547,14 +563,15 @@ impl<G, K: ExchangeData+Hashable, R: ExchangeData+Semigroup> Arrange<G, Vec<((K,
 where
     G: Scope<Timestamp: Lattice+Ord>,
 {
-    fn arrange_named<Ba, Bu, Tr>(&self, name: &str) -> Arranged<G, TraceAgent<Tr>>
+    fn arrange_named<Chu, Ba, Bu, Tr>(&self, name: &str) -> Arranged<G, TraceAgent<Tr>>
     where
-        Ba: Batcher<Input=Vec<((K,()),G::Timestamp,R)>, Time=G::Timestamp> + 'static,
-        Bu: Builder<Time=G::Timestamp, Input=Ba::Output, Output = Tr::Batch>,
+        Chu: ContainerBuilder<Container=Ba::Container> + for<'a> PushInto<&'a mut Vec<((K, ()), G::Timestamp, R)>>,
+        Ba: Batcher<Time=G::Timestamp> + 'static,
+        Bu: Builder<Time=G::Timestamp, Input=Ba::Container, Output = Tr::Batch>,
         Tr: Trace<Time=G::Timestamp> + 'static,
     {
         let exchange = Exchange::new(move |update: &((K,()),G::Timestamp,R)| (update.0).0.hashed().into());
-        arrange_core::<_,_,Ba,Bu,_>(&self.map(|k| (k, ())).inner, exchange, name)
+        arrange_core::<_,_,_,Chu,Ba,Bu,_>(&self.map(|k| (k, ())).inner, exchange, name)
     }
 }

@@ -587,7 +604,7 @@ where
     }

     fn arrange_by_key_named(&self, name: &str) -> Arranged<G, TraceAgent<ValSpine<K, V, G::Timestamp, R>>> {
-        self.arrange_named::<ValBatcher<_,_,_,_>,ValBuilder<_,_,_,_>,_>(name)
+        self.arrange_named::<VecChunker<_>, ValBatcher<_,_,_,_>,ValBuilder<_,_,_,_>,_>(name)
     }
 }

@@ -622,6 +639,6 @@ where

     fn arrange_by_self_named(&self, name: &str) -> Arranged<G, TraceAgent<KeySpine<K, G::Timestamp, R>>> {
         self.map(|k| (k, ()))
-            .arrange_named::<KeyBatcher<_,_,_>,KeyBuilder<_,_,_>,_>(name)
+            .arrange_named::<VecChunker<_>, KeyBatcher<_,_,_>,KeyBuilder<_,_,_>,_>(name)
     }
 }

differential-dataflow/src/operators/consolidate.rs

Lines changed: 8 additions & 6 deletions
@@ -6,6 +6,7 @@
 //! underlying system can more clearly see that no work must be done in the later case, and we can
 //! drop out of, e.g. iterative computations.

+use timely::container::{ContainerBuilder, PushInto};
 use timely::dataflow::Scope;

 use crate::{Collection, ExchangeData, Hashable};
@@ -44,22 +45,23 @@ where
     /// });
     /// ```
     pub fn consolidate(&self) -> Self {
-        use crate::trace::implementations::{KeyBatcher, KeyBuilder, KeySpine};
-        self.consolidate_named::<KeyBatcher<_, _, _>,KeyBuilder<_,_,_>, KeySpine<_,_,_>,_>("Consolidate", |key,&()| key.clone())
+        use crate::trace::implementations::{VecChunker, KeyBatcher, KeyBuilder, KeySpine};
+        self.consolidate_named::<VecChunker<_>,KeyBatcher<_, _, _>,KeyBuilder<_,_,_>, KeySpine<_,_,_>,_>("Consolidate", |key,&()| key.clone())
     }

     /// As `consolidate` but with the ability to name the operator, specify the trace type,
     /// and provide the function `reify` to produce owned keys and values..
-    pub fn consolidate_named<Ba, Bu, Tr, F>(&self, name: &str, reify: F) -> Self
+    pub fn consolidate_named<Chu, Ba, Bu, Tr, F>(&self, name: &str, reify: F) -> Self
     where
-        Ba: Batcher<Input=Vec<((D,()),G::Timestamp,R)>, Time=G::Timestamp> + 'static,
+        Chu: ContainerBuilder<Container=Ba::Container> + for<'a> PushInto<&'a mut Vec<((D, ()), G::Timestamp, R)>>,
+        Ba: Batcher<Time=G::Timestamp> + 'static,
         Tr: for<'a> crate::trace::Trace<Time=G::Timestamp,Diff=R>+'static,
-        Bu: Builder<Time=Tr::Time, Input=Ba::Output, Output=Tr::Batch>,
+        Bu: Builder<Time=Tr::Time, Input=Ba::Container, Output=Tr::Batch>,
         F: Fn(Tr::Key<'_>, Tr::Val<'_>) -> D + 'static,
     {
         use crate::operators::arrange::arrangement::Arrange;
         self.map(|k| (k, ()))
-            .arrange_named::<Ba, Bu, Tr>(name)
+            .arrange_named::<Chu, Ba, Bu, Tr>(name)
             .as_collection(reify)
     }


differential-dataflow/src/trace/implementations/merge_batcher.rs

Lines changed: 15 additions & 36 deletions
@@ -10,23 +10,16 @@
 //! Implementations of `MergeBatcher` can be instantiated through the choice of both
 //! the chunker and the merger, provided their respective output and input types align.

-use std::marker::PhantomData;
-
 use timely::progress::frontier::AntichainRef;
 use timely::progress::{frontier::Antichain, Timestamp};
 use timely::Container;
-use timely::container::{ContainerBuilder, PushInto};
+use timely::container::PushInto;

 use crate::logging::{BatcherEvent, Logger};
 use crate::trace::{Batcher, Builder, Description};

 /// Creates batches from containers of unordered tuples.
-///
-/// To implement `Batcher`, the container builder `C` must accept `&mut Input` as inputs,
-/// and must produce outputs of type `M::Chunk`.
-pub struct MergeBatcher<Input, C, M: Merger> {
-    /// Transforms input streams to chunks of sorted, consolidated data.
-    chunker: C,
+pub struct MergeBatcher<M: Merger> {
     /// A sequence of power-of-two length lists of sorted, consolidated containers.
     ///
     /// Do not push/pop directly but use the corresponding functions ([`Self::chain_push`]/[`Self::chain_pop`]).
@@ -43,54 +36,32 @@ pub struct MergeBatcher<Input, C, M: Merger> {
     logger: Option<Logger>,
     /// Timely operator ID.
     operator_id: usize,
-    /// The `Input` type needs to be called out as the type of container accepted, but it is not otherwise present.
-    _marker: PhantomData<Input>,
 }

-impl<Input, C, M> Batcher for MergeBatcher<Input, C, M>
+impl<M> Batcher for MergeBatcher<M>
 where
-    C: ContainerBuilder<Container=M::Chunk> + Default + for<'a> PushInto<&'a mut Input>,
     M: Merger<Time: Timestamp>,
 {
-    type Input = Input;
     type Time = M::Time;
-    type Output = M::Chunk;
+    type Container = M::Chunk;

     fn new(logger: Option<Logger>, operator_id: usize) -> Self {
         Self {
             logger,
             operator_id,
-            chunker: C::default(),
             merger: M::default(),
             chains: Vec::new(),
             stash: Vec::new(),
             frontier: Antichain::new(),
             lower: Antichain::from_elem(M::Time::minimum()),
-            _marker: PhantomData,
-        }
-    }
-
-    /// Push a container of data into this merge batcher. Updates the internal chain structure if
-    /// needed.
-    fn push_container(&mut self, container: &mut Input) {
-        self.chunker.push_into(container);
-        while let Some(chunk) = self.chunker.extract() {
-            let chunk = std::mem::take(chunk);
-            self.insert_chain(vec![chunk]);
         }
     }

     // Sealing a batch means finding those updates with times not greater or equal to any time
     // in `upper`. All updates must have time greater or equal to the previously used `upper`,
     // which we call `lower`, by assumption that after sealing a batcher we receive no more
     // updates with times not greater or equal to `upper`.
-    fn seal<B: Builder<Input = Self::Output, Time = Self::Time>>(&mut self, upper: Antichain<M::Time>) -> B::Output {
-        // Finish
-        while let Some(chunk) = self.chunker.finish() {
-            let chunk = std::mem::take(chunk);
-            self.insert_chain(vec![chunk]);
-        }
-
+    fn seal<B: Builder<Input = Self::Container, Time = Self::Time>>(&mut self, upper: Antichain<M::Time>) -> B::Output {
         // Merge all remaining chains into a single chain.
         while self.chains.len() > 1 {
             let list1 = self.chain_pop().unwrap();
@@ -125,8 +96,16 @@ where
         self.frontier.borrow()
     }
 }
+impl<M> PushInto<M::Chunk> for MergeBatcher<M>
+where
+    M: Merger,
+{
+    fn push_into(&mut self, item: M::Chunk) {
+        self.insert_chain(vec![item]);
+    }
+}

-impl<Input, C, M: Merger> MergeBatcher<Input, C, M> {
+impl<M: Merger> MergeBatcher<M> {
     /// Insert a chain and maintain chain properties: Chains are geometrically sized and ordered
     /// by decreasing length.
     fn insert_chain(&mut self, chain: Vec<M::Chunk>) {
@@ -190,7 +169,7 @@ impl<Input, C, M: Merger> MergeBatcher<Input, C, M> {
     }
 }

-impl<Input, C, M: Merger> Drop for MergeBatcher<Input, C, M> {
+impl<M: Merger> Drop for MergeBatcher<M> {
     fn drop(&mut self) {
         // Cleanup chain to retract accounting information.
         while self.chain_pop().is_some() {}

differential-dataflow/src/trace/implementations/mod.rs

Lines changed: 1 addition & 0 deletions
@@ -47,6 +47,7 @@ pub mod huffman_container;
 pub mod chunker;

 // Opinionated takes on default spines.
+pub use self::chunker::{ColumnationChunker, VecChunker};
 pub use self::ord_neu::OrdValSpine as ValSpine;
 pub use self::ord_neu::OrdValBatcher as ValBatcher;
 pub use self::ord_neu::RcOrdValBuilder as ValBuilder;
