-
Notifications
You must be signed in to change notification settings - Fork 182
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Implement a DistinctTotal trait and operators #75
Comments
4 tasks
tbg
added a commit
to tbg/differential-dataflow
that referenced
this issue
Aug 12, 2017
Closes TimelyDataflow#75. For convenience, here's the diff to `src/operators/count.rs` from which most of the code was lifted: ```diff --- src/operators/count.rs 2017-08-11 10:54:44.000000000 -0400 +++ src/operators/distinct.rs 2017-08-12 01:26:09.000000000 -0400 @@ -13,6 +13,9 @@ //! This ordering can be exploited in several cases to avoid computation when only the first few //! elements are required. +// FIXME(tschottdorf): the above comment doesn't seem pertinent here or in `{min,count}.rs`. Probably +// copy pasta. + use std::default::Default; use timely::dataflow::*; @@ -28,9 +31,9 @@ use trace::{BatchReader, Cursor, Trace, TraceReader}; use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace; -/// Extension trait for the `count` differential dataflow method. -pub trait CountTotal<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { - /// Counts the number of occurrences of each element. +/// Extension trait for the `distinct` differential dataflow method. +pub trait DistinctTotal<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { + /// Deduplicates the elements of the input stream. /// /// # Examples /// @@ -40,19 +43,19 @@ /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; - /// use differential_dataflow::operators::CountTotal; + /// use differential_dataflow::operators::DistinctTotal; /// /// fn main() { /// ::timely::example(|scope| { /// // report the number of occurrences of each key /// scope.new_collection_from(1 .. 10).1 /// .map(|x| x / 3) - /// .count_total(); + /// .distinct_total(); /// }); /// } /// ``` - fn count_total(&self) -> Collection<G, (K, R), isize>; - /// Counts the number of occurrences of each element. + fn distinct_total(&self) -> Collection<G, K, isize>; + /// Deduplicates the elements of the input stream. /// /// This method is a specialization for when the key is an unsigned integer fit for distributing the data. 
/// @@ -64,38 +67,40 @@ /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; - /// use differential_dataflow::operators::CountTotal; + /// use differential_dataflow::operators::DistinctTotal; /// /// fn main() { /// ::timely::example(|scope| { /// // report the number of occurrences of each key /// scope.new_collection_from(1 .. 10u32).1 /// .map(|x| x / 3) - /// .count_total_u(); + /// .distinct_total_u(); /// }); /// } /// ``` - fn count_total_u(&self) -> Collection<G, (K, R), isize> where K: Unsigned+Copy; + fn distinct_total_u(&self) -> Collection<G, K, isize> where K: Unsigned+Copy; } -impl<G: Scope, K: Data+Default+Hashable, R: Diff> CountTotal<G, K, R> for Collection<G, K, R> +impl<G: Scope, K: Data+Default+Hashable, R: Diff> DistinctTotal<G, K, R> for Collection<G, K, R> where G::Timestamp: TotalOrder+Ord { - fn count_total(&self) -> Collection<G, (K, R), isize> { + fn distinct_total(&self) -> Collection<G, K, isize> { self.arrange_by_self() - .count_total_core() - .map(|(k,c)| (k.item, c)) + .distinct_total_core() + .map(|k| k.item) } - fn count_total_u(&self) -> Collection<G, (K, R), isize> where K: Unsigned+Copy { + fn distinct_total_u(&self) -> Collection<G, K, isize> where K: Unsigned+Copy { self.map(|k| (UnsignedWrapper::from(k), ())) .arrange(DefaultKeyTrace::new()) - .count_total_core() - .map(|(k,c)| (k.item, c)) + .distinct_total_core() + .map(|k| k.item) } } /// Extension trait for the `group_arranged` differential dataflow method. -pub trait CountTotalCore<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { +pub trait DistinctTotalCore<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { + /// FIXME(tschottdorf): is this the right comment? Looks cargo-culted from group.rs. + /// /// Applies `group` to arranged data, and returns an arrangement of output data. 
/// /// This method is used by the more ergonomic `group`, `distinct`, and `count` methods, although @@ -110,7 +115,7 @@ /// /// use differential_dataflow::input::Input; /// use differential_dataflow::operators::arrange::Arrange; - /// use differential_dataflow::operators::count::CountTotalCore; + /// use differential_dataflow::operators::distinct::DistinctTotalCore; /// use differential_dataflow::trace::Trace; /// use differential_dataflow::trace::implementations::ord::OrdKeySpine; /// use differential_dataflow::hashable::OrdWrapper; @@ -122,24 +127,24 @@ /// scope.new_collection_from(1 .. 10u32).1 /// .map(|x| (OrdWrapper { item: x / 3 }, ())) /// .arrange(OrdKeySpine::new()) - /// .count_total_core(); + /// .distinct_total_core(); /// }); /// } /// ``` - fn count_total_core(&self) -> Collection<G, (K, R), isize>; + fn distinct_total_core(&self) -> Collection<G, K, isize>; } -impl<G: Scope, K: Data, R: Diff, T1> CountTotalCore<G, K, R> for Arranged<G, K, (), R, T1> +impl<G: Scope, K: Data, R: Diff, T1> DistinctTotalCore<G, K, R> for Arranged<G, K, (), R, T1> where G::Timestamp: TotalOrder+Ord, T1: TraceReader<K, (), G::Timestamp, R>+Clone+'static, T1::Batch: BatchReader<K, (), G::Timestamp, R> { - fn count_total_core(&self) -> Collection<G, (K, R), isize> { + fn distinct_total_core(&self) -> Collection<G, K, isize> { let mut trace = self.trace.clone(); - self.stream.unary_stream(Pipeline, "CountTotal", move |input, output| { + self.stream.unary_stream(Pipeline, "DistinctTotal", move |input, output| { input.for_each(|capability, batches| { @@ -150,31 +155,34 @@ let (mut trace_cursor, trace_storage) = trace.cursor_through(batch.lower()).unwrap(); while batch_cursor.key_valid(&batch_storage) { - - let key: K = batch_cursor.key(&batch_storage).clone(); + let key = batch_cursor.key(&batch_storage); let mut count = R::zero(); - trace_cursor.seek_key(&trace_storage, batch_cursor.key(&batch_storage)); - if trace_cursor.key_valid(&trace_storage) && 
trace_cursor.key(&trace_storage) == batch_cursor.key(&batch_storage) { + // Compute the multiplicity of this key before the current batch. + trace_cursor.seek_key(&trace_storage, key); + if trace_cursor.key_valid(&trace_storage) && trace_cursor.key(&trace_storage) == key { trace_cursor.map_times(&trace_storage, |_, diff| count = count + diff); } + // Take into account the current batch. At each time, check whether the + // "presence" of the key changes. If it was previously present (i.e. had + // nonzero multiplicity) but now is no more, emit -1. Conversely, if it is + // newly present, emit +1. In both remaining cases, the result remains + // unchanged (note that this is better than the naive approach which would + // eliminate the "previous" record and immediately re-add it). batch_cursor.map_times(&batch_storage, |time, diff| { - - if !count.is_zero() { - session.give(((key.clone(), count), time.clone(), -1)); - } + let mut mult = if count.is_zero() { 0 } else { -1 }; count = count + diff; - if !count.is_zero() { - session.give(((key.clone(), count), time.clone(), 1)); + mult += if count.is_zero() { 0 } else { 1 }; + if mult != 0 { + session.give((key.clone(), time.clone(), mult)); } - }); batch_cursor.step_key(&batch_storage); } - // tidy up the shared input trace. + // Tidy up the shared input trace. trace.advance_by(batch.upper()); trace.distinguish_since(batch.upper()); } ```
tbg
added a commit
to tbg/differential-dataflow
that referenced
this issue
Aug 12, 2017
Closes TimelyDataflow#75. For convenience, here's the diff to `src/operators/count.rs` from which most of the code was lifted: ```diff --- src/operators/count.rs 2017-08-11 10:54:44.000000000 -0400 +++ src/operators/distinct.rs 2017-08-12 01:26:09.000000000 -0400 @@ -13,6 +13,9 @@ //! This ordering can be exploited in several cases to avoid computation when only the first few //! elements are required. +// FIXME(tschottdorf): the above comment doesn't seem pertinent here or in `{min,count}.rs`. Probably +// copy pasta. + use std::default::Default; use timely::dataflow::*; @@ -28,9 +31,9 @@ use trace::{BatchReader, Cursor, Trace, TraceReader}; use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace; -/// Extension trait for the `count` differential dataflow method. -pub trait CountTotal<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { - /// Counts the number of occurrences of each element. +/// Extension trait for the `distinct` differential dataflow method. +pub trait DistinctTotal<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { + /// Deduplicates the elements of the input stream. /// /// # Examples /// @@ -40,19 +43,19 @@ /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; - /// use differential_dataflow::operators::CountTotal; + /// use differential_dataflow::operators::DistinctTotal; /// /// fn main() { /// ::timely::example(|scope| { /// // report the number of occurrences of each key /// scope.new_collection_from(1 .. 10).1 /// .map(|x| x / 3) - /// .count_total(); + /// .distinct_total(); /// }); /// } /// ``` - fn count_total(&self) -> Collection<G, (K, R), isize>; - /// Counts the number of occurrences of each element. + fn distinct_total(&self) -> Collection<G, K, isize>; + /// Deduplicates the elements of the input stream. /// /// This method is a specialization for when the key is an unsigned integer fit for distributing the data. 
/// @@ -64,38 +67,40 @@ /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; - /// use differential_dataflow::operators::CountTotal; + /// use differential_dataflow::operators::DistinctTotal; /// /// fn main() { /// ::timely::example(|scope| { /// // report the number of occurrences of each key /// scope.new_collection_from(1 .. 10u32).1 /// .map(|x| x / 3) - /// .count_total_u(); + /// .distinct_total_u(); /// }); /// } /// ``` - fn count_total_u(&self) -> Collection<G, (K, R), isize> where K: Unsigned+Copy; + fn distinct_total_u(&self) -> Collection<G, K, isize> where K: Unsigned+Copy; } -impl<G: Scope, K: Data+Default+Hashable, R: Diff> CountTotal<G, K, R> for Collection<G, K, R> +impl<G: Scope, K: Data+Default+Hashable, R: Diff> DistinctTotal<G, K, R> for Collection<G, K, R> where G::Timestamp: TotalOrder+Ord { - fn count_total(&self) -> Collection<G, (K, R), isize> { + fn distinct_total(&self) -> Collection<G, K, isize> { self.arrange_by_self() - .count_total_core() - .map(|(k,c)| (k.item, c)) + .distinct_total_core() + .map(|k| k.item) } - fn count_total_u(&self) -> Collection<G, (K, R), isize> where K: Unsigned+Copy { + fn distinct_total_u(&self) -> Collection<G, K, isize> where K: Unsigned+Copy { self.map(|k| (UnsignedWrapper::from(k), ())) .arrange(DefaultKeyTrace::new()) - .count_total_core() - .map(|(k,c)| (k.item, c)) + .distinct_total_core() + .map(|k| k.item) } } /// Extension trait for the `group_arranged` differential dataflow method. -pub trait CountTotalCore<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { +pub trait DistinctTotalCore<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { + /// FIXME(tschottdorf): is this the right comment? Looks cargo-culted from group.rs. + /// /// Applies `group` to arranged data, and returns an arrangement of output data. 
/// /// This method is used by the more ergonomic `group`, `distinct`, and `count` methods, although @@ -110,7 +115,7 @@ /// /// use differential_dataflow::input::Input; /// use differential_dataflow::operators::arrange::Arrange; - /// use differential_dataflow::operators::count::CountTotalCore; + /// use differential_dataflow::operators::distinct::DistinctTotalCore; /// use differential_dataflow::trace::Trace; /// use differential_dataflow::trace::implementations::ord::OrdKeySpine; /// use differential_dataflow::hashable::OrdWrapper; @@ -122,24 +127,24 @@ /// scope.new_collection_from(1 .. 10u32).1 /// .map(|x| (OrdWrapper { item: x / 3 }, ())) /// .arrange(OrdKeySpine::new()) - /// .count_total_core(); + /// .distinct_total_core(); /// }); /// } /// ``` - fn count_total_core(&self) -> Collection<G, (K, R), isize>; + fn distinct_total_core(&self) -> Collection<G, K, isize>; } -impl<G: Scope, K: Data, R: Diff, T1> CountTotalCore<G, K, R> for Arranged<G, K, (), R, T1> +impl<G: Scope, K: Data, R: Diff, T1> DistinctTotalCore<G, K, R> for Arranged<G, K, (), R, T1> where G::Timestamp: TotalOrder+Ord, T1: TraceReader<K, (), G::Timestamp, R>+Clone+'static, T1::Batch: BatchReader<K, (), G::Timestamp, R> { - fn count_total_core(&self) -> Collection<G, (K, R), isize> { + fn distinct_total_core(&self) -> Collection<G, K, isize> { let mut trace = self.trace.clone(); - self.stream.unary_stream(Pipeline, "CountTotal", move |input, output| { + self.stream.unary_stream(Pipeline, "DistinctTotal", move |input, output| { input.for_each(|capability, batches| { @@ -150,31 +155,34 @@ let (mut trace_cursor, trace_storage) = trace.cursor_through(batch.lower()).unwrap(); while batch_cursor.key_valid(&batch_storage) { - - let key: K = batch_cursor.key(&batch_storage).clone(); + let key = batch_cursor.key(&batch_storage); let mut count = R::zero(); - trace_cursor.seek_key(&trace_storage, batch_cursor.key(&batch_storage)); - if trace_cursor.key_valid(&trace_storage) && 
trace_cursor.key(&trace_storage) == batch_cursor.key(&batch_storage) { + // Compute the multiplicity of this key before the current batch. + trace_cursor.seek_key(&trace_storage, key); + if trace_cursor.key_valid(&trace_storage) && trace_cursor.key(&trace_storage) == key { trace_cursor.map_times(&trace_storage, |_, diff| count = count + diff); } + // Take into account the current batch. At each time, check whether the + // "presence" of the key changes. If it was previously present (i.e. had + // nonzero multiplicity) but now is no more, emit -1. Conversely, if it is + // newly present, emit +1. In both remaining cases, the result remains + // unchanged (note that this is better than the naive approach which would + // eliminate the "previous" record and immediately re-add it). batch_cursor.map_times(&batch_storage, |time, diff| { - - if !count.is_zero() { - session.give(((key.clone(), count), time.clone(), -1)); - } + let mut mult = if count.is_zero() { 0 } else { -1 }; count = count + diff; - if !count.is_zero() { - session.give(((key.clone(), count), time.clone(), 1)); + mult += if count.is_zero() { 0 } else { 1 }; + if mult != 0 { + session.give((key.clone(), time.clone(), mult)); } - }); batch_cursor.step_key(&batch_storage); } - // tidy up the shared input trace. + // Tidy up the shared input trace. trace.advance_by(batch.upper()); trace.distinguish_since(batch.upper()); } ```
tbg
added a commit
to tbg/differential-dataflow
that referenced
this issue
Aug 13, 2017
Closes TimelyDataflow#75. For convenience, here's the diff to `src/operators/count.rs` from which most of the code was lifted: ```diff --- src/operators/count.rs 2017-08-12 01:36:23.000000000 -0400 +++ src/operators/distinct.rs 2017-08-13 13:15:35.000000000 -0400 @@ -1,17 +1,7 @@ -//! Group records by a key, and apply a reduction function. +//! Reduce the collection to one occurrence of each distinct element. //! -//! The `group` operators act on data that can be viewed as pairs `(key, val)`. They group records -//! with the same key, and apply user supplied functions to the key and a list of values, which are -//! expected to populate a list of output values. -//! -//! Several variants of `group` exist which allow more precise control over how grouping is done. -//! For example, the `_by` suffixed variants take arbitrary data, but require a key-value selector -//! to be applied to each record. The `_u` suffixed variants use unsigned integers as keys, and -//! will use a dense array rather than a `HashMap` to store their keys. -//! -//! The list of values are presented as an iterator which internally merges sorted lists of values. -//! This ordering can be exploited in several cases to avoid computation when only the first few -//! elements are required. +//! The `distinct_total` and `distinct_total_u` operators are optimizations of the more general +//! `distinct` and `distinct_u` operators for the case in which time is totally ordered. use std::default::Default; @@ -28,89 +18,84 @@ use trace::{BatchReader, Cursor, Trace, TraceReader}; use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace; -/// Extension trait for the `count` differential dataflow method. -pub trait CountTotal<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { - /// Counts the number of occurrences of each element. +/// Extension trait for the `distinct` differential dataflow method. 
+pub trait DistinctTotal<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { + /// Reduces the collection to one occurrence of each distinct element. /// /// # Examples /// /// ``` - /// # /// extern crate timely; /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; - /// use differential_dataflow::operators::CountTotal; + /// use differential_dataflow::operators::DistinctTotal; /// /// fn main() { /// ::timely::example(|scope| { /// // report the number of occurrences of each key /// scope.new_collection_from(1 .. 10).1 /// .map(|x| x / 3) - /// .count_total(); + /// .distinct_total(); /// }); /// } /// ``` - fn count_total(&self) -> Collection<G, (K, R), isize>; - /// Counts the number of occurrences of each element. + fn distinct_total(&self) -> Collection<G, K, isize>; + /// Reduces the collection to one occurrence of each distinct element. /// - /// This method is a specialization for when the key is an unsigned integer fit for distributing the data. + /// This method is a specialization for when the key is an unsigned integer fit for distributing + /// the data. /// /// # Examples /// /// ``` - /// # /// extern crate timely; /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; - /// use differential_dataflow::operators::CountTotal; + /// use differential_dataflow::operators::DistinctTotal; /// /// fn main() { /// ::timely::example(|scope| { /// // report the number of occurrences of each key /// scope.new_collection_from(1 .. 
10u32).1 /// .map(|x| x / 3) - /// .count_total_u(); + /// .distinct_total_u(); /// }); /// } /// ``` - fn count_total_u(&self) -> Collection<G, (K, R), isize> where K: Unsigned+Copy; + fn distinct_total_u(&self) -> Collection<G, K, isize> where K: Unsigned+Copy; } -impl<G: Scope, K: Data+Default+Hashable, R: Diff> CountTotal<G, K, R> for Collection<G, K, R> +impl<G: Scope, K: Data+Default+Hashable, R: Diff> DistinctTotal<G, K, R> for Collection<G, K, R> where G::Timestamp: TotalOrder+Ord { - fn count_total(&self) -> Collection<G, (K, R), isize> { + fn distinct_total(&self) -> Collection<G, K, isize> { self.arrange_by_self() - .count_total_core() - .map(|(k,c)| (k.item, c)) + .distinct_total_core() + .map(|k| k.item) } - fn count_total_u(&self) -> Collection<G, (K, R), isize> where K: Unsigned+Copy { + fn distinct_total_u(&self) -> Collection<G, K, isize> where K: Unsigned+Copy { self.map(|k| (UnsignedWrapper::from(k), ())) .arrange(DefaultKeyTrace::new()) - .count_total_core() - .map(|(k,c)| (k.item, c)) + .distinct_total_core() + .map(|k| k.item) } } -/// Extension trait for the `group_arranged` differential dataflow method. -pub trait CountTotalCore<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { - /// Applies `group` to arranged data, and returns an arrangement of output data. - /// - /// This method is used by the more ergonomic `group`, `distinct`, and `count` methods, although - /// it can be very useful if one needs to manually attach and re-use existing arranged collections. +/// Extension trait for the `distinct_total_core` differential dataflow method. +pub trait DistinctTotalCore<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { + /// Applies `distinct` to arranged data, and returns a collection of output data. 
/// /// # Examples /// /// ``` - /// # /// extern crate timely; /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; /// use differential_dataflow::operators::arrange::Arrange; - /// use differential_dataflow::operators::count::CountTotalCore; + /// use differential_dataflow::operators::distinct::DistinctTotalCore; /// use differential_dataflow::trace::Trace; /// use differential_dataflow::trace::implementations::ord::OrdKeySpine; /// use differential_dataflow::hashable::OrdWrapper; @@ -122,24 +107,24 @@ /// scope.new_collection_from(1 .. 10u32).1 /// .map(|x| (OrdWrapper { item: x / 3 }, ())) /// .arrange(OrdKeySpine::new()) - /// .count_total_core(); + /// .distinct_total_core(); /// }); /// } /// ``` - fn count_total_core(&self) -> Collection<G, (K, R), isize>; + fn distinct_total_core(&self) -> Collection<G, K, isize>; } -impl<G: Scope, K: Data, R: Diff, T1> CountTotalCore<G, K, R> for Arranged<G, K, (), R, T1> +impl<G: Scope, K: Data, R: Diff, T1> DistinctTotalCore<G, K, R> for Arranged<G, K, (), R, T1> where G::Timestamp: TotalOrder+Ord, T1: TraceReader<K, (), G::Timestamp, R>+Clone+'static, T1::Batch: BatchReader<K, (), G::Timestamp, R> { - fn count_total_core(&self) -> Collection<G, (K, R), isize> { + fn distinct_total_core(&self) -> Collection<G, K, isize> { let mut trace = self.trace.clone(); - self.stream.unary_stream(Pipeline, "CountTotal", move |input, output| { + self.stream.unary_stream(Pipeline, "DistinctTotal", move |input, output| { input.for_each(|capability, batches| { @@ -150,31 +135,34 @@ let (mut trace_cursor, trace_storage) = trace.cursor_through(batch.lower()).unwrap(); while batch_cursor.key_valid(&batch_storage) { - let key = batch_cursor.key(&batch_storage); let mut count = R::zero(); + // Compute the multiplicity of this key before the current batch. 
trace_cursor.seek_key(&trace_storage, key); if trace_cursor.key_valid(&trace_storage) && trace_cursor.key(&trace_storage) == key { trace_cursor.map_times(&trace_storage, |_, diff| count = count + diff); } + // Take into account the current batch. At each time, check whether the + // "presence" of the key changes. If it was previously present (i.e. had + // nonzero multiplicity) but now is no more, emit -1. Conversely, if it is + // newly present, emit +1. In both remaining cases, the result remains + // unchanged (note that this is better than the naive approach which would + // eliminate the "previous" record and immediately re-add it). batch_cursor.map_times(&batch_storage, |time, diff| { - - if !count.is_zero() { - session.give(((key.clone(), count), time.clone(), -1)); - } + let mut mult = if count.is_zero() { 0 } else { -1 }; count = count + diff; - if !count.is_zero() { - session.give(((key.clone(), count), time.clone(), 1)); + mult += if count.is_zero() { 0 } else { 1 }; + if mult != 0 { + session.give((key.clone(), time.clone(), mult)); } - }); batch_cursor.step_key(&batch_storage); } - // tidy up the shared input trace. + // Tidy up the shared input trace. trace.advance_by(batch.upper()); trace.distinguish_since(batch.upper()); } ```
tbg
added a commit
to tbg/differential-dataflow
that referenced
this issue
Aug 13, 2017
Closes TimelyDataflow#75. For convenience, here's the diff to `src/operators/count.rs` from which most of the code was lifted: ```diff --- src/operators/count.rs 2017-08-12 01:36:23.000000000 -0400 +++ src/operators/distinct.rs 2017-08-13 13:29:22.000000000 -0400 @@ -1,17 +1,7 @@ -//! Group records by a key, and apply a reduction function. +//! Reduce the collection to one occurrence of each distinct element. //! -//! The `group` operators act on data that can be viewed as pairs `(key, val)`. They group records -//! with the same key, and apply user supplied functions to the key and a list of values, which are -//! expected to populate a list of output values. -//! -//! Several variants of `group` exist which allow more precise control over how grouping is done. -//! For example, the `_by` suffixed variants take arbitrary data, but require a key-value selector -//! to be applied to each record. The `_u` suffixed variants use unsigned integers as keys, and -//! will use a dense array rather than a `HashMap` to store their keys. -//! -//! The list of values are presented as an iterator which internally merges sorted lists of values. -//! This ordering can be exploited in several cases to avoid computation when only the first few -//! elements are required. +//! The `distinct_total` and `distinct_total_u` operators are optimizations of the more general +//! `distinct` and `distinct_u` operators for the case in which time is totally ordered. use std::default::Default; @@ -28,89 +18,84 @@ use trace::{BatchReader, Cursor, Trace, TraceReader}; use trace::implementations::ord::OrdKeySpine as DefaultKeyTrace; -/// Extension trait for the `count` differential dataflow method. -pub trait CountTotal<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { - /// Counts the number of occurrences of each element. +/// Extension trait for the `distinct` differential dataflow method. 
+pub trait DistinctTotal<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { + /// Reduces the collection to one occurrence of each distinct element. /// /// # Examples /// /// ``` - /// # /// extern crate timely; /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; - /// use differential_dataflow::operators::CountTotal; + /// use differential_dataflow::operators::DistinctTotal; /// /// fn main() { /// ::timely::example(|scope| { /// // report the number of occurrences of each key /// scope.new_collection_from(1 .. 10).1 /// .map(|x| x / 3) - /// .count_total(); + /// .distinct_total(); /// }); /// } /// ``` - fn count_total(&self) -> Collection<G, (K, R), isize>; - /// Counts the number of occurrences of each element. + fn distinct_total(&self) -> Collection<G, K, isize>; + /// Reduces the collection to one occurrence of each distinct element. /// - /// This method is a specialization for when the key is an unsigned integer fit for distributing the data. + /// This method is a specialization for when the key is an unsigned integer fit for distributing + /// the data. /// /// # Examples /// /// ``` - /// # /// extern crate timely; /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; - /// use differential_dataflow::operators::CountTotal; + /// use differential_dataflow::operators::DistinctTotal; /// /// fn main() { /// ::timely::example(|scope| { /// // report the number of occurrences of each key /// scope.new_collection_from(1 .. 
10u32).1 /// .map(|x| x / 3) - /// .count_total_u(); + /// .distinct_total_u(); /// }); /// } /// ``` - fn count_total_u(&self) -> Collection<G, (K, R), isize> where K: Unsigned+Copy; + fn distinct_total_u(&self) -> Collection<G, K, isize> where K: Unsigned+Copy; } -impl<G: Scope, K: Data+Default+Hashable, R: Diff> CountTotal<G, K, R> for Collection<G, K, R> +impl<G: Scope, K: Data+Default+Hashable, R: Diff> DistinctTotal<G, K, R> for Collection<G, K, R> where G::Timestamp: TotalOrder+Ord { - fn count_total(&self) -> Collection<G, (K, R), isize> { + fn distinct_total(&self) -> Collection<G, K, isize> { self.arrange_by_self() - .count_total_core() - .map(|(k,c)| (k.item, c)) + .distinct_total_core() + .map(|k| k.item) } - fn count_total_u(&self) -> Collection<G, (K, R), isize> where K: Unsigned+Copy { + fn distinct_total_u(&self) -> Collection<G, K, isize> where K: Unsigned+Copy { self.map(|k| (UnsignedWrapper::from(k), ())) .arrange(DefaultKeyTrace::new()) - .count_total_core() - .map(|(k,c)| (k.item, c)) + .distinct_total_core() + .map(|k| k.item) } } -/// Extension trait for the `group_arranged` differential dataflow method. -pub trait CountTotalCore<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { - /// Applies `group` to arranged data, and returns an arrangement of output data. - /// - /// This method is used by the more ergonomic `group`, `distinct`, and `count` methods, although - /// it can be very useful if one needs to manually attach and re-use existing arranged collections. +/// Extension trait for the `distinct_total_core` differential dataflow method. +pub trait DistinctTotalCore<G: Scope, K: Data, R: Diff> where G::Timestamp: TotalOrder+Ord { + /// Applies `distinct` to arranged data, and returns a collection of output data. 
/// /// # Examples /// /// ``` - /// # /// extern crate timely; /// extern crate differential_dataflow; /// /// use differential_dataflow::input::Input; /// use differential_dataflow::operators::arrange::Arrange; - /// use differential_dataflow::operators::count::CountTotalCore; + /// use differential_dataflow::operators::distinct::DistinctTotalCore; /// use differential_dataflow::trace::Trace; /// use differential_dataflow::trace::implementations::ord::OrdKeySpine; /// use differential_dataflow::hashable::OrdWrapper; @@ -122,24 +107,24 @@ /// scope.new_collection_from(1 .. 10u32).1 /// .map(|x| (OrdWrapper { item: x / 3 }, ())) /// .arrange(OrdKeySpine::new()) - /// .count_total_core(); + /// .distinct_total_core(); /// }); /// } /// ``` - fn count_total_core(&self) -> Collection<G, (K, R), isize>; + fn distinct_total_core(&self) -> Collection<G, K, isize>; } -impl<G: Scope, K: Data, R: Diff, T1> CountTotalCore<G, K, R> for Arranged<G, K, (), R, T1> +impl<G: Scope, K: Data, R: Diff, T1> DistinctTotalCore<G, K, R> for Arranged<G, K, (), R, T1> where G::Timestamp: TotalOrder+Ord, T1: TraceReader<K, (), G::Timestamp, R>+Clone+'static, T1::Batch: BatchReader<K, (), G::Timestamp, R> { - fn count_total_core(&self) -> Collection<G, (K, R), isize> { + fn distinct_total_core(&self) -> Collection<G, K, isize> { let mut trace = self.trace.clone(); - self.stream.unary_stream(Pipeline, "CountTotal", move |input, output| { + self.stream.unary_stream(Pipeline, "DistinctTotal", move |input, output| { input.for_each(|capability, batches| { @@ -150,31 +135,34 @@ let (mut trace_cursor, trace_storage) = trace.cursor_through(batch.lower()).unwrap(); while batch_cursor.key_valid(&batch_storage) { - let key = batch_cursor.key(&batch_storage); let mut count = R::zero(); + // Compute the multiplicity of this key before the current batch. 
trace_cursor.seek_key(&trace_storage, key); if trace_cursor.key_valid(&trace_storage) && trace_cursor.key(&trace_storage) == key { trace_cursor.map_times(&trace_storage, |_, diff| count = count + diff); } + // Take into account the current batch. At each time, check whether the + // "presence" of the key changes. If it was previously present (i.e. had + // nonzero multiplicity) but now is no more, emit -1. Conversely, if it is + // newly present, emit +1. In both remaining cases, the result remains + // unchanged (note that this is better than the naive approach which would + // eliminate the "previous" record and immediately re-add it). batch_cursor.map_times(&batch_storage, |time, diff| { - - if !count.is_zero() { - session.give(((key.clone(), count), time.clone(), -1)); - } + let old_distinct = !count.is_zero(); count = count + diff; - if !count.is_zero() { - session.give(((key.clone(), count), time.clone(), 1)); + let new_distinct = !count.is_zero(); + if old_distinct != new_distinct { + session.give((key.clone(), time.clone(), if old_distinct { -1 } else { 1 })); } - }); batch_cursor.step_key(&batch_storage); } - // tidy up the shared input trace. + // Tidy up the shared input trace. trace.advance_by(batch.upper()); trace.distinguish_since(batch.upper()); } ```
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Issue #74 calls for specialized implementations for totally ordered timestamps. The
distinct
and distinct_u
operators seem to be good candidates for this: they are superficially similar to count
, and their current implementation is via the very expensive group
and group_u
operators. This implementation should be very nearly that of
CountTotal
, except that there is further opportunity to avoid sending output when changes in the count of input keys do not change the occurrence of the output key. The text was updated successfully, but these errors were encountered: