doc & refactor: todo scaladoc link (issue hablapps#135)
eruizalo committed Jan 23, 2022
1 parent a39ff99 commit 3d15993
Showing 5 changed files with 22 additions and 26 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/release.yml
@@ -19,7 +19,7 @@ jobs:
PGP_SECRET: ${{ secrets.PGP_SECRET }}
SONATYPE_PASSWORD: ${{ secrets.SONATYPE_PASSWORD }}
SONATYPE_USERNAME: ${{ secrets.SONATYPE_USERNAME }}
-      - name: Test & coverage # TODO running tests twice
+      - name: Test & coverage
run: sbt coverage core/test coverageReport coverageAggregate
- uses: codecov/codecov-action@v2
with:
13 changes: 0 additions & 13 deletions core/src/main/scala/doric/sem/AggregationOps.scala
@@ -55,12 +55,6 @@ private[sem] trait AggregationOps
* @example {{{
* // Compute the average for all numeric columns cubed by department and group.
* ds.cube("department".cname, "group".cname).avg()
-    *
-    *   // Compute the max age and average salary, cubed by department and gender.
-    *   ds.cube("department".cname, "gender".cname).agg(Map(
-    *     "salary" -> "avg",
-    *     "age" -> "max"
-    *   ))
* }}}
* @see [[doric.doc.DRelationalGroupedDataset]] for all the available aggregate functions.
* @see [[org.apache.spark.sql.Dataset.cube(col1:* org.apache.spark.sql.Dataset.cube]]
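For reference, a minimal doric sketch of the usage this scaladoc keeps, assuming a DataFrame `ds` with department and group columns and the `.cname` syntax shown above:

  import doric._
  // Cube by department and group, then average all numeric columns,
  // as in the example retained above.
  val avgCubed = ds.cube("department".cname, "group".cname).avg()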
@@ -91,14 +85,7 @@
* @example {{{
* // Compute the average for all numeric columns rolled up by department and group.
* ds.rollup("department".cname, "group".cname).avg()
-    *
-    *   // Compute the max age and average salary, rolled up by department and gender.
-    *   ds.rollup("department".cname, "gender".cname).agg(Map(
-    *     "salary" -> "avg",
-    *     "age" -> "max"
-    *   ))
* }}}
-    * @todo this example is not from doric
* @see [[doric.doc.DRelationalGroupedDataset]] for all the available aggregate functions.
* @see [[org.apache.spark.sql.Dataset.rollup(col1:* org.apache.spark.sql.Dataset.rollup]]
* @group Group Dataframe operation
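A matching sketch for rollup under the same assumptions; the deleted `agg(Map(...))` example used Spark's API rather than doric's, which is why it was removed:

  import doric._
  // Roll up by department and group, then average all numeric columns.
  val avgRolled = ds.rollup("department".cname, "group".cname).avg()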
27 changes: 18 additions & 9 deletions core/src/main/scala/doric/syntax/ArrayColumns.scala
@@ -33,7 +33,8 @@ private[syntax] trait ArrayColumns {
* Creates a new array column. The input columns must all have the same data type.
*
* @group Array Type
-    * @see [[org.apache.spark.sql.functions.array]]
+    * @see org.apache.spark.sql.functions.array
+    * @todo scaladoc link (issue #135)
*/
def array[T](cols: DoricColumn[T]*): ArrayColumn[T] =
cols.toList.traverse(_.elem).map(f.array(_: _*)).toDC
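A hedged usage sketch for `array`; the generic `col[T]` getter and an input DataFrame with integer columns a and b are assumptions here:

  import doric._
  // Build an array column from two columns that share the same element type.
  val pair: ArrayColumn[Int] = array(col[Int]("a".cname), col[Int]("b".cname))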
@@ -89,7 +90,8 @@
* the type of the array elements to return.
* @return
* the column reference with the applied transformation.
-    * @see [[org.apache.spark.sql.functions.transform]]
+    * @see org.apache.spark.sql.functions.transform
+    * @todo scaladoc link (issue #135)
*/
def transform[A](
fun: DoricColumn[T] => DoricColumn[A]
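The parameter list above is truncated by the collapsed diff view. A minimal sketch of the intended use, assuming an Array[Int] column named nums and doric's `.lit` literal syntax:

  import doric._
  // Add one to every element of the array column.
  val bumped = col[Array[Int]]("nums".cname).transform(_ + 1.lit)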
@@ -110,7 +112,8 @@
* the type of the elements of the array
* @return
* the column reference with the provided transformation.
-    * @see [[org.apache.spark.sql.functions.transform]]
+    * @see org.apache.spark.sql.functions.transform
+    * @todo scaladoc link (issue #135)
*/
def transformWithIndex[A](
fun: (DoricColumn[T], IntegerColumn) => DoricColumn[A]
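A sketch under the same assumed schema; the binary function receives the element and its zero-based index:

  import doric._
  // Add each element's index to its value.
  val shifted = col[Array[Int]]("nums".cname).transformWithIndex(_ + _)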
@@ -136,7 +139,8 @@
* type of the final value to return
* @return
* the column reference with the applied transformation.
-    * @see [[org.apache.spark.sql.functions.aggregate]]
+    * @see org.apache.spark.sql.functions.aggregate
+    * @todo scaladoc link (issue #135)
*/
def aggregateWT[A, B](zero: DoricColumn[A])(
merge: (DoricColumn[A], DoricColumn[T]) => DoricColumn[A],
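A sketch of this overload, assuming the truncated second parameter list is (merge, finish) as the scaladoc describes:

  import doric._
  // Sum the elements starting from a zero literal, then finish by testing the total,
  // so the fold's intermediate type (Int) differs from the result type (Boolean).
  val bigEnough: BooleanColumn =
    col[Array[Int]]("nums".cname).aggregateWT(0.lit)(_ + _, _ > 100.lit)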
@@ -160,7 +164,8 @@
* type of the transformed values.
* @return
* the column reference with the applied transformation.
-    * @see [[org.apache.spark.sql.functions.aggregate]]
+    * @see org.apache.spark.sql.functions.aggregate
+    * @todo scaladoc link (issue #135)
*/
def aggregate[A](
zero: DoricColumn[A]
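The simpler overload folds straight to the zero value's type; same assumed column:

  import doric._
  // Sum the elements of the array column, starting from zero.
  val total: IntegerColumn = col[Array[Int]]("nums".cname).aggregate(0.lit)(_ + _)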
@@ -179,7 +184,8 @@
* the condition to filter.
* @return
* the column reference with the filter applied.
-    * @see [[org.apache.spark.sql.functions.filter]]
+    * @see org.apache.spark.sql.functions.filter
+    * @todo scaladoc link (issue #135)
*/
def filter(p: DoricColumn[T] => BooleanColumn): DoricColumn[F[T]] =
(col.elem, p(x).elem)
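A minimal sketch, again assuming an Array[Int] column named nums:

  import doric._
  // Keep only the strictly positive elements.
  val positives = col[Array[Int]]("nums".cname).filter(_ > 0.lit)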
@@ -196,7 +202,8 @@
* (col, index) => predicate, the Boolean predicate to filter the input column
* given the index. Indices start at 0.
* @group Array Type
-    * @see [[org.apache.spark.sql.functions.filter]]
+    * @see org.apache.spark.sql.functions.filter
+    * @todo scaladoc link (issue #135)
*/
def filterWIndex(
function: (DoricColumn[T], IntegerColumn) => BooleanColumn
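A sketch assuming the predicate receives (element, index) in that order, as the scaladoc above states:

  import doric._
  // Drop the head: keep only the elements whose index is greater than zero.
  val tail = col[Array[Int]]("nums".cname).filterWIndex((_, idx) => idx > 0.lit)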
@@ -252,7 +259,8 @@
* `nullReplacement`.
*
* @group Array Type
-    * @see [[org.apache.spark.sql.functions.array_join]]
+    * @see org.apache.spark.sql.functions.array_join
+    * @todo scaladoc link (issue #135)
*/
def join(
delimiter: StringColumn,
@@ -268,7 +276,8 @@
* Concatenates the elements of `column` using the `delimiter`. Nulls are deleted
*
* @group Array Type
-    * @see [[org.apache.spark.sql.functions.array_join]]
+    * @see org.apache.spark.sql.functions.array_join
+    * @todo scaladoc link (issue #135)
*/
def join(delimiter: StringColumn): StringColumn =
(col.elem, delimiter.elem)
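A sketch covering both join overloads, assuming an Array[String] column named tags and string literals via `.lit`:

  import doric._
  val tags = col[Array[String]]("tags".cname)
  // Nulls are dropped entirely.
  val joined = tags.join(",".lit)
  // Nulls are replaced before joining.
  val joinedSafe = tags.join(",".lit, "n/a".lit)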
2 changes: 1 addition & 1 deletion core/src/main/scala/doric/syntax/CommonColumns.scala
@@ -211,7 +211,7 @@ private[syntax] trait CommonColumns extends ColGetters[NamedDoricColumn] {
* @group All Types
* @see [[org.apache.spark.sql.functions.array_repeat(left* org.apache.spark.sql.functions.array_repeat]]
*/
-  def repeatArray[B](times: IntegerColumn): ArrayColumn[T] =
+  def repeatArray(times: IntegerColumn): ArrayColumn[T] =
(column.elem, times.elem)
.mapN((c1, c2) => {
new Column(ArrayRepeat(c1.expr, c2.expr))
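With the unused type parameter B gone, the signature no longer suggests the element type can change. A hedged usage sketch, assuming this syntax is available on a string column:

  import doric._
  // Repeat the column's value three times into an array column.
  val thrice: ArrayColumn[String] = colString("word".cname).repeatArray(3.lit)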
4 changes: 2 additions & 2 deletions core/src/test/scala/doric/TypedColumnTest.scala
@@ -34,8 +34,8 @@ trait TypedColumnTest extends Matchers with DatasetComparer {
): Unit = {
import Equalities._

-    val eqCond: BooleanColumn = SparkType[T].dataType.typeName match {
-      case "map" =>
+    val eqCond: BooleanColumn = SparkType[T].dataType match {
+      case _: MapType =>
val compare: (Column => Column) => BooleanColumn = sparkFun =>
{
sparkFun(f.col(doricCol.value)) === sparkFun(f.col(sparkCol.value))
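The test now matches on the DataType subtype instead of comparing its typeName string, which the compiler can check and which survives renames. The pattern in isolation, using plain Spark types:

  import org.apache.spark.sql.types.{DataType, MapType}
  // Type-based match: a robust alternative to dataType.typeName == "map".
  def isMap(dt: DataType): Boolean = dt match {
    case _: MapType => true
    case _          => false
  }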
