doc: links for some functions
Fixes broken scaladoc links for some functions --> hablapps#135
eruizalo committed Jan 10, 2022
1 parent 9b57a58 commit 7d59780
Showing 4 changed files with 37 additions and 45 deletions.
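The fix is the same in almost every hunk below: a bare `[[org.apache.spark.sql.functions.xyz]]` link is ambiguous when `xyz` is overloaded, so Scaladoc cannot resolve it. The replacement links name one concrete overload by appending enough of its parameter signature to be unique (with dots escaped as `\.`), use `*` to wildcard the remainder of the signature, and put the rendered link label after the space. Where no unique link was worked out, the commit falls back to a plain-text `@see` plus a `@todo scaladoc link (issue #135)` marker (`locate`, `split`, `from_utc_timestamp`, `to_utc_timestamp`). A minimal sketch of the pattern, taken from the first hunk (the object and method names here are invented for illustration):

```scala
object ScaladocLinkPattern {

  /** Ambiguous: `assert_true` is overloaded, so Scaladoc may fail to
    * resolve this link and emits a warning instead.
    * @see [[org.apache.spark.sql.functions.assert_true]]
    */
  def before(): Unit = ()

  /** Unambiguous: the parameter signature (dots escaped as `\.`) selects
    * the one-argument overload, `:*` wildcards the rest of the signature,
    * and the text after the space becomes the rendered link label.
    * @see [[org.apache.spark.sql.functions.assert_true(c:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.assert_true]]
    */
  def after(): Unit = ()
}
```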
core/src/main/scala/doric/syntax/BooleanColumns.scala (2 additions & 2 deletions)

@@ -67,7 +67,7 @@ private[syntax] trait BooleanColumns {
      *
      * @throws java.lang.RuntimeException if the condition is false
      * @group Boolean Type
-     * @see [[org.apache.spark.sql.functions.assert_true]]
+     * @see [[org.apache.spark.sql.functions.assert_true(c:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.assert_true]]
      */
    def assertTrue: NullColumn = column.elem.map(f.assert_true).toDC

@@ -76,7 +76,7 @@ private[syntax] trait BooleanColumns {
      *
      * @throws java.lang.RuntimeException if the condition is false
      * @group Boolean Type
-     * @see [[org.apache.spark.sql.functions.assert_true]]
+     * @see [[org.apache.spark.sql.functions.assert_true(c:org\.apache\.spark\.sql\.Column,e:* org.apache.spark.sql.functions.assert_true]]
      */
    def assertTrue(msg: StringColumn): NullColumn =
      (column.elem, msg.elem).mapN(f.assert_true).toDC
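For context, a minimal usage sketch of `assertTrue` (this assumes a live `SparkSession` named `spark` and doric's usual entry points such as `colLong`, `.lit`, and the `withColumn` enrichment; the column name and message are invented, not from the diff):

```scala
import doric._

// Sketch only: `id` is an invented column on a toy DataFrame.
val df = spark.range(5).toDF("id")

// assertTrue evaluates to null when the predicate holds; otherwise Spark
// raises a java.lang.RuntimeException with the given message at execution time.
val checked = df.withColumn(
  "check",
  (colLong("id") >= 0L.lit).assertTrue("id must be non-negative".lit)
)
```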
core/src/main/scala/doric/syntax/DateColumns.scala (8 additions & 8 deletions)

@@ -33,7 +33,7 @@ private[syntax] trait DateColumns {
      *   Date column after adding months
      * @note
      *   Timestamp columns will be truncated to Date column
-     * @see [[org.apache.spark.sql.functions.add_months]]
+     * @see [[org.apache.spark.sql.functions.add_months(startDate:org\.apache\.spark\.sql\.Column,numMonths:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.add_months]]
      */
    def addMonths(nMonths: IntegerColumn): DateColumn =
      (column.elem, nMonths.elem).mapN(f.add_months).toDC

@@ -46,7 +46,7 @@ private[syntax] trait DateColumns {
      * @note
      *   Timestamp columns will be truncated to Date column
      * @group Date & Timestamp Type
-     * @see [[org.apache.spark.sql.functions.date_add]]
+     * @see [[org.apache.spark.sql.functions.date_add(start:org\.apache\.spark\.sql\.Column,days:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.date_add]]
      */
    def addDays(days: IntegerColumn): DateColumn =
      (column.elem, days.elem).mapN(f.date_add).toDC

@@ -79,7 +79,7 @@ private[syntax] trait DateColumns {
      * @note
      *   Timestamp columns will be truncated to Date column
      * @group Date & Timestamp Type
-     * @see [[org.apache.spark.sql.functions.date_sub]]
+     * @see [[org.apache.spark.sql.functions.date_sub(start:org\.apache\.spark\.sql\.Column,days:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.date_sub]]
      */
    def subDays(days: IntegerColumn): DateColumn =
      (column.elem, days.elem).mapN(f.date_sub).toDC

@@ -164,7 +164,7 @@ private[syntax] trait DateColumns {
      * @param dateCol
      *   Date or Timestamp column
      * @group Date & Timestamp Type
-     * @see [[org.apache.spark.sql.functions.months_between]]
+     * @see [[org.apache.spark.sql.functions.months_between(end:org\.apache\.spark\.sql\.Column,start:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.months_between]]
      */
    def monthsBetween(dateCol: DoricColumn[T]): DoubleColumn =
      (column.elem, dateCol.elem).mapN(f.months_between).toDC

@@ -178,7 +178,7 @@ private[syntax] trait DateColumns {
      *   If `roundOff` is set to true, the result is rounded off to 8 digits;
      *   it is not rounded otherwise.
      * @group Date & Timestamp Type
-     * @see [[org.apache.spark.sql.functions.months_between]]
+     * @see [[org.apache.spark.sql.functions.months_between(end:org\.apache\.spark\.sql\.Column,start:org\.apache\.spark\.sql\.Column,roundOff:* org.apache.spark.sql.functions.months_between]]
      */
    def monthsBetween(
        dateCol: DoricColumn[T],

@@ -260,7 +260,7 @@ private[syntax] trait DateColumns {
      *   A long
      *
      * @group Date & Timestamp Type
-     * @see [[org.apache.spark.sql.functions.unix_timestamp]]
+     * @see [[org.apache.spark.sql.functions.unix_timestamp(s:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.unix_timestamp]]
      */
    def unixTimestamp: LongColumn = column.elem.map(f.unix_timestamp).toDC

@@ -287,15 +287,15 @@ private[syntax] trait DateColumns {
      * Transform date to timestamp
      *
      * @group Date Type
-     * @see [[org.apache.spark.sql.functions.to_timestamp]]
+     * @see [[org.apache.spark.sql.functions.to_timestamp(s:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.to_timestamp]]
      */
    def toTimestamp: TimestampColumn = column.elem.map(f.to_timestamp).toDC

    /**
      * Transform date to Instant
      *
      * @group Date Type
-     * @see [[org.apache.spark.sql.functions.to_timestamp]]
+     * @see [[org.apache.spark.sql.functions.to_timestamp(s:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.to_timestamp]]
      */
    def toInstant: InstantColumn = column.elem.map(f.to_timestamp).toDC
  }
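A usage sketch of the date helpers touched above (same assumptions as before: a `SparkSession` named `spark`, doric's `colDate` selector and `.lit` literals; the column name is invented):

```scala
import java.sql.Date
import doric._

// Sketch only: one-row DataFrame with an invented date column.
import spark.implicits._
val df = Seq(Date.valueOf("2022-01-10")).toDF("d")

val res = df
  .withColumn("plus2m",  colDate("d").addMonths(2.lit))         // wraps f.add_months
  .withColumn("plus7d",  colDate("d").addDays(7.lit))           // wraps f.date_add
  .withColumn("minus7d", colDate("d").subDays(7.lit))           // wraps f.date_sub
  .withColumn("gap",     colDate("d").monthsBetween(colDate("d"))) // 0.0 here
```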
core/src/main/scala/doric/syntax/StringColumns.scala (19 additions & 29 deletions)

@@ -170,7 +170,8 @@ private[syntax] trait StringColumns {
      * @note
      *   The position is not zero based, but 1 based index. returns 0 if substr
      *   could not be found in str.
-     * @see [[org.apache.spark.sql.functions.locate]]
+     * @see org.apache.spark.sql.functions.locate
+     * @todo scaladoc link (issue #135)
      */
    def locate(
        substr: StringColumn,

@@ -209,7 +210,7 @@ private[syntax] trait StringColumns {
      * Trim the spaces from left end for the specified string value.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.ltrim]]
+     * @see [[org.apache.spark.sql.functions.ltrim(e:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.ltrim]]
      */
    def ltrim: StringColumn = s.elem.map(f.ltrim).toDC

@@ -218,7 +219,7 @@ private[syntax] trait StringColumns {
      * string column.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.ltrim]]
+     * @see [[org.apache.spark.sql.functions.ltrim(e:org\.apache\.spark\.sql\.Column,trimString:* org.apache.spark.sql.functions.ltrim]]
      */
    def ltrim(trimString: StringColumn): StringColumn =
      (s.elem, trimString.elem)

@@ -232,7 +233,7 @@ private[syntax] trait StringColumns {
      * byte position `pos` of `src` and proceeding for `len` bytes.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.overlay]]
+     * @see [[org.apache.spark.sql.functions.overlay(src:org\.apache\.spark\.sql\.Column,replace:org\.apache\.spark\.sql\.Column,pos:org\.apache\.spark\.sql\.Column,len:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.overlay]]
      */
    def overlay(
        replace: StringColumn,

@@ -267,7 +268,7 @@ private[syntax] trait StringColumns {
      * with replacement.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.regexp_replace]]
+     * @see [[org.apache.spark.sql.functions.regexp_replace(e:org\.apache\.spark\.sql\.Column,pattern:org\.apache\.spark\.sql\.Column,* org.apache.spark.sql.functions.regexp_replace]]
      */
    def regexpReplace(
        pattern: StringColumn,

@@ -302,7 +303,7 @@ private[syntax] trait StringColumns {
      * Trim the spaces from right end for the specified string value.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.rtrim]]
+     * @see [[org.apache.spark.sql.functions.rtrim(e:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.rtrim]]
      */
    def rtrim: StringColumn = s.elem.map(f.rtrim).toDC

@@ -311,7 +312,7 @@ private[syntax] trait StringColumns {
      * string column.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.rtrim]]
+     * @see [[org.apache.spark.sql.functions.rtrim(e:org\.apache\.spark\.sql\.Column,trimString:* org.apache.spark.sql.functions.rtrim]]
      */
    def rtrim(trimString: StringColumn): StringColumn =
      (s.elem, trimString.elem)

@@ -326,19 +327,6 @@ private[syntax] trait StringColumns {
      */
    def soundex: StringColumn = s.elem.map(f.soundex).toDC

-    /**
-      * Splits str around matches of the given pattern.
-      *
-      * @param pattern
-      *   a string representing a regular expression. The regex string should be
-      *   a Java regular expression.
-      *
-      * @group String Type
-      * @see [[org.apache.spark.sql.functions.split]]
-      */
-    def split(pattern: StringColumn): ArrayColumn[String] =
-      split(pattern, (-1).lit)
-
    /**
      * Splits str around matches of the given pattern.
      *

@@ -353,10 +341,12 @@ private[syntax] trait StringColumns {
      *   contain all input beyond the last matched regex.
      *   - __limit less than or equal to 0__: `regex` will be applied as many times as possible,
      *   and the resulting array can be of any size.
+     * @see org.apache.spark.sql.functions.split
+     * @todo scaladoc link (issue #135)
      */
    def split(
        pattern: StringColumn,
-       limit: IntegerColumn
+       limit: IntegerColumn = (-1).lit
    ): ArrayColumn[String] =
      (s.elem, pattern.elem, limit.elem)
        .mapN((str, p, l) => new Column(StringSplit(str.expr, p.expr, l.expr)))

@@ -421,7 +411,7 @@ private[syntax] trait StringColumns {
      * Trim the spaces from both ends for the specified string column.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.trim]]
+     * @see [[org.apache.spark.sql.functions.trim(e:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.trim]]
      */
    def trim: StringColumn = s.elem.map(f.trim).toDC

@@ -430,7 +420,7 @@ private[syntax] trait StringColumns {
      * column (literal).
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.trim]]
+     * @see [[org.apache.spark.sql.functions.trim(e:org\.apache\.spark\.sql\.Column,trimString:* org.apache.spark.sql.functions.trim]]
      */
    def trim(trimString: StringColumn): StringColumn =
      (s.elem, trimString.elem)

@@ -467,7 +457,7 @@ private[syntax] trait StringColumns {
      * String ends with. Returns a boolean column based on a string match.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.Column.endsWith]]
+     * @see [[org.apache.spark.sql.Column.endsWith(other:* org.apache.spark.sql.Column.endsWith]]
      */
    def endsWith(dc: StringColumn): BooleanColumn =
      (s.elem, dc.elem).mapN(_.endsWith(_)).toDC

@@ -499,7 +489,7 @@ private[syntax] trait StringColumns {
      * String starts with. Returns a boolean column based on a string match.
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.Column.startsWith]]
+     * @see [[org.apache.spark.sql.Column.startsWith(other:* org.apache.spark.sql.Column.startsWith]]
      */
    def startsWith(dc: StringColumn): BooleanColumn =
      (s.elem, dc.elem).mapN(_.startsWith(_)).toDC

@@ -547,7 +537,7 @@ private[syntax] trait StringColumns {
      *   A long
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.unix_timestamp]]
+     * @see [[org.apache.spark.sql.functions.unix_timestamp(s:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.unix_timestamp]]
      */
    def unixTimestamp: LongColumn = s.elem.map(f.unix_timestamp).toDC

@@ -559,7 +549,7 @@ private[syntax] trait StringColumns {
      * @throws java.lang.IllegalArgumentException if invalid pattern
      *
      * @group String Type
-     * @see [[org.apache.spark.sql.functions.unix_timestamp]]
+     * @see [[org.apache.spark.sql.functions.unix_timestamp(s:org\.apache\.spark\.sql\.Column,p:* org.apache.spark.sql.functions.unix_timestamp]]
      */
    def unixTimestamp(pattern: StringColumn): LongColumn =
      (s.elem, pattern.elem)

@@ -595,7 +585,7 @@ private[syntax] trait StringColumns {
      * @return
      *   A date, or null if `e` was a string that could not be cast to a date
      *   or `format` was an invalid format
-     * @see [[org.apache.spark.sql.functions.to_date]]
+     * @see [[org.apache.spark.sql.functions.to_date(e:org\.apache\.spark\.sql\.Column,fmt:* org.apache.spark.sql.functions.to_date]]
      */
    def toDate(format: StringColumn): LocalDateColumn =
      (s.elem, format.elem)

@@ -617,7 +607,7 @@ private[syntax] trait StringColumns {
      * @return
      *   A timestamp, or null if `s` was a string that could not be cast to a
      *   timestamp or `format` was an invalid format
-     * @see [[org.apache.spark.sql.functions.to_timestamp]]
+     * @see [[org.apache.spark.sql.functions.to_timestamp(s:org\.apache\.spark\.sql\.Column,fmt:* org.apache.spark.sql.functions.to_timestamp]]
      */
    def toTimestamp(format: StringColumn): InstantColumn =
      (s.elem, format.elem)
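The `split` change above is the one behavioral refactor in the commit: the single-argument overload is deleted and folded into a default argument `limit = (-1).lit`, so both call shapes keep compiling against the remaining method. A sketch (invented column name; assumes `spark` and doric's syntax as before):

```scala
import doric._

// Sketch only.
import spark.implicits._
val df = Seq("a,b,c").toDF("csv")

// limit defaults to -1: the regex is applied as many times as possible.
val all = df.withColumn("parts", colString("csv").split(",".lit))
// limit = 2: at most two elements; the last keeps all remaining input.
val two = df.withColumn("parts", colString("csv").split(",".lit, 2.lit))
```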
core/src/main/scala/doric/syntax/TimestampColumns.scala (8 additions & 6 deletions)

@@ -33,7 +33,8 @@ private[syntax] trait TimestampColumns {
      *
      * @throws java.time.DateTimeException if invalid timeZone
      * @group Timestamp Type
-     * @see [[org.apache.spark.sql.functions.from_utc_timestamp]]
+     * @see org.apache.spark.sql.functions.from_utc_timestamp
+     * @todo scaladoc link (issue #135)
      */
    def fromUtc(timeZone: StringColumn): TimestampColumn =
      (column.elem, timeZone.elem)

@@ -49,7 +50,8 @@ private[syntax] trait TimestampColumns {
      *
      * @throws java.time.DateTimeException if invalid timeZone
      * @group Timestamp Type
-     * @see [[org.apache.spark.sql.functions.to_utc_timestamp]]
+     * @see org.apache.spark.sql.functions.to_utc_timestamp
+     * @todo scaladoc link (issue #135)
      */
    def toUtc(timeZone: StringColumn): TimestampColumn =
      (column.elem, timeZone.elem)

@@ -74,7 +76,7 @@ private[syntax] trait TimestampColumns {
      *   `1 second`. Check `org.apache.spark.unsafe.types.CalendarInterval` for
      *   valid duration identifiers.
      * @group Timestamp Type
-     * @see [[org.apache.spark.sql.functions.window]]
+     * @see [[org.apache.spark.sql.functions.window(timeColumn:org\.apache\.spark\.sql\.Column,windowDuration:String):* org.apache.spark.sql.functions.window]]
      */
    def window(windowDuration: String): RowColumn =
      column.elem.map(x => f.window(x, windowDuration)).toDC

@@ -102,7 +104,7 @@ private[syntax] trait TimestampColumns {
      *   start 15 minutes past the hour, e.g. 12:15-13:15, 13:15-14:15... provide
      *   `startTime` as `15 minutes`.
      * @group Timestamp Type
-     * @see [[org.apache.spark.sql.functions.window]]
+     * @see [[org.apache.spark.sql.functions.window(timeColumn:org\.apache\.spark\.sql\.Column,windowDuration:String,slideDuration:String,startTime:* org.apache.spark.sql.functions.window]]
      */
    def window(
        windowDuration: String,

@@ -119,7 +121,7 @@ private[syntax] trait TimestampColumns {
      * @group Timestamp Type
      * @return
      *   a Date Column without the hour
-     * @see [[org.apache.spark.sql.functions.to_date]]
+     * @see [[org.apache.spark.sql.functions.to_date(e:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.to_date]]
      */
    def toDate: DateColumn = column.elem.map(f.to_date).toDC

@@ -129,7 +131,7 @@ private[syntax] trait TimestampColumns {
      * @group Timestamp Type
      * @return
      *   a LocalDate Column without the hour
-     * @see [[org.apache.spark.sql.functions.to_date]]
+     * @see [[org.apache.spark.sql.functions.to_date(e:org\.apache\.spark\.sql\.Column):* org.apache.spark.sql.functions.to_date]]
      */
    def toLocalDate: LocalDateColumn = column.elem.map(f.to_date).toDC
  }
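Finally, a sketch of the timestamp helpers above (invented names; assumes `spark` and doric's `colTimestamp` selector):

```scala
import java.sql.Timestamp
import doric._

// Sketch only.
import spark.implicits._
val events = Seq(Timestamp.valueOf("2022-01-10 12:07:00")).toDF("ts")

val out = events
  .withColumn("w",   colTimestamp("ts").window("10 minutes")) // struct<start, end>
  .withColumn("day", colTimestamp("ts").toDate)               // drops the time part
  .withColumn("utc", colTimestamp("ts").toUtc("CET".lit))     // may throw DateTimeException
```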
