Skip to content

Commit

Permalink
Merge pull request #15 from shivaram/sparkr-groupby-retrain
Browse files Browse the repository at this point in the history
Revert workaround in SparkR to retain grouped cols
  • Loading branch information
rxin committed May 8, 2015
2 parents b8b87e1 + c1de670 commit 5f923c0
Show file tree
Hide file tree
Showing 2 changed files with 1 addition and 14 deletions.
4 changes: 1 addition & 3 deletions R/pkg/R/group.R
Expand Up @@ -103,9 +103,7 @@ setMethod("agg",
}
}
jcols <- lapply(cols, function(c) { c@jc })
# the GroupedData.agg(col, cols*) API does not contain grouping Column
sdf <- callJStatic("org.apache.spark.sql.api.r.SQLUtils", "aggWithGrouping",
x@sgd, listToSeq(jcols))
sdf <- callJMethod(x@sgd, "agg", jcols[[1]], listToSeq(jcols[-1]))
} else {
stop("agg can only support Column or character")
}
Expand Down
11 changes: 0 additions & 11 deletions sql/core/src/main/scala/org/apache/spark/sql/api/r/SQLUtils.scala
Expand Up @@ -72,17 +72,6 @@ private[r] object SQLUtils {
sqlContext.createDataFrame(rowRDD, schema)
}

// Helper for SparkR: build an aggregated DataFrame that also keeps the
// grouping columns (the plain GroupedData.agg(col, cols*) API drops them).
def aggWithGrouping(gd: GroupedData, exprs: Column*): DataFrame = {
  // Every aggregate expression must be named before it can become an output
  // column: keep expressions that already carry a name, alias the rest using
  // their own string rendering.
  val aggExprs = exprs.map(_.expr).map {
    case named: NamedExpression => named
    case other: Expression => Alias(other, other.simpleString)()
  }
  gd.toDF(aggExprs)
}

// Converts each Row of `df` via rowToRBytes, exposing the result to the R
// side as a JavaRDD of serialized byte arrays.
def dfToRowRDD(df: DataFrame): JavaRDD[Array[Byte]] = {
  df.map(row => rowToRBytes(row))
}
Expand Down

0 comments on commit 5f923c0

Please sign in to comment.