Commit

address comments
huaxingao committed Nov 19, 2018
1 parent 9e2b0f9 commit 2ebfe5a
Showing 2 changed files with 7 additions and 6 deletions.
11 changes: 6 additions & 5 deletions R/pkg/R/mllib_clustering.R
@@ -649,24 +649,25 @@ setMethod("write.ml", signature(object = "LDAModel", path = "character"),
 #' @note spark.assignClusters(SparkDataFrame) since 3.0.0
 setMethod("spark.assignClusters",
           signature(data = "SparkDataFrame"),
-          function(data, k = 2L, initMode = "random", maxIter = 20L, srcCol = "src",
-                   dstCol = "dst", weightCol = NULL) {
+          function(data, k = 2L, initMode = c("random", "degree"), maxIter = 20L,
+                   sourceCol = "src", destinationCol = "dst", weightCol = NULL) {
             if (!is.numeric(k) || k < 1) {
               stop("k should be a number with value >= 1.")
             }
             if (!is.integer(maxIter) || maxIter <= 0) {
               stop("maxIter should be a number with value > 0.")
             }
+            initMode <- match.arg(initMode)
             if (!is.null(weightCol) && weightCol == "") {
               weightCol <- NULL
             } else if (!is.null(weightCol)) {
               weightCol <- as.character(weightCol)
             }
             jobj <- callJStatic("org.apache.spark.ml.r.PowerIterationClusteringWrapper",
                                 "getPowerIterationClustering",
-                                as.integer(k), as.character(initMode),
-                                as.integer(maxIter), as.character(srcCol),
-                                as.character(dstCol), weightCol)
+                                as.integer(k), initMode,
+                                as.integer(maxIter), as.character(sourceCol),
+                                as.character(destinationCol), weightCol)
             object <- new("PowerIterationClustering", jobj = jobj)
             dataFrame(callJMethod(object@jobj, "assignClusters", data@sdf))
           })
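The notable pattern in this hunk: `initMode` now defaults to the whole choice vector `c("random", "degree")` and is narrowed with `match.arg`, so an invalid mode fails fast in R instead of reaching the JVM, and the `as.character(initMode)` coercion becomes unnecessary. A minimal standalone sketch of that idiom, independent of SparkR (`pickInitMode` is a hypothetical helper, not part of the commit):

```r
# Illustrates the match.arg validation idiom used in the diff above.
pickInitMode <- function(initMode = c("random", "degree")) {
  # With no argument supplied, match.arg returns the first choice ("random");
  # an explicit argument must (partially) match one of the listed choices.
  match.arg(initMode)
}

pickInitMode()            # "random"
pickInitMode("degree")    # "degree"
# pickInitMode("kmeans")  # Error: 'arg' should be one of "random", "degree"
```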
2 changes: 1 addition & 1 deletion R/pkg/vignettes/sparkr-vignettes.Rmd
@@ -978,7 +978,7 @@ Power Iteration Clustering (PIC) is a scalable graph clustering algorithm. `spark.assignClusters`
 df <- createDataFrame(list(list(0L, 1L, 1.0), list(0L, 2L, 1.0),
                            list(1L, 2L, 1.0), list(3L, 4L, 1.0),
                            list(4L, 0L, 0.1)), schema = c("src", "dst", "weight"))
-head(spark.assignClusters(df, initMode="degree", weightCol="weight"))
+head(spark.assignClusters(df, initMode = "degree", weightCol = "weight"))
 ```
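Since this commit also renames the column parameters, a graph whose edges are stored under different column names would be passed with the new argument names. A hedged variant of the vignette example above (the `from`/`to` column names are illustrative, not from the vignette):

```r
# Same kind of toy edge list, but with columns named "from" and "to";
# sourceCol/destinationCol map them for spark.assignClusters.
df2 <- createDataFrame(list(list(0L, 1L, 1.0), list(1L, 2L, 1.0),
                            list(3L, 4L, 1.0)),
                       schema = c("from", "to", "weight"))
head(spark.assignClusters(df2, sourceCol = "from", destinationCol = "to",
                          weightCol = "weight"))
```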

#### FP-growth
