Skip to content

Commit

Permalink
Addressing review comments
Browse files Browse the repository at this point in the history
  • Loading branch information
MaxGekk committed Jul 13, 2018
1 parent 07a4ac0 commit 7533114
Show file tree
Hide file tree
Showing 5 changed files with 24 additions and 2 deletions.
2 changes: 2 additions & 0 deletions R/pkg/NAMESPACE
Original file line number Diff line number Diff line change
Expand Up @@ -435,6 +435,8 @@ export("as.DataFrame",
"spark.addFile",
"spark.getSparkFilesRootDirectory",
"spark.getSparkFiles",
"spark.numCores",
"spark.numExecutors",
"sql",
"str",
"tableToDF",
Expand Down
4 changes: 2 additions & 2 deletions R/pkg/R/context.R
Original file line number Diff line number Diff line change
Expand Up @@ -447,7 +447,7 @@ setCheckpointDir <- function(directory) {
#' @note spark.numCores since 2.4.0
# Total number of CPU cores of all executors registered in the cluster at the
# moment. Returns the value (not invisibly) so it prints at the REPL.
# NOTE(review): the pasted diff left both the old `invisible(...)` line and its
# replacement in the body, which would call into the JVM twice; only the
# plain call is kept.
spark.numCores <- function() {
  sc <- getSparkContext()
  callJMethod(sc, "numCores")
}

#' Total number of executors registered in the cluster at the moment.
Expand All @@ -461,5 +461,5 @@ spark.numCores <- function() {
#' @note spark.numExecutors since 2.4.0
# Total number of executors registered in the cluster at the moment.
# Returns the value (not invisibly) so it prints at the REPL.
# NOTE(review): the pasted diff left both the old `invisible(...)` line and its
# replacement in the body, which would call into the JVM twice; only the
# plain call is kept.
spark.numExecutors <- function() {
  sc <- getSparkContext()
  callJMethod(sc, "numExecutors")
}
6 changes: 6 additions & 0 deletions core/src/main/scala/org/apache/spark/SparkContext.scala
Original file line number Diff line number Diff line change
Expand Up @@ -2340,12 +2340,18 @@ class SparkContext(config: SparkConf) extends Logging {
/**
 * Returns the total number of CPU cores across all executors currently
 * registered in the cluster. The value is a snapshot of the cluster's
 * current state and may change as executors come and go.
 *
 * @note This method is experimental, and its behavior can be changed in the next releases.
 * @since 2.4.0
 */
def numCores: Int = {
  // Delegates to the task scheduler, which tracks registered executors.
  taskScheduler.numCores
}

/**
 * Returns the number of executors currently registered in the cluster.
 * The value is a snapshot of the cluster's current state and may change
 * as executors come and go.
 *
 * @note This method is experimental, and its behavior can be changed in the next releases.
 * @since 2.4.0
 */
def numExecutors: Int = {
  // Delegates to the task scheduler, which tracks registered executors.
  taskScheduler.numExecutors
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -131,12 +131,18 @@ class JavaSparkContext(val sc: SparkContext)
/**
 * Returns the total number of CPU cores across all executors currently
 * registered in the cluster, boxed for Java callers. The value is a
 * snapshot of the cluster's current state and may change over time.
 *
 * @note This method is experimental, and its behavior can be changed in the next releases.
 * @since 2.4.0
 */
def numCores: java.lang.Integer = {
  // Forward to the underlying SparkContext; Int is auto-boxed to Integer.
  sc.numCores
}

/**
 * Returns the number of executors currently registered in the cluster,
 * boxed for Java callers. The value is a snapshot of the cluster's
 * current state and may change over time.
 *
 * @note This method is experimental, and its behavior can be changed in the next releases.
 * @since 2.4.0
 */
def numExecutors: java.lang.Integer = {
  // Forward to the underlying SparkContext; Int is auto-boxed to Integer.
  sc.numExecutors
}

Expand Down
8 changes: 8 additions & 0 deletions python/pyspark/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -409,16 +409,24 @@ def defaultMinPartitions(self):
@property
def numCores(self):
    """
    Total number of CPU cores of all executors registered in the cluster
    at the moment. The number reflects the current status of the cluster
    and can change in the future.

    .. note:: Experimental

    .. versionadded:: 2.4.0
    """
    # Query the JVM-side SparkContext through the Java gateway.
    jvm_sc = self._jsc.sc()
    return jvm_sc.numCores()

@property
def numExecutors(self):
    """
    Total number of executors registered in the cluster at the moment.
    The number reflects the current status of the cluster and can change
    in the future.

    .. note:: Experimental

    .. versionadded:: 2.4.0
    """
    # Query the JVM-side SparkContext through the Java gateway.
    jvm_sc = self._jsc.sc()
    return jvm_sc.numExecutors()

Expand Down

0 comments on commit 7533114

Please sign in to comment.