[SPARK-4860][pyspark][sql] using Scala implementations of `sample()` and `takeSample()`
jbencook committed Dec 22, 2014
1 parent 6ee6aa7 commit 020cbdf
Showing 2 changed files with 43 additions and 0 deletions.
29 changes: 29 additions & 0 deletions python/pyspark/sql.py
@@ -2085,6 +2085,35 @@ def subtract(self, other, numPartitions=None):
        else:
            raise ValueError("Can only subtract another SchemaRDD")

    def sample(self, withReplacement, fraction, seed=None):
        """
        Return a sampled subset of this SchemaRDD.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.sample(False, 0.5, 97).count()
        2L
        """
        assert fraction >= 0.0, "Negative fraction value: %s" % fraction
        seed = seed if seed is not None else random.randint(0, sys.maxint)
        rdd = self._jschema_rdd.baseSchemaRDD().sample(
            withReplacement, fraction, long(seed))
        return SchemaRDD(rdd.toJavaSchemaRDD(), self.sql_ctx)

    def takeSample(self, withReplacement, num, seed=None):
        """Return a fixed-size sampled subset of this SchemaRDD.

        >>> srdd = sqlCtx.inferSchema(rdd)
        >>> srdd.takeSample(False, 2, 97)
        [Row(field1=3, field2=u'row3'), Row(field1=1, field2=u'row1')]
        """
        seed = seed if seed is not None else random.randint(0, sys.maxint)
        with SCCallSiteSync(self.context) as css:
            bytesInJava = self._jschema_rdd.baseSchemaRDD() \
                .takeSampleToPython(withReplacement, num, long(seed)) \
                .iterator()
        cls = _create_cls(self.schema())
        return map(cls, self._collect_iterator_through_file(bytesInJava))


def _test():
    import doctest
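For orientation only, here is a minimal sketch of how the two new PySpark methods could be exercised; it is not part of the commit. The local master, the sc/sqlCtx names, and the three-row dataset are illustrative assumptions that mirror the doctest fixtures above (Spark 1.2-era, Python 2 API).

# Sketch only: assumes a Spark 1.2-era PySpark on the PYTHONPATH (Python 2).
from pyspark import SparkContext
from pyspark.sql import SQLContext, Row

sc = SparkContext("local", "spark-4860-demo")
sqlCtx = SQLContext(sc)

# Same shape of data as the doctest fixtures: an RDD of Rows with field1/field2.
rdd = sc.parallelize([Row(field1=1, field2=u'row1'),
                      Row(field1=2, field2=u'row2'),
                      Row(field1=3, field2=u'row3')])
srdd = sqlCtx.inferSchema(rdd)

# sample() returns a new SchemaRDD; the fraction is a probability, not an exact size.
print(srdd.sample(False, 0.5, 97).count())

# takeSample() materializes exactly num rows on the driver as a list of Rows.
print(srdd.takeSample(False, 2, 97))

sc.stop()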
14 changes: 14 additions & 0 deletions sql/core/src/main/scala/org/apache/spark/sql/SchemaRDD.scala
@@ -437,6 +437,20 @@ class SchemaRDD(
    }.grouped(100).map(batched => pickle.dumps(batched.toArray)).toIterable)
  }

  /**
   * Serializes the Array[Row] returned by SchemaRDD's takeSample(), using the same
   * format as javaToPython and collectToPython. It is used by pyspark.
   */
  private[sql] def takeSampleToPython(withReplacement: Boolean,
                                      num: Int,
                                      seed: Long): JList[Array[Byte]] = {
    val fieldTypes = schema.fields.map(_.dataType)
    val pickle = new Pickler
    new java.util.ArrayList(this.takeSample(withReplacement, num, seed).map { row =>
      EvaluatePython.rowToArray(row, fieldTypes)
    }.grouped(100).map(batched => pickle.dumps(batched.toArray)).toIterable)
  }

  /**
   * Creates SchemaRDD by applying own schema to derived RDD. Typically used to wrap return value
   * of base RDD functions that do not change schema.
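The Scala method above pickles the sampled rows in batches of up to 100 (grouped(100)) and returns one byte array per batch as a JList[Array[Byte]]; the Python takeSample() then unpickles each batch and flattens the rows. The snippet below is a hypothetical illustration of that batching contract using only the standard pickle module; it is not Spark's actual Pyrolite/temp-file path, and unbatch() is an invented helper name.

# Hypothetical illustration (not Spark code): each list element is one pickled
# batch of rows; the consumer unpickles every batch and flattens the results.
import pickle

def unbatch(pickled_batches):
    rows = []
    for payload in pickled_batches:
        rows.extend(pickle.loads(payload))
    return rows

# Two stand-in batches of row tuples, as if grouped(100) had split the sample.
batches = [pickle.dumps([(1, u'row1'), (2, u'row2')]),
           pickle.dumps([(3, u'row3')])]
print(unbatch(batches))  # [(1, u'row1'), (2, u'row2'), (3, u'row3')]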
