[SPARK-2024] renaming pickled -> pickledRDD
kanzhang committed Jul 30, 2014
1 parent d998ad6 commit 6591e37
Showing 1 changed file with 15 additions and 15 deletions.
python/pyspark/rdd.py: 30 changes (15 additions, 15 deletions)
@@ -1049,9 +1049,9 @@ def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
         @param valueConverter: (None by default)
         """
         jconf = self.ctx._dictToJavaMap(conf)
-        pickled = self._toPickleSerialization()
-        batched = isinstance(pickled._jrdd_deserializer, BatchedSerializer)
-        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickled._jrdd, batched, jconf,
+        pickledRDD = self._toPickleSerialization()
+        batched = isinstance(pickledRDD._jrdd_deserializer, BatchedSerializer)
+        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, batched, jconf,
             keyConverter, valueConverter, True)
 
     def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
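The hunk above only renames a local variable; behaviour is unchanged. saveAsNewAPIHadoopDataset still pickles the RDD, checks whether its serializer batches records, and passes the trailing True to the JVM-side PythonRDD.saveAsHadoopDataset (the same entry point saveAsHadoopDataset below calls with False). A minimal usage sketch, not part of this commit; the local SparkContext, the /tmp output directory, and the Hadoop 2 configuration key names are assumptions and may differ on older Hadoop releases:

    from pyspark import SparkContext

    sc = SparkContext("local[*]", "new-api-dataset-demo")   # assumed local context
    rdd = sc.parallelize([(1, "a"), (2, "b"), (3, "c")])    # key-value pairs are required

    # Assumed Hadoop 2 style key names; older Hadoop versions use different keys.
    conf = {
        "mapreduce.job.outputformat.class":
            "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
        "mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
        "mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
        "mapreduce.output.fileoutputformat.outputdir": "/tmp/new-api-dataset",
    }
    rdd.saveAsNewAPIHadoopDataset(conf)
    sc.stop()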
@@ -1076,9 +1076,9 @@ def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
         @param conf: Hadoop job configuration, passed in as a dict (None by default)
         """
         jconf = self.ctx._dictToJavaMap(conf)
-        pickled = self._toPickleSerialization()
-        batched = isinstance(pickled._jrdd_deserializer, BatchedSerializer)
-        self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickled._jrdd, batched, path,
+        pickledRDD = self._toPickleSerialization()
+        batched = isinstance(pickledRDD._jrdd_deserializer, BatchedSerializer)
+        self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, batched, path,
             outputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf)
 
     def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
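The same rename applies in saveAsNewAPIHadoopFile, which takes the output path and format class directly rather than through a conf dict. A minimal sketch of calling it, not part of this commit; the context, path, and Writable classes are assumptions, relying on the default converters handling plain int and str keys/values:

    from pyspark import SparkContext

    sc = SparkContext("local[*]", "new-api-file-demo")      # assumed local context
    rdd = sc.parallelize([(1, "a"), (2, "b")])
    rdd.saveAsNewAPIHadoopFile(
        "/tmp/new-api-file",                                # assumed output path
        "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
        keyClass="org.apache.hadoop.io.IntWritable",
        valueClass="org.apache.hadoop.io.Text")
    sc.stop()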
@@ -1093,9 +1093,9 @@ def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
         @param valueConverter: (None by default)
         """
         jconf = self.ctx._dictToJavaMap(conf)
-        pickled = self._toPickleSerialization()
-        batched = isinstance(pickled._jrdd_deserializer, BatchedSerializer)
-        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickled._jrdd, batched, jconf,
+        pickledRDD = self._toPickleSerialization()
+        batched = isinstance(pickledRDD._jrdd_deserializer, BatchedSerializer)
+        self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, batched, jconf,
             keyConverter, valueConverter, False)
 
     def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
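Same rename here; note the trailing False in the JVM call, which distinguishes this old-API (mapred package) path from the new-API variant above. A minimal sketch of driving saveAsHadoopDataset through a configuration dict, not part of this commit; the context, output directory, and mapred.* key names are assumptions:

    from pyspark import SparkContext

    sc = SparkContext("local[*]", "old-api-dataset-demo")   # assumed local context
    rdd = sc.parallelize([(1, "a"), (2, "b")])

    # Assumed old-API (mapred) configuration keys.
    conf = {
        "mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
        "mapred.output.key.class": "org.apache.hadoop.io.IntWritable",
        "mapred.output.value.class": "org.apache.hadoop.io.Text",
        "mapred.output.dir": "/tmp/old-api-dataset",
    }
    rdd.saveAsHadoopDataset(conf)
    sc.stop()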
@@ -1121,9 +1121,9 @@ def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
         @param compressionCodecClass: (None by default)
         """
         jconf = self.ctx._dictToJavaMap(conf)
-        pickled = self._toPickleSerialization()
-        batched = isinstance(pickled._jrdd_deserializer, BatchedSerializer)
-        self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickled._jrdd, batched,
+        pickledRDD = self._toPickleSerialization()
+        batched = isinstance(pickledRDD._jrdd_deserializer, BatchedSerializer)
+        self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, batched,
             path, outputFormatClass, keyClass, valueClass, keyConverter, valueConverter,
             jconf, compressionCodecClass)
 
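This hunk is the same rename in saveAsHadoopFile, which additionally forwards an optional compressionCodecClass. A minimal sketch, not part of this commit; the context, path, and codec class are assumptions:

    from pyspark import SparkContext

    sc = SparkContext("local[*]", "old-api-file-demo")      # assumed local context
    rdd = sc.parallelize([(1, "a"), (2, "b")])
    rdd.saveAsHadoopFile(
        "/tmp/old-api-file",                                # assumed output path
        "org.apache.hadoop.mapred.SequenceFileOutputFormat",
        keyClass="org.apache.hadoop.io.IntWritable",
        valueClass="org.apache.hadoop.io.Text",
        compressionCodecClass="org.apache.hadoop.io.compress.GzipCodec")
    sc.stop()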
@@ -1138,9 +1138,9 @@ def saveAsSequenceFile(self, path, compressionCodecClass=None):
         @param path: path to sequence file
         @param compressionCodecClass: (None by default)
         """
-        pickled = self._toPickleSerialization()
-        batched = isinstance(pickled._jrdd_deserializer, BatchedSerializer)
-        self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickled._jrdd, batched,
+        pickledRDD = self._toPickleSerialization()
+        batched = isinstance(pickledRDD._jrdd_deserializer, BatchedSerializer)
+        self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, batched,
             path, compressionCodecClass)
 
     def saveAsPickleFile(self, path, batchSize=10):
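The last hunk applies the rename in saveAsSequenceFile, the simplest of the five writers since it only needs a path. A minimal sketch, not part of this commit; the context and output path are assumptions:

    from pyspark import SparkContext

    sc = SparkContext("local[*]", "sequence-file-demo")     # assumed local context
    rdd = sc.parallelize([(1, "a"), (2, "b"), (3, "c")])
    rdd.saveAsSequenceFile("/tmp/sequence-file-demo")       # assumed output path
    sc.stop()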
