Skip to content

Commit

Permalink
Fix Python tests
Browse files Browse the repository at this point in the history
  • Loading branch information
rxin committed Jul 1, 2016
1 parent 7599382 commit fad616f
Showing 1 changed file with 4 additions and 7 deletions.
11 changes: 4 additions & 7 deletions python/pyspark/sql/streaming.py
Original file line number Diff line number Diff line change
Expand Up @@ -315,7 +315,7 @@ def load(self, path=None, format=None, schema=None, **options):
>>> json_sdf = spark.readStream.format("json")\
.schema(sdf_schema)\
.load(os.path.join(tempfile.mkdtemp(),'data'))
.load(tempfile.mkdtemp())
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
Expand Down Expand Up @@ -382,8 +382,7 @@ def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
>>> json_sdf = spark.readStream.json(os.path.join(tempfile.mkdtemp(), 'data'), \
schema = sdf_schema)
>>> json_sdf = spark.readStream.json(tempfile.mkdtemp(), schema = sdf_schema)
>>> json_sdf.isStreaming
True
>>> json_sdf.schema == sdf_schema
Expand Down Expand Up @@ -411,8 +410,7 @@ def parquet(self, path):
.. note:: Experimental.
>>> parquet_sdf = spark.readStream.schema(sdf_schema)\
.parquet(os.path.join(tempfile.mkdtemp()))
>>> parquet_sdf = spark.readStream.schema(sdf_schema).parquet(tempfile.mkdtemp())
>>> parquet_sdf.isStreaming
True
>>> parquet_sdf.schema == sdf_schema
Expand Down Expand Up @@ -512,8 +510,7 @@ def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=Non
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
>>> csv_sdf = spark.readStream.csv(os.path.join(tempfile.mkdtemp(), 'data'), \
schema = sdf_schema)
>>> csv_sdf = spark.readStream.csv(tempfile.mkdtemp(), schema = sdf_schema)
>>> csv_sdf.isStreaming
True
>>> csv_sdf.schema == sdf_schema
Expand Down

0 comments on commit fad616f

Please sign in to comment.