From 8890b60de7b94b97b9d87560cbb06faa8a838bf3 Mon Sep 17 00:00:00 2001
From: Felix Cheung
Date: Tue, 9 May 2017 23:16:16 -0700
Subject: [PATCH 1/2] disable run bucketBy saveAsTable in pyspark doctest

---
 R/pkg/inst/tests/testthat/test_sparkSQL.R | 1 +
 python/pyspark/sql/readwriter.py          | 4 ++--
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R
index 19aa61e9a56c3..72f186fea2a60 100644
--- a/R/pkg/inst/tests/testthat/test_sparkSQL.R
+++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -678,6 +678,7 @@ test_that("jsonRDD() on a RDD with json string", {
 
 test_that("test tableNames and tables", {
   count <- count(listTables())
+  expect_equal(count, 0)
 
   df <- read.json(jsonPath)
   createOrReplaceTempView(df, "table1")
diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index 90ce8f81eb7fd..61a6b76a79aed 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -575,7 +575,7 @@ def bucketBy(self, numBuckets, col, *cols):
         .. note:: Applicable for file-based data sources in combination with
                   :py:meth:`DataFrameWriter.saveAsTable`.
 
-        >>> (df.write.format('parquet')
+        >>> (df.write.format('parquet')  # doctest: +SKIP
         ...     .bucketBy(100, 'year', 'month')
         ...     .mode("overwrite")
         ...     .saveAsTable('bucketed_table'))
@@ -602,7 +602,7 @@ def sortBy(self, col, *cols):
         :param col: a name of a column, or a list of names.
         :param cols: additional names (optional). If `col` is a list it should be empty.
 
-        >>> (df.write.format('parquet')
+        >>> (df.write.format('parquet')  # doctest: +SKIP
         ...     .bucketBy(100, 'year', 'month')
         ...     .sortBy('day')
         ...     .mode("overwrite")

From a5b73ae540a7e63d3398b92ae5c64592985e141d Mon Sep 17 00:00:00 2001
From: Felix Cheung
Date: Wed, 10 May 2017 09:18:10 -0700
Subject: [PATCH 2/2] remove check

---
 R/pkg/inst/tests/testthat/test_sparkSQL.R | 1 -
 1 file changed, 1 deletion(-)

diff --git a/R/pkg/inst/tests/testthat/test_sparkSQL.R b/R/pkg/inst/tests/testthat/test_sparkSQL.R
index 72f186fea2a60..19aa61e9a56c3 100644
--- a/R/pkg/inst/tests/testthat/test_sparkSQL.R
+++ b/R/pkg/inst/tests/testthat/test_sparkSQL.R
@@ -678,7 +678,6 @@ test_that("jsonRDD() on a RDD with json string", {
 
 test_that("test tableNames and tables", {
   count <- count(listTables())
-  expect_equal(count, 0)
 
   df <- read.json(jsonPath)
   createOrReplaceTempView(df, "table1")
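
Note on the pyspark hunks above: the added "# doctest: +SKIP" directive tells Python's doctest runner to keep the example in the docstring for documentation but never execute it, so the bucketBy/saveAsTable examples remain visible without being run during the doctest pass. The following is a minimal, self-contained sketch of that mechanism outside of Spark; the save_bucketed helper and the spark.range(10) call in its example are hypothetical and only illustrate how a skipped example behaves.

    import doctest

    def save_bucketed(df):
        """Write a DataFrame as a bucketed Parquet table.

        The example below is parsed by doctest but, because of +SKIP, never
        executed, so no live SparkSession or metastore is needed to run the
        module's doctests:

        >>> save_bucketed(spark.range(10))  # doctest: +SKIP
        """
        (df.write.format('parquet')
            .bucketBy(100, 'year', 'month')
            .mode('overwrite')
            .saveAsTable('bucketed_table'))

    if __name__ == '__main__':
        # Runs the module's doctests; the skipped example produces no failure
        # and the function body is never invoked.
        doctest.testmod(verbose=True)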