diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index 1c1519b3f1651..45c5fd6b71f75 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -276,6 +276,7 @@ def save(self, path=None, format=None, mode=None, partitionBy=(), **options):
         * ``overwrite``: Overwrite existing data.
         * ``ignore``: Silently ignore this operation if data already exists.
         * ``error`` (default case): Throw an exception if data already exists.
+        :param partitionBy: names of partitioning columns
         :param options: all other string options
 
         >>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
@@ -316,6 +317,7 @@ def saveAsTable(self, name, format=None, mode=None, partitionBy=(), **options):
         :param name: the table name
         :param format: the format used to save
         :param mode: one of `append`, `overwrite`, `error`, `ignore` (default: error)
+        :param partitionBy: names of partitioning columns
         :param options: all other string options
         """
         self.partitionBy(partitionBy).mode(mode).options(**options)
@@ -350,7 +352,8 @@ def parquet(self, path, mode=None, partitionBy=()):
         * ``overwrite``: Overwrite existing data.
         * ``ignore``: Silently ignore this operation if data already exists.
         * ``error`` (default case): Throw an exception if data already exists.
+        :param partitionBy: names of partitioning columns
 
         >>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
         """
         self.partitionBy(partitionBy).mode(mode)