diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
index b61324ff98ae5..4388f6e12671a 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
@@ -1177,7 +1177,7 @@ object SQLConf {
     .createWithDefault(4 * 1024 * 1024)
 
   val FILES_MIN_PARTITION_NUM = buildConf("spark.sql.files.minPartitionNum")
-    .doc("The suggested (not guaranteed) minimum number of splitting file partitions. " +
+    .doc("The suggested (not guaranteed) minimum number of split file partitions. " +
       "If not set, the default value is `spark.default.parallelism`. This configuration is " +
       "effective only when using file-based sources such as Parquet, JSON and ORC.")
     .version("3.1.0")
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
index 5a8052e932b8e..8a6e6b5ee801d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategySuite.scala
@@ -550,14 +550,14 @@ class FileSourceStrategySuite extends QueryTest with SharedSparkSession with Pre
     }
 
     withSQLConf(SQLConf.FILES_MIN_PARTITION_NUM.key -> "16") {
-      val partitions = (1 to 100).map(i => s"file$i" -> 128*1024*1024)
+      val partitions = (1 to 100).map(i => s"file$i" -> 128 * 1024 * 1024)
       val table = createTable(files = partitions)
-      // partition is limit by filesMaxPartitionBytes(128MB)
+      // partition is limited by filesMaxPartitionBytes(128MB)
       assert(table.rdd.partitions.length == 100)
     }
 
     withSQLConf(SQLConf.FILES_MIN_PARTITION_NUM.key -> "32") {
-      val partitions = (1 to 800).map(i => s"file$i" -> 4*1024*1024)
+      val partitions = (1 to 800).map(i => s"file$i" -> 4 * 1024 * 1024)
       val table = createTable(files = partitions)
       assert(table.rdd.partitions.length == 50)
    }
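Why the second assertion yields 50 partitions is not obvious from the diff alone. The sketch below (not part of the patch) mirrors the split-size computation in `FilePartition.maxSplitBytes`, assuming Spark's default values for `spark.sql.files.maxPartitionBytes` (128MB) and `spark.sql.files.openCostInBytes` (4MB); the object name and variable names are illustrative, not from the patch.

```scala
// Minimal sketch: reproduces the partition-count arithmetic for the 800-file test case.
// Assumes Spark's defaults: maxPartitionBytes = 128MB, openCostInBytes = 4MB.
object MinPartitionNumMath extends App {
  val maxPartitionBytes = 128L * 1024 * 1024 // spark.sql.files.maxPartitionBytes default
  val openCostInBytes   = 4L * 1024 * 1024   // spark.sql.files.openCostInBytes default
  val minPartitionNum   = 32                 // spark.sql.files.minPartitionNum, as in the test

  val numFiles = 800
  val fileSize = 4L * 1024 * 1024            // 4MB per file, as in the test

  // Each file is padded with the open cost before being packed into partitions.
  val totalBytes   = numFiles * (fileSize + openCostInBytes)  // 6400MB
  val bytesPerCore = totalBytes / minPartitionNum             // 200MB
  val maxSplitBytes =
    math.min(maxPartitionBytes, math.max(openCostInBytes, bytesPerCore)) // capped at 128MB

  // Packing 8MB (padded) files greedily into 128MB splits gives 16 files per
  // partition, i.e. 800 / 16 = 50 partitions, matching the assertion.
  println(totalBytes / maxSplitBytes) // 50
}
```

In the first test case the same formula gives a `bytesPerCore` well above 128MB, so `maxSplitBytes` stays at `filesMaxPartitionBytes` and each 128MB file becomes exactly one partition, which is what the updated comment in the test records.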