[SPARK-42821][SQL] Remove unused parameters in splitFiles methods
### What changes were proposed in this pull request?
This PR removes the unused parameters in the `PartitionedFileUtil.splitFiles` methods.
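
For context, a sketch of the signature change, reconstructed from the truncated diff below; the trailing `partitionValues` parameter and the `Seq[PartitionedFile]` return type are assumptions, since the diff cuts off before them:

```scala
// Before: an unused SparkSession parameter at every call site.
def splitFiles(
    sparkSession: SparkSession,   // unused; removed by this PR
    file: FileStatusWithMetadata,
    isSplitable: Boolean,
    maxSplitBytes: Long,
    partitionValues: InternalRow  // assumption: not visible in the truncated diff
  ): Seq[PartitionedFile]         // assumption: not visible in the truncated diff

// After: the unused parameter is gone.
def splitFiles(
    file: FileStatusWithMetadata,
    isSplitable: Boolean,
    maxSplitBytes: Long,
    partitionValues: InternalRow
  ): Seq[PartitionedFile]
```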

### Why are the changes needed?
Make the code more concise.

### Does this PR introduce _any_ user-facing change?
No.

### How was this patch tested?
Pass GA.

Closes #40454 from panbingkun/minor_PartitionedFileUtil.

Authored-by: panbingkun <pbk1982@gmail.com>
Signed-off-by: Sean Owen <srowen@gmail.com>
panbingkun authored and srowen committed Nov 8, 2023
1 parent 9d93b71 · commit eabea64
Showing 3 changed files with 1 addition and 5 deletions.
```diff
@@ -687,7 +687,7 @@ case class FileSourceScanExec(
    * @param selectedPartitions Hive-style partition that are part of the read.
    */
   private def createReadRDD(
-      readFile: (PartitionedFile) => Iterator[InternalRow],
+      readFile: PartitionedFile => Iterator[InternalRow],
       selectedPartitions: Array[PartitionDirectory]): RDD[InternalRow] = {
     val openCostInBytes = relation.sparkSession.sessionState.conf.filesOpenCostInBytes
     val maxSplitBytes =
@@ -711,7 +711,6 @@
         val isSplitable = relation.fileFormat.isSplitable(
           relation.sparkSession, relation.options, file.getPath)
         PartitionedFileUtil.splitFiles(
-          sparkSession = relation.sparkSession,
           file = file,
           isSplitable = isSplitable,
           maxSplitBytes = maxSplitBytes,
```
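
One cosmetic detail in the first hunk: `(PartitionedFile) => Iterator[InternalRow]` and `PartitionedFile => Iterator[InternalRow]` are the same type, since Scala makes the parentheses around a single parameter in a function type optional. A self-contained illustration with plain types:

```scala
object FunctionTypeParens {
  // (Int) => String and Int => String denote the identical function type;
  // dropping the parentheses changes nothing but readability.
  val f: (Int) => String = n => s"value=$n"
  val g: Int => String = f // compiles because the two annotations are the same type

  def main(args: Array[String]): Unit = {
    println(g(42)) // prints: value=42
  }
}
```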
```diff
@@ -20,13 +20,11 @@ package org.apache.spark.sql.execution
 import org.apache.hadoop.fs.{BlockLocation, FileStatus, LocatedFileStatus}
 
 import org.apache.spark.paths.SparkPath
-import org.apache.spark.sql.SparkSession
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.execution.datasources._
 
 object PartitionedFileUtil {
   def splitFiles(
-      sparkSession: SparkSession,
       file: FileStatusWithMetadata,
       isSplitable: Boolean,
       maxSplitBytes: Long,
```
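
Since the parameter is dropped from a public helper, every caller has to stop passing `sparkSession = ...`, which is what the `FileSourceScanExec` hunk above and the `FileScan` hunk below do. A minimal sketch of an updated call, assuming a hypothetical surrounding loop over `partition.files` and the trailing `partitionValues` parameter that the truncated diff does not show:

```scala
// Hypothetical call site in the new named-argument style; `partition` is
// assumed to be a PartitionDirectory and 128 MB an illustrative split size.
val splits = partition.files.flatMap { file =>
  PartitionedFileUtil.splitFiles(
    file = file,
    isSplitable = true,                  // e.g. from fileFormat.isSplitable(...)
    maxSplitBytes = 128 * 1024 * 1024,   // illustrative only
    partitionValues = partition.values)  // assumption: parameter not shown in the diff
}
```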
```diff
@@ -152,7 +152,6 @@ trait FileScan extends Scan
     }
     partition.files.flatMap { file =>
       PartitionedFileUtil.splitFiles(
-        sparkSession = sparkSession,
         file = file,
         isSplitable = isSplitable(file.getPath),
         maxSplitBytes = maxSplitBytes,
```
