[SPARK-24691][SQL]Dispatch the type support check in FileFormat implementation #21667
Changes from 8 commits
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileFormat.scala:

```diff
@@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.sources.Filter
-import org.apache.spark.sql.types.StructType
+import org.apache.spark.sql.types.{DataType, StructType}

 /**
```
```diff
@@ -57,7 +57,7 @@ trait FileFormat {
       dataSchema: StructType): OutputWriterFactory

   /**
-   * Returns whether this format support returning columnar batch or not.
+   * Returns whether this format supports returning columnar batch or not.
    *
    * TODO: we should just have different traits for the different formats.
    */
```
```diff
@@ -152,6 +152,11 @@ trait FileFormat {
     }
   }

+  /**
+   * Returns whether this format supports the given [[DataType]] in read/write path.
+   * By default all data types are supported.
+   */
+  def supportDataType(dataType: DataType, isReadPath: Boolean): Boolean = true
 }

 /**
```

Review discussion on the new `supportDataType` default:

- Who does not override it?
- Then why not remove this default implementation and create …
- Then we also need to update …
- Makes sense.
- Technically this is an internal API, but in practice we should be concerned about compatibility here when possible. I think we are already concerned about Avro, right? I still doubt whether it's a good idea to expose this in this trait.
- This is the only way to allow Avro to define its supported types. BTW, the default value of `true` here is good for compatibility: if a file source doesn't know about this API, it doesn't need to implement it, and the behavior is unchanged, i.e. no check is applied.
- I meant to say it's not free to remove or change the signature later once we add it. Do we plan to refactor or remove this …
- The entire `FileFormat` will be migrated to Data Source V2 in the future. `FileFormat` will still be there for backward compatibility, and I don't think we will update it frequently.
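For context, the verification that the CSV and JSON changes below remove from each call site is presumably performed in one central place that dispatches to this new hook. A minimal sketch, assuming the `DataSourceUtils.verifyReadSchema`/`verifyWriteSchema` names seen in the removed call sites; the private helper and the exact error message are illustrative, not the PR's verbatim code:

```scala
package org.apache.spark.sql.execution.datasources

import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.types.StructType

// Sketch: walk the top-level fields of the schema and ask the concrete
// FileFormat whether it supports each field's data type, failing analysis
// early on the first unsupported one. The message text is an assumption.
object DataSourceUtilsSketch {

  def verifyReadSchema(format: FileFormat, schema: StructType): Unit =
    verifySchema(format, schema, isReadPath = true)

  def verifyWriteSchema(format: FileFormat, schema: StructType): Unit =
    verifySchema(format, schema, isReadPath = false)

  private def verifySchema(
      format: FileFormat,
      schema: StructType,
      isReadPath: Boolean): Unit = {
    schema.foreach { field =>
      if (!format.supportDataType(field.dataType, isReadPath)) {
        throw new AnalysisException(
          s"$format data source does not support ${field.dataType.catalogString} data type.")
      }
    }
  }
}
```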
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVFileFormat.scala:

```diff
@@ -66,7 +66,6 @@ class CSVFileFormat extends TextBasedFileFormat with DataSourceRegister {
       job: Job,
       options: Map[String, String],
       dataSchema: StructType): OutputWriterFactory = {
-    DataSourceUtils.verifyWriteSchema(this, dataSchema)
     val conf = job.getConfiguration
     val csvOptions = new CSVOptions(
       options,
```
```diff
@@ -98,7 +97,6 @@ class CSVFileFormat extends TextBasedFileFormat with DataSourceRegister {
       filters: Seq[Filter],
       options: Map[String, String],
       hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
-    DataSourceUtils.verifyReadSchema(this, dataSchema)
     val broadcastedHadoopConf =
       sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
```
```diff
@@ -153,6 +151,15 @@ class CSVFileFormat extends TextBasedFileFormat with DataSourceRegister {
   override def hashCode(): Int = getClass.hashCode()

   override def equals(other: Any): Boolean = other.isInstanceOf[CSVFileFormat]

+  override def supportDataType(dataType: DataType, isReadPath: Boolean): Boolean = dataType match {
+    case _: AtomicType => true
+
+    case udt: UserDefinedType[_] => supportDataType(udt.sqlType, isReadPath)
+
+    case _ => false
+  }
 }

 private[csv] class CsvOutputWriter(
```
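CSV is flat-only, so its whitelist is just atomic types (plus UDTs that reduce to one). A hypothetical spark-shell probe, not part of the PR, illustrating the behavior:

```scala
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.types._

val csv = new CSVFileFormat()

// Atomic types pass the whitelist.
csv.supportDataType(StringType, isReadPath = false)                              // true
// Nested types fall through to the default case and are rejected, so a bad
// schema now fails fast at analysis time instead of deep in execution.
csv.supportDataType(ArrayType(IntegerType), isReadPath = false)                  // false
csv.supportDataType(new StructType().add("a", IntegerType), isReadPath = false)  // false
```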
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/json/JsonFileFormat.scala:

```diff
@@ -30,7 +30,7 @@ import org.apache.spark.sql.catalyst.json.{JacksonGenerator, JacksonParser, JSON
 import org.apache.spark.sql.catalyst.util.CompressionCodecs
 import org.apache.spark.sql.execution.datasources._
 import org.apache.spark.sql.sources._
-import org.apache.spark.sql.types.{StringType, StructType}
+import org.apache.spark.sql.types._
 import org.apache.spark.util.SerializableConfiguration

 class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister {
```

Review comment on the import change:

- If we employ the blacklist, I think it'd be better not to fold these imports.
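To make the terminology in that comment concrete, here is a schematic contrast of the two styles; the blacklist variant is hypothetical and is not what this PR does:

```scala
import org.apache.spark.sql.types._

// Whitelist style (what this PR uses): reject anything not explicitly matched,
// so newly added Catalyst types are unsupported until listed here.
def whitelistStyle(dataType: DataType): Boolean = dataType match {
  case _: AtomicType => true
  case _ => false
}

// Blacklist style (hypothetical alternative): accept everything except
// explicitly unsupported types, so new types are supported by default.
def blacklistStyle(dataType: DataType): Boolean = dataType match {
  case _: CalendarIntervalType => false
  case _ => true
}
```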
```diff
@@ -65,8 +65,6 @@ class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister {
       job: Job,
       options: Map[String, String],
       dataSchema: StructType): OutputWriterFactory = {
-    DataSourceUtils.verifyWriteSchema(this, dataSchema)
-
     val conf = job.getConfiguration
     val parsedOptions = new JSONOptions(
       options,
```
```diff
@@ -98,8 +96,6 @@ class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister {
       filters: Seq[Filter],
       options: Map[String, String],
       hadoopConf: Configuration): PartitionedFile => Iterator[InternalRow] = {
-    DataSourceUtils.verifyReadSchema(this, dataSchema)
-
     val broadcastedHadoopConf =
       sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
```
```diff
@@ -148,6 +144,23 @@ class JsonFileFormat extends TextBasedFileFormat with DataSourceRegister {
   override def hashCode(): Int = getClass.hashCode()

   override def equals(other: Any): Boolean = other.isInstanceOf[JsonFileFormat]

+  override def supportDataType(dataType: DataType, isReadPath: Boolean): Boolean = dataType match {
+    case _: AtomicType => true
+
+    case st: StructType => st.forall { f => supportDataType(f.dataType, isReadPath) }
+
+    case ArrayType(elementType, _) => supportDataType(elementType, isReadPath)
+
+    case MapType(keyType, valueType, _) =>
+      supportDataType(keyType, isReadPath) && supportDataType(valueType, isReadPath)
+
+    case udt: UserDefinedType[_] => supportDataType(udt.sqlType, isReadPath)
+
+    case _: NullType => true
+
+    case _ => false
+  }
 }

 private[json] class JsonOutputWriter(
```

Review discussion on the new `supportDataType` override:

- Yeah, supported types are very specific to each data source's implementation.
- Why does JSON support null type but CSV doesn't?
- Currently null type is not handled in UnivocityParser.
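JSON allows nested schemas, so its check recurses structurally through structs, arrays, and maps down to the leaf types. A hypothetical spark-shell probe, not part of the PR:

```scala
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.types._

val json = new JsonFileFormat()

// Nested containers are accepted as long as all their leaf types are.
json.supportDataType(MapType(StringType, ArrayType(IntegerType)), isReadPath = true)  // true
// Types with no matching case, e.g. CalendarIntervalType, are rejected.
json.supportDataType(CalendarIntervalType, isReadPath = true)                         // false
```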
Review comment on the overall approach:

- `FileFormat` is internal, so this is not about public API, just about design choice. Generally it's OK to have a central place to put some business logic for different cases. However, here we can't access all `FileFormat` implementations: Hive ORC is in the Hive module. So the only choice is to dispatch the business logic into the implementations. +1 on the approach taken by this PR.
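That dispatch also works for formats that live outside this module. A hedged sketch of how an external source (an Avro-style format here; the trait name and the chosen whitelist are hypothetical) could declare its own supported types by overriding the hook, with no change to the central verification code:

```scala
import org.apache.spark.sql.execution.datasources.FileFormat
import org.apache.spark.sql.types._

// Hypothetical third-party format sketch (not from this PR): an external
// module such as Avro declares its own type whitelist by overriding
// supportDataType; the central verifier picks it up via dynamic dispatch.
trait AvroLikeFormat extends FileFormat {
  override def supportDataType(dataType: DataType, isReadPath: Boolean): Boolean =
    dataType match {
      case _: AtomicType => true
      case st: StructType => st.forall(f => supportDataType(f.dataType, isReadPath))
      case ArrayType(elementType, _) => supportDataType(elementType, isReadPath)
      // Reject anything this format cannot encode, e.g. CalendarIntervalType.
      case _ => false
    }
}
```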