-
Notifications
You must be signed in to change notification settings - Fork 28.1k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[SPARK-19724][SQL]create a managed table with an existed default table should throw an exception #17272
[SPARK-19724][SQL]create a managed table with an existed default table should throw an exception #17272
Changes from all commits
a0ba419
65d7ea9
0e753cc
c8d9b77
cd4a091
739f207
4351cd7
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -164,15 +164,6 @@ abstract class ExternalCatalogSuite extends SparkFunSuite with BeforeAndAfterEac | |
assert(actual.tableType === CatalogTableType.EXTERNAL) | ||
} | ||
|
||
test("create table when the table already exists") { | ||
val catalog = newBasicCatalog() | ||
assert(catalog.listTables("db2").toSet == Set("tbl1", "tbl2")) | ||
val table = newTable("tbl1", "db2") | ||
intercept[TableAlreadyExistsException] { | ||
catalog.createTable(table, ignoreIfExists = false) | ||
} | ||
} | ||
|
||
test("drop table") { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. since in |
||
val catalog = newBasicCatalog() | ||
assert(catalog.listTables("db2").toSet == Set("tbl1", "tbl2")) | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,8 +19,6 @@ package org.apache.spark.sql.execution.command | |
|
||
import java.net.URI | ||
|
||
import org.apache.hadoop.fs.Path | ||
|
||
import org.apache.spark.sql._ | ||
import org.apache.spark.sql.catalyst.catalog._ | ||
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan | ||
|
@@ -155,6 +153,8 @@ case class CreateDataSourceTableAsSelectCommand( | |
} else { | ||
table.storage.locationUri | ||
} | ||
|
||
sparkSession.sessionState.catalog.checkTableOrPathExists(table, ignoreIfExists = false) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. because in following |
||
val result = saveDataIntoTable( | ||
sparkSession, table, tableLocation, query, SaveMode.Overwrite, tableExists = false) | ||
val newTable = table.copy( | ||
|
@@ -163,7 +163,9 @@ case class CreateDataSourceTableAsSelectCommand( | |
// the schema of df). It is important since the nullability may be changed by the relation | ||
// provider (for example, see org.apache.spark.sql.parquet.DefaultSource). | ||
schema = result.schema) | ||
sessionState.catalog.createTable(newTable, ignoreIfExists = false) | ||
// we have already checked that the table/path exists above, before saveDataIntoTable, so here we | ||
// set ignoreIfExists to true | ||
sessionState.catalog.createTable(newTable, ignoreIfExists = true) | ||
|
||
result match { | ||
case fs: HadoopFsRelation if table.partitionColumnNames.nonEmpty && | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We have checked whether the table exists in SessionCatalog.createTable.