
Commit 41829e4
small
Feng Liu committed Jan 29, 2018
1 parent 2d903cf · commit 41829e4
Showing 2 changed files with 7 additions and 4 deletions.
HiveExternalCatalog.scala
@@ -114,7 +114,7 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat
    * should interpret these special data source properties and restore the original table metadata
    * before returning it.
    */
-  private[hive] def getRawTable(db: String, table: String): CatalogTable = withClient {
+  private[hive] def getRawTable(db: String, table: String): CatalogTable = {
     client.getTable(db, table)
   }
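
Dropping withClient here means getRawTable no longer wraps the metastore call itself; a plausible reading is that every caller already runs inside withClient, so the inner wrap was redundant. Below is a minimal, self-contained sketch of that calling pattern — ExampleCatalog, the synchronized-based withClient, and the stub types are simplified assumptions, not the real Spark classes:

```scala
object GetRawTableSketch {
  case class CatalogTable(db: String, name: String)

  trait HiveClient {
    def getTable(db: String, table: String): CatalogTable
  }

  class ExampleCatalog(client: HiveClient) {
    // Stand-in for the real withClient, which also translates Hive
    // exceptions; here it only serializes access to the client.
    private def withClient[T](body: => T): T = synchronized { body }

    // After the change: no withClient here, so a caller that already
    // holds the wrapper does not enter it a second time.
    private def getRawTable(db: String, table: String): CatalogTable =
      client.getTable(db, table)

    // Public entry points take the wrapper exactly once.
    def getTable(db: String, table: String): CatalogTable = withClient {
      getRawTable(db, table)
    }
  }

  def main(args: Array[String]): Unit = {
    val client = new HiveClient {
      def getTable(db: String, table: String): CatalogTable = CatalogTable(db, table)
    }
    println(new ExampleCatalog(client).getTable("default", "t1"))
  }
}
```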
HiveClientImpl.scala
@@ -346,15 +346,19 @@ private[hive] class HiveClientImpl(
     client.getDatabasesByPattern(pattern).asScala
   }
 
+  private def getRawTableOption(dbName: String, tableName: String): Option[HiveTable] = {
+    Option(client.getTable(dbName, tableName, false /* do not throw exception */))
+  }
+
   override def tableExists(dbName: String, tableName: String): Boolean = withHiveState {
-    Option(client.getTable(dbName, tableName, false /* do not throw exception */)).nonEmpty
+    getRawTableOption(dbName, tableName).nonEmpty
   }
 
   override def getTableOption(
       dbName: String,
       tableName: String): Option[CatalogTable] = withHiveState {
     logDebug(s"Looking up $dbName.$tableName")
-    Option(client.getTable(dbName, tableName, false)).map { h =>
+    getRawTableOption(dbName, tableName).map { h =>
       // Note: Hive separates partition columns and the schema, but for us the
       // partition columns are part of the schema
       val cols = h.getCols.asScala.map(fromHiveColumn)
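
The new getRawTableOption helper centralizes the null-to-Option translation that tableExists and getTableOption previously duplicated: Hive's getTable(db, table, false) returns null rather than throwing when the table is missing, and Option(...) maps that null to None. A runnable sketch of the same behavior, where FakeClient and HiveTable are made-up stand-ins for the Hive metastore client and its table type:

```scala
object GetRawTableOptionSketch {
  final class HiveTable(val name: String)

  object FakeClient {
    private val tables = Map("t1" -> new HiveTable("t1"))

    // Mirrors the relevant Hive behavior: returns null (instead of
    // throwing) when throwException is false and the table is absent.
    def getTable(db: String, table: String, throwException: Boolean): HiveTable =
      tables.get(table).getOrElse {
        if (throwException) throw new NoSuchElementException(s"$db.$table")
        null
      }
  }

  private def getRawTableOption(db: String, table: String): Option[HiveTable] =
    Option(FakeClient.getTable(db, table, false /* do not throw exception */))

  def main(args: Array[String]): Unit = {
    println(getRawTableOption("default", "t1").nonEmpty)      // true
    println(getRawTableOption("default", "missing").nonEmpty) // false
  }
}
```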
@@ -817,7 +821,6 @@ private[hive] class HiveClientImpl(
       uri.toURL
     }
     clientLoader.addJar(jarURL)
-    runSqlHive(s"ADD JAR $path")
   }
 
   def newSession(): HiveClientImpl = {
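
After this hunk, addJar registers the jar only through clientLoader.addJar, without also issuing ADD JAR via runSqlHive; presumably the extra Hive SQL round trip duplicated what the classloader registration already covers. A hedged reconstruction of the resulting method — FakeClientLoader and the simplified resolveURI below are stand-ins for the real clientLoader and Spark's URI handling, not the actual source:

```scala
import java.net.{URI, URL}

object AddJarSketch {
  object FakeClientLoader {
    // Stand-in for clientLoader.addJar, which adds the jar to the
    // isolated Hive client classloader.
    def addJar(url: URL): Unit = println(s"registered $url")
  }

  // Simplified stand-in for Spark's URI resolution: treat scheme-less
  // paths as local files.
  private def resolveURI(path: String): URI = {
    val uri = new URI(path)
    if (uri.getScheme != null) uri else new java.io.File(path).toURI
  }

  def addJar(path: String): Unit = {
    val jarURL = resolveURI(path).toURL
    FakeClientLoader.addJar(jarURL)
    // runSqlHive(s"ADD JAR $path") was removed here by this commit.
  }

  def main(args: Array[String]): Unit = addJar("/tmp/example.jar")
}
```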
