Migrate deltalake/databricks tests to native filesystem
anusudarsan committed Mar 25, 2024
1 parent 3157d50 · commit 0a6825e
Showing 5 changed files with 12 additions and 9 deletions.
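
The four properties files below switch the Delta Lake and Hive product-test catalogs from the legacy Hadoop-based filesystem to Trino's native S3 filesystem: fs.hadoop.enabled is set to false, fs.native-s3.enabled is turned on, and the canned-ACL setting moves from the hive.s3.* prefix to the s3.* prefix. The last file updates the JMX product test to match the MBeans that remain once the Hadoop filesystem is disabled. A minimal sketch of a catalog file after the migration, with illustrative values only (the actual files below read the region and Databricks settings from environment variables):

connector.name=delta_lake
hive.metastore=glue
# illustrative region; the real configs use ${ENV:AWS_REGION}
hive.metastore.glue.region=us-east-1
# disable the legacy Hadoop filesystem and enable the native S3 one
fs.hadoop.enabled=false
fs.native-s3.enabled=true
# native equivalent of hive.s3.upload-acl-type: let the bucket owner read the written files
s3.canned-acl=BUCKET_OWNER_FULL_CONTROL
delta.enable-non-concurrent-writes=true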
File 1 of 5:
@@ -3,6 +3,8 @@ hive.metastore.uri=https://${ENV:DATABRICKS_HOST}:443/api/2.0/unity-hms-proxy/me
 hive.metastore.http.client.bearer-token=${ENV:DATABRICKS_TOKEN}
 hive.metastore.http.client.additional-headers=X-Databricks-Catalog-Name:${ENV:DATABRICKS_UNITY_CATALOG_NAME}
 hive.metastore.http.client.authentication.type=BEARER
+fs.hadoop.enabled=false
+fs.native-s3.enabled=true
 # We need to give access to bucket owner (the AWS account integrated with Databricks), otherwise files won't be readable from Databricks
-hive.s3.upload-acl-type=BUCKET_OWNER_FULL_CONTROL
+s3.canned-acl=BUCKET_OWNER_FULL_CONTROL
 delta.enable-non-concurrent-writes=true
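
In all four properties files the canned ACL keeps the same value but moves to the native property name: the legacy Hadoop-based S3 support read it from hive.s3.upload-acl-type, while the native S3 filesystem reads it from s3.canned-acl, so the Databricks-integrated AWS account that owns the bucket can still read the files Trino writes. Side by side:

# legacy Hadoop filesystem (fs.hadoop.enabled=true)
hive.s3.upload-acl-type=BUCKET_OWNER_FULL_CONTROL
# native S3 filesystem (fs.native-s3.enabled=true)
s3.canned-acl=BUCKET_OWNER_FULL_CONTROL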
File 2 of 5:
@@ -3,5 +3,7 @@ hive.metastore.uri=https://${ENV:DATABRICKS_HOST}:443/api/2.0/unity-hms-proxy/me
 hive.metastore.http.client.bearer-token=${ENV:DATABRICKS_TOKEN}
 hive.metastore.http.client.additional-headers=X-Databricks-Catalog-Name:${ENV:DATABRICKS_UNITY_CATALOG_NAME}
 hive.metastore.http.client.authentication.type=BEARER
+fs.hadoop.enabled=false
+fs.native-s3.enabled=true
 # We need to give access to bucket owner (the AWS account integrated with Databricks), otherwise files won't be readable from Databricks
-hive.s3.upload-acl-type=BUCKET_OWNER_FULL_CONTROL
+s3.canned-acl=BUCKET_OWNER_FULL_CONTROL
File 3 of 5:
@@ -1,8 +1,9 @@
 connector.name=delta_lake
 hive.metastore=glue
 hive.metastore.glue.region=${ENV:AWS_REGION}
-fs.hadoop.enabled=true
+fs.hadoop.enabled=false
+fs.native-s3.enabled=true
 # We need to give access to bucket owner (the AWS account integrated with Databricks), otherwise files won't be readable from Databricks
-hive.s3.upload-acl-type=BUCKET_OWNER_FULL_CONTROL
+s3.canned-acl=BUCKET_OWNER_FULL_CONTROL
 delta.enable-non-concurrent-writes=true
 delta.hive-catalog-name=hive
File 4 of 5:
@@ -1,9 +1,10 @@
 connector.name=hive
 hive.metastore=glue
 hive.metastore.glue.region=${ENV:AWS_REGION}
-fs.hadoop.enabled=true
+fs.hadoop.enabled=false
+fs.native-s3.enabled=true
 # We need to give access to bucket owner (the AWS account integrated with Databricks), otherwise files won't be readable from Databricks
-hive.s3.upload-acl-type=BUCKET_OWNER_FULL_CONTROL
+s3.canned-acl=BUCKET_OWNER_FULL_CONTROL
 hive.non-managed-table-writes-enabled=true
 # Required by some product tests
 hive.hive-views.enabled=true
File 5 of 5:
@@ -34,11 +34,8 @@ public class TestDeltaLakeJmx
     public void testJmxTablesExposedByDeltaLakeConnectorBackedByGlueMetastore()
     {
         assertThat(onTrino().executeQuery("SHOW TABLES IN jmx.current LIKE '%name=delta%'")).containsOnly(
-                row("io.trino.hdfs:name=delta,type=trinofilesystemcachestats"),
-                row("io.trino.hdfs:name=delta,type=trinohdfsfilesystemstats"),
                 row("io.trino.plugin.hive.metastore.cache:name=delta,type=cachinghivemetastore"),
                 row("io.trino.plugin.hive.metastore.glue:name=delta,type=gluehivemetastore"),
-                row("io.trino.hdfs.s3:name=delta,type=trinos3filesystem"),
                 row("io.trino.plugin.hive:catalog=delta,name=delta,type=fileformatdatasourcestats"),
                 row("trino.plugin.deltalake.transactionlog:catalog=delta,name=delta,type=transactionlogaccess"));
     }
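
The three removed rows are the filesystem MBeans registered by the legacy Hadoop-based support (the io.trino.hdfs and io.trino.hdfs.s3 beans). With fs.hadoop.enabled=false those beans are no longer exposed, so the test now expects only the metastore caching and Glue beans, the file-format data-source stats bean, and the Delta transaction-log access bean.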
