diff --git a/fe/fe-core/src/main/java/org/apache/doris/common/util/LocationPath.java b/fe/fe-core/src/main/java/org/apache/doris/common/util/LocationPath.java
index d11b8121c6c0a1..e4b9aa0b25c121 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/common/util/LocationPath.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/common/util/LocationPath.java
@@ -18,6 +18,7 @@
 package org.apache.doris.common.util;
 
 import org.apache.doris.common.UserException;
+import org.apache.doris.datasource.property.storage.AzurePropertyUtils;
 import org.apache.doris.datasource.property.storage.StorageProperties;
 import org.apache.doris.datasource.property.storage.exception.StoragePropertiesException;
 import org.apache.doris.fs.FileSystemType;
@@ -307,6 +308,10 @@ public static String getTempWritePath(String loc, String prefix) {
     }
 
     public TFileType getTFileTypeForBE() {
+        if ((SchemaTypeMapper.ABFS.getSchema().equals(schema) || SchemaTypeMapper.ABFSS.getSchema()
+                .equals(schema)) && AzurePropertyUtils.isOneLakeLocation(normalizedLocation)) {
+            return TFileType.FILE_HDFS;
+        }
         if (StringUtils.isNotBlank(normalizedLocation) && isHdfsOnOssEndpoint(normalizedLocation)) {
             return TFileType.FILE_HDFS;
         }
@@ -324,6 +329,10 @@ public Path toStorageLocation() {
     }
 
     public FileSystemType getFileSystemType() {
+        if ((SchemaTypeMapper.ABFS.getSchema().equals(schema) || SchemaTypeMapper.ABFSS.getSchema()
+                .equals(schema)) && AzurePropertyUtils.isOneLakeLocation(normalizedLocation)) {
+            return FileSystemType.HDFS;
+        }
         return SchemaTypeMapper.fromSchemaToFileSystemType(schema);
     }
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
index 0bdfc2a4337929..245da66724b324 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/metastore/IcebergRestProperties.java
@@ -21,7 +21,6 @@
 import org.apache.doris.datasource.property.ConnectorProperty;
 import org.apache.doris.datasource.property.ParamRules;
 import org.apache.doris.datasource.property.storage.AbstractS3CompatibleProperties;
-import org.apache.doris.datasource.property.storage.HdfsCompatibleProperties;
 import org.apache.doris.datasource.property.storage.StorageProperties;
 
 import com.google.common.collect.Maps;
@@ -86,6 +85,7 @@ public class IcebergRestProperties extends AbstractIcebergProperties {
 
     @ConnectorProperty(names = {"iceberg.rest.oauth2.credential"},
             required = false,
+            sensitive = true,
             description = "The oauth2 credential for the iceberg rest catalog service.")
     private String icebergRestOauth2Credential;
 
@@ -150,19 +150,10 @@ public class IcebergRestProperties extends AbstractIcebergProperties {
 
     @ConnectorProperty(names = {"iceberg.rest.secret-access-key"},
             required = false,
+            sensitive = true,
             description = "The secret access key for the iceberg rest catalog service.")
     private String icebergRestSecretAccessKey = "";
 
-    @ConnectorProperty(names = {"iceberg.rest.connection-timeout-ms"},
-            required = false,
-            description = "Connection timeout in milliseconds for the REST catalog HTTP client. Default: 10000 (10s).")
-    private String icebergRestConnectionTimeoutMs = "10000";
-
-    @ConnectorProperty(names = {"iceberg.rest.socket-timeout-ms"},
-            required = false,
-            description = "Socket timeout in milliseconds for the REST catalog HTTP client. Default: 60000 (60s).")
-    private String icebergRestSocketTimeoutMs = "60000";
-
     protected IcebergRestProperties(Map<String, String> props) {
         super(props);
     }
@@ -269,13 +260,6 @@ private void addOptionalProperties() {
         if (isIcebergRestVendedCredentialsEnabled()) {
             icebergRestCatalogProperties.put(VENDED_CREDENTIALS_HEADER, VENDED_CREDENTIALS_VALUE);
         }
-
-        if (Strings.isNotBlank(icebergRestConnectionTimeoutMs)) {
-            icebergRestCatalogProperties.put("rest.client.connection-timeout-ms", icebergRestConnectionTimeoutMs);
-        }
-        if (Strings.isNotBlank(icebergRestSocketTimeoutMs)) {
-            icebergRestCatalogProperties.put("rest.client.socket-timeout-ms", icebergRestSocketTimeoutMs);
-        }
     }
 
     private void addAuthenticationProperties() {
@@ -339,14 +323,12 @@ public void toFileIOProperties(List<StorageProperties> storagePropertiesList, Map<String, String> fileIOProperties, Configuration conf) {
         for (StorageProperties storageProperties : storagePropertiesList) {
-            if (storageProperties instanceof HdfsCompatibleProperties) {
-                storageProperties.getBackendConfigProperties().forEach(conf::set);
-            } else if (storageProperties instanceof AbstractS3CompatibleProperties) {
+            if (storageProperties instanceof AbstractS3CompatibleProperties) {
                 // For all S3-compatible storage types, put properties in fileIOProperties map
                 toS3FileIOProperties((AbstractS3CompatibleProperties) storageProperties, fileIOProperties);
             } else {
                 // For other storage types, just use fileIOProperties map
-                fileIOProperties.putAll(storageProperties.getBackendConfigProperties());
+                conf.addResource(storageProperties.getHadoopStorageConfig());
            }
        }
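With the toFileIOProperties() change above, storage types that are not S3-compatible no longer copy their backend config into the FileIO property map; they contribute their settings through the shared Hadoop Configuration instead. Below is a minimal sketch of that hand-off, assuming only the StorageProperties.createPrimary() and getHadoopStorageConfig() APIs exercised by the tests further down; the property map here is a hypothetical stand-in for real catalog properties.

```java
import org.apache.doris.datasource.property.storage.StorageProperties;

import org.apache.hadoop.conf.Configuration;

import java.util.HashMap;
import java.util.Map;

public class HadoopConfHandOffSketch {
    public static void main(String[] args) throws Exception {
        Map<String, String> catalogProps = new HashMap<>(); // hypothetical catalog properties
        // ... fill in Azure OAuth2 / Iceberg REST properties, e.g. as in AzurePropertiesTest.testOneLake()

        StorageProperties storage = StorageProperties.createPrimary(catalogProps);
        Configuration conf = new Configuration();
        // Mirrors the new else-branch above: non-S3 backends hand their settings to the Hadoop Configuration.
        conf.addResource(storage.getHadoopStorageConfig());
    }
}
```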
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzureProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzureProperties.java
index a0e217f2b51b39..b4848aa61b6d39 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzureProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzureProperties.java
@@ -20,8 +20,11 @@
 import org.apache.doris.common.Config;
 import org.apache.doris.common.UserException;
 import org.apache.doris.datasource.property.ConnectorProperty;
+import org.apache.doris.datasource.property.ParamRules;
+import org.apache.doris.datasource.property.storage.exception.AzureAuthType;
 
 import com.google.common.base.Strings;
+import com.google.common.collect.ImmutableSet;
 import lombok.Getter;
 import lombok.Setter;
 import org.apache.hadoop.conf.Configuration;
@@ -29,6 +32,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Set;
 import java.util.stream.Stream;
 
 /**
@@ -67,6 +71,7 @@ public class AzureProperties extends StorageProperties {
     @Getter
     @ConnectorProperty(names = {"azure.account_name", "azure.access_key", "s3.access_key",
             "AWS_ACCESS_KEY", "ACCESS_KEY", "access_key"},
+            required = false,
             sensitive = true,
             description = "The access key of S3.")
     protected String accountName = "";
@@ -75,9 +80,37 @@ public class AzureProperties extends StorageProperties {
     @ConnectorProperty(names = {"azure.account_key", "azure.secret_key", "s3.secret_key",
             "AWS_SECRET_KEY", "secret_key"},
             sensitive = true,
+            required = false,
             description = "The secret key of S3.")
     protected String accountKey = "";
 
+    @ConnectorProperty(names = {"azure.oauth2_client_id"},
+            required = false,
+            description = "The client ID of the Azure AD application.")
+    private String clientId;
+
+    @ConnectorProperty(names = {"azure.oauth2_client_secret"},
+            required = false,
+            sensitive = true,
+            description = "The client secret of the Azure AD application.")
+    private String clientSecret;
+
+
+    @ConnectorProperty(names = {"azure.oauth2_server_uri"},
+            required = false,
+            description = "The OAuth2 token endpoint (server URI) used to request access tokens.")
+    private String oauthServerUri;
+
+    @ConnectorProperty(names = {"azure.oauth2_account_host"},
+            required = false,
+            description = "The storage account host the OAuth2 settings apply to, e.g. onelake.dfs.fabric.microsoft.com.")
+    private String accountHost;
+
+    @ConnectorProperty(names = {"azure.auth_type"},
+            required = false,
+            description = "The auth type of Azure storage: SharedKey (default) or OAuth2.")
+    private String azureAuthType = AzureAuthType.SharedKey.name();
+
     @Getter
     @ConnectorProperty(names = {"container", "azure.bucket", "s3.bucket"},
             required = false,
@@ -108,11 +141,11 @@ public AzureProperties(Map<String, String> origProps) {
     public void initNormalizeAndCheckProps() {
         super.initNormalizeAndCheckProps();
         //check endpoint
-        if (!endpoint.endsWith(AZURE_ENDPOINT_SUFFIX)) {
-            throw new IllegalArgumentException(String.format("Endpoint '%s' is not valid. It should end with '%s'.",
-                    endpoint, AZURE_ENDPOINT_SUFFIX));
-        }
         this.endpoint = formatAzureEndpoint(endpoint, accountName);
+        buildRules().validate();
+        if (AzureAuthType.OAuth2.name().equals(azureAuthType) && (!isIcebergRestCatalog())) {
+            throw new UnsupportedOperationException("OAuth2 auth type is only supported for iceberg rest catalog");
+        }
     }
 
     public static boolean guessIsMe(Map<String, String> origProps) {
@@ -134,14 +167,25 @@
     @Override
     public Map<String, String> getBackendConfigProperties() {
+        if (!azureAuthType.equalsIgnoreCase("OAuth2")) {
+            Map<String, String> s3Props = new HashMap<>();
+            s3Props.put("AWS_ENDPOINT", endpoint);
+            s3Props.put("AWS_REGION", "dummy_region");
+            s3Props.put("AWS_ACCESS_KEY", accountName);
+            s3Props.put("AWS_SECRET_KEY", accountKey);
+            s3Props.put("AWS_NEED_OVERRIDE_ENDPOINT", "true");
+            s3Props.put("provider", "azure");
+            s3Props.put("use_path_style", usePathStyle);
+            return s3Props;
+        }
+        // OAuth2: expose the hadoop config entries instead of S3-style properties
         Map<String, String> s3Props = new HashMap<>();
-        s3Props.put("AWS_ENDPOINT", endpoint);
-        s3Props.put("AWS_REGION", "dummy_region");
-        s3Props.put("AWS_ACCESS_KEY", accountName);
-        s3Props.put("AWS_SECRET_KEY", accountKey);
-        s3Props.put("AWS_NEED_OVERRIDE_ENDPOINT", "true");
-        s3Props.put("provider", "azure");
-        s3Props.put("use_path_style", usePathStyle);
+        hadoopStorageConfig.forEach(entry -> {
+            String key = entry.getKey();
+
+            s3Props.put(key, entry.getValue());
+
+        });
         return s3Props;
     }
@@ -186,10 +230,19 @@ public void initializeHadoopStorageConfig() {
                 hadoopStorageConfig.set(k, v);
             }
         });
-        setAzureAccountKeys(hadoopStorageConfig, accountName, accountKey);
+        if (azureAuthType != null && azureAuthType.equalsIgnoreCase("OAuth2")) {
+            setHDFSAzureOauth2Config(hadoopStorageConfig);
+        } else {
+            setHDFSAzureAccountKeys(hadoopStorageConfig, accountName, accountKey);
+        }
+    }
+
+    @Override
+    protected Set<String> schemas() {
+        return ImmutableSet.of("wasb", "wasbs", "abfs", "abfss");
     }
 
-    private static void setAzureAccountKeys(Configuration conf, String accountName, String accountKey) {
+    private static void setHDFSAzureAccountKeys(Configuration conf, String accountName, String accountKey) {
         String[] endpoints = {
                 "dfs.core.windows.net",
                 "blob.core.windows.net"
@@ -201,4 +254,48 @@ private static void setAzureAccountKeys(Configuration conf, String accountName,
         conf.set("fs.azure.account.key", accountKey);
     }
 
+    private void setHDFSAzureOauth2Config(Configuration conf) {
+        conf.set(String.format("fs.azure.account.auth.type.%s", accountHost), "OAuth");
+        conf.set(String.format("fs.azure.account.oauth.provider.type.%s", accountHost),
+                "org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider");
+        conf.set(String.format("fs.azure.account.oauth2.client.id.%s", accountHost), clientId);
+        conf.set(String.format("fs.azure.account.oauth2.client.secret.%s", accountHost), clientSecret);
+        conf.set(String.format("fs.azure.account.oauth2.client.endpoint.%s", accountHost), oauthServerUri);
+    }
+
+    private ParamRules buildRules() {
+        return new ParamRules()
+                // OAuth2 requires the account host, client id, client secret and server uri
+                .requireIf(azureAuthType, AzureAuthType.OAuth2.name(), new String[]{accountHost,
+                        clientId,
+                        clientSecret,
+                        oauthServerUri}, "When auth_type is OAuth2, oauth2_account_host, oauth2_client_id"
+                        + ", oauth2_client_secret, and oauth2_server_uri are required.")
+                .requireIf(azureAuthType, AzureAuthType.SharedKey.name(), new String[]{accountName, accountKey},
+                        "When auth_type is SharedKey, account_name and account_key are required.");
+    }
+
+    // NB: Temporary check.
+    // Currently OAuth2 is used to access OneLake storage via HDFS (ABFS).
+    // In the future, OAuth2 will be supported via the native SDK to reduce maintenance.
+    // For now, OAuth2 authentication is only allowed for Iceberg REST catalogs.
+    // TODO: Remove this temporary check later.
+    private static final String ICEBERG_CATALOG_TYPE_KEY = "iceberg.catalog.type";
+    private static final String ICEBERG_CATALOG_TYPE_REST = "rest";
+    private static final String TYPE_KEY = "type";
+    private static final String ICEBERG_VALUE = "iceberg";
+
+    private boolean isIcebergRestCatalog() {
+        // check iceberg type
+        boolean hasIcebergType = origProps.entrySet().stream()
+                .anyMatch(entry -> TYPE_KEY.equalsIgnoreCase(entry.getKey())
+                        && ICEBERG_VALUE.equalsIgnoreCase(entry.getValue()));
+        if (!hasIcebergType && origProps.keySet().stream().anyMatch(TYPE_KEY::equalsIgnoreCase)) {
+            return false;
+        }
+        return origProps.entrySet().stream()
+                .anyMatch(entry -> ICEBERG_CATALOG_TYPE_KEY.equalsIgnoreCase(entry.getKey())
+                        && ICEBERG_CATALOG_TYPE_REST.equalsIgnoreCase(entry.getValue()));
+    }
+
 }
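For reference, a minimal sketch (not part of the patch) of the OAuth2 property set the new AzureProperties code path expects for OneLake. The property names come from the @ConnectorProperty declarations above, the values mirror AzurePropertiesTest.testOneLake() further down, and the credentials are placeholders; additional base properties set in that test's fixture may also be needed.

```java
import org.apache.doris.datasource.property.storage.AzureProperties;
import org.apache.doris.datasource.property.storage.StorageProperties;

import org.apache.hadoop.conf.Configuration;

import java.util.HashMap;
import java.util.Map;

public class OneLakeOAuth2ConfigSketch {
    public static void main(String[] args) throws Exception {
        Map<String, String> props = new HashMap<>();
        // OAuth2 is only accepted for Iceberg REST catalogs (see isIcebergRestCatalog() above).
        props.put("type", "iceberg");
        props.put("iceberg.catalog.type", "rest");
        props.put("fs.azure.support", "true");
        props.put("azure.endpoint", "https://onelake.dfs.fabric.microsoft.com");
        props.put("azure.auth_type", "OAuth2");
        props.put("azure.oauth2_account_host", "onelake.dfs.fabric.microsoft.com");
        props.put("azure.oauth2_client_id", "my-client-id");                      // placeholder
        props.put("azure.oauth2_client_secret", "my-client-secret");              // placeholder
        props.put("azure.oauth2_server_uri",
                "https://login.microsoftonline.com/my-tenant-id/oauth2/token");   // placeholder

        AzureProperties azure = (AzureProperties) StorageProperties.createPrimary(props);
        Configuration conf = azure.getHadoopStorageConfig();
        // The ABFS OAuth settings are scoped to the account host, e.g.:
        System.out.println(conf.get("fs.azure.account.auth.type.onelake.dfs.fabric.microsoft.com")); // OAuth
    }
}
```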
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
index 8c986b74da0208..f126620fba68a7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/AzurePropertyUtils.java
@@ -25,6 +25,7 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.Map;
+import java.util.regex.Pattern;
 
 public class AzurePropertyUtils {
 
@@ -69,10 +70,16 @@ public static String validateAndNormalizeUri(String path) throws UserException {
                 || lower.startsWith("s3://"))) {
             throw new StoragePropertiesException("Unsupported Azure URI scheme: " + path);
         }
-
+        if (isOneLakeLocation(path)) {
+            return path;
+        }
         return convertToS3Style(path);
     }
 
+    private static final Pattern ONELAKE_PATTERN = Pattern.compile(
+            "abfs[s]?://([^@]+)@([^/]+)\\.dfs\\.fabric\\.microsoft\\.com(/.*)?", Pattern.CASE_INSENSITIVE);
+
+
     /**
      * Converts an Azure Blob Storage URI into a unified {@code s3://<container>/<path>} format.
      *
@@ -186,4 +193,8 @@ public static String validateAndGetUri(Map<String, String> props) {
                 .findFirst()
                 .orElseThrow(() -> new StoragePropertiesException("Properties must contain 'uri' key"));
     }
+
+    public static boolean isOneLakeLocation(String location) {
+        return ONELAKE_PATTERN.matcher(location).matches();
+    }
 }
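A quick illustration (not part of the patch) of which locations the new ONELAKE_PATTERN classifies as OneLake: it accepts abfs:// and abfss:// URIs on *.dfs.fabric.microsoft.com hosts, the same cases covered by LocationPathTest and AzurePropertyUtilsTest below.

```java
import org.apache.doris.datasource.property.storage.AzurePropertyUtils;

public class OneLakePatternSketch {
    public static void main(String[] args) {
        // Microsoft Fabric OneLake hosts match, for both abfs and abfss schemes:
        System.out.println(AzurePropertyUtils.isOneLakeLocation(
                "abfss://workspace-guid@onelake.dfs.fabric.microsoft.com/lakehouse/Files/data.parquet")); // true
        System.out.println(AzurePropertyUtils.isOneLakeLocation(
                "abfs://workspace-guid@onelake.dfs.fabric.microsoft.com/lakehouse/Files/data.parquet"));  // true
        // A plain ADLS Gen2 host does not match, so it keeps the existing S3-style handling:
        System.out.println(AzurePropertyUtils.isOneLakeLocation(
                "abfss://mycontainer@mystorageaccount.dfs.core.windows.net/data/2025/11/11/"));           // false
    }
}
```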
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/exception/AzureAuthType.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/exception/AzureAuthType.java
new file mode 100644
index 00000000000000..269ce5a8da0a0b
--- /dev/null
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/exception/AzureAuthType.java
@@ -0,0 +1,23 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.datasource.property.storage.exception;
+
+public enum AzureAuthType {
+    OAuth2,
+    SharedKey;
+}
diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java
index a896931babeedc..0051ea494b0871 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java
@@ -283,4 +283,21 @@ public void testHdfsStorageLocationConvert() {
         Assertions.assertEquals(location, locationPath.getNormalizedLocation());
     }
 
+    @Test
+    public void testOnelakeStorageLocationConvert() {
+        String location = "abfss://1a2b3c4d-1234-5678-abcd-9876543210ef@onelake.dfs.fabric.microsoft.com/myworkspace/lakehouse/default/Files/data/test.parquet";
+        LocationPath locationPath = LocationPath.of(location, STORAGE_PROPERTIES_MAP);
+        Assertions.assertEquals(TFileType.FILE_HDFS, locationPath.getTFileTypeForBE());
+        Assertions.assertEquals(FileSystemType.HDFS, locationPath.getFileSystemType());
+        location = "abfs://1a2b3c4d-1234-5678-abcd-9876543210ef@onelake.dfs.fabric.microsoft.com/myworkspace/lakehouse/default/Files/data/test.parquet";
+        locationPath = LocationPath.of(location, STORAGE_PROPERTIES_MAP);
+        Assertions.assertEquals(TFileType.FILE_HDFS, locationPath.getTFileTypeForBE());
+        Assertions.assertEquals(FileSystemType.HDFS, locationPath.getFileSystemType());
+        location = "abfss://mycontainer@mystorageaccount.dfs.core.windows.net/data/2025/11/11/";
+        locationPath = LocationPath.of(location, STORAGE_PROPERTIES_MAP);
+        Assertions.assertEquals(TFileType.FILE_S3, locationPath.getTFileTypeForBE());
+        Assertions.assertEquals(FileSystemType.S3, locationPath.getFileSystemType());
+
+    }
+
 }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertiesTest.java
index 7542bdbce29220..927a9f1c1484db 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertiesTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertiesTest.java
@@ -20,6 +20,7 @@
 import org.apache.doris.common.UserException;
 import org.apache.doris.datasource.property.storage.exception.StoragePropertiesException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.junit.jupiter.api.Assertions;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
@@ -84,7 +85,7 @@ public void testMissingProvider() throws UserException {
         origProps.put("s3.endpoint", "https://mystorageaccount.net");
         // Expect an exception due to missing provider
         origProps.put("provider", "azure");
-        Assertions.assertThrows(IllegalArgumentException.class, () ->
+        Assertions.assertDoesNotThrow(() ->
                         StorageProperties.createPrimary(origProps),
                 "Endpoint 'https://mystorageaccount.net' is not valid. It should end with '.blob.core.windows.net'.");
     }
@@ -110,7 +111,7 @@ public void testParsingUri() throws Exception {
         Assertions.assertEquals("s3://mycontainer/blob.txt",
                 azureProperties.validateAndNormalizeUri("https://mystorageaccount.blob.core.windows.net/mycontainer/blob.txt"));
         Assertions.assertThrowsExactly(StoragePropertiesException.class, () ->
-                azureProperties.validateAndGetUri(origProps),
+                        azureProperties.validateAndGetUri(origProps),
                 "props must contain uri");
         origProps.put("uri", "https://mystorageaccount.blob.core.windows.net/mycontainer/blob.txt");
         Assertions.assertEquals("https://mystorageaccount.blob.core.windows.net/mycontainer/blob.txt",
@@ -170,4 +171,46 @@ public void testEmptyPath() throws UserException {
         Assertions.assertThrows(StoragePropertiesException.class, () ->
                 azureProperties.validateAndNormalizeUri(""), "Path cannot be empty.");
     }
+
+    @Test
+    public void testOneLake() throws UserException {
+        origProps.put("azure.auth_type", "OAuth2");
+        origProps.put("azure.endpoint", "https://onelake.dfs.fabric.microsoft.com");
+        Assertions.assertThrows(StoragePropertiesException.class, () ->
+                StorageProperties.createPrimary(origProps), "For OAuth2 authentication, please provide oauth2_client_id, "
+                + "oauth2_tenant_id, oauth2_client_secret, and oauth2_server_uri.");
+        origProps.put("azure.oauth2_client_id", "5c64f06f-5289-5289-5289-5aa0820ee310");
+        Assertions.assertThrows(StoragePropertiesException.class, () ->
+                StorageProperties.createPrimary(origProps), "For OAuth2 authentication, please provide oauth2_client_id, "
+                + "oauth2_tenant_id, oauth2_client_secret, and oauth2_server_uri.");
+        origProps.put("azure.oauth2_tenant_id", "72f988bf-5289-5289-5289-2d7cd011db47");
+        Assertions.assertThrows(StoragePropertiesException.class, () ->
+                StorageProperties.createPrimary(origProps), "For OAuth2 authentication, please provide oauth2_client_id, "
+                + "oauth2_tenant_id, oauth2_client_secret, and oauth2_server_uri.");
+        origProps.put("azure.oauth2_client_secret", "myAzureClientSecret");
+        Assertions.assertThrows(StoragePropertiesException.class, () ->
+                StorageProperties.createPrimary(origProps), "For OAuth2 authentication, please provide oauth2_client_id, "
+                + "oauth2_tenant_id, oauth2_client_secret, and oauth2_server_uri.");
+        origProps.put("azure.oauth2_server_uri", "https://login.microsoftonline.com/72f988bf-5289-5289-5289-2d7cd011db47/oauth2/token");
+        Assertions.assertThrows(StoragePropertiesException.class, () ->
+                StorageProperties.createPrimary(origProps), "For OAuth2 authentication, please provide oauth2_client_id, "
+                + "oauth2_tenant_id, oauth2_client_secret, and oauth2_server_uri.");
+        origProps.put("azure.oauth2_account_host", "onelake.dfs.fabric.microsoft.com");
+        Assertions.assertThrows(StoragePropertiesException.class, () ->
+                StorageProperties.createPrimary(origProps), "For OAuth2 authentication, please provide oauth2_client_id, "
+                + "oauth2_tenant_id, oauth2_client_secret, and oauth2_server_uri.");
+        origProps.put("fs.azure.support", "true");
+        Assertions.assertThrows(UnsupportedOperationException.class, () ->
+                StorageProperties.createPrimary(origProps), "Azure OAuth2 is not supported in the current backend.");
+        origProps.put("type", "iceberg");
+        origProps.put("iceberg.catalog.type", "rest");
+        AzureProperties azureProperties = (AzureProperties) StorageProperties.createPrimary(origProps);
+        Configuration hadoopStorageConfig = azureProperties.getHadoopStorageConfig();
+        Assertions.assertEquals("OAuth", hadoopStorageConfig.get("fs.azure.account.auth.type.onelake.dfs.fabric.microsoft.com"));
+        Assertions.assertEquals("org.apache.hadoop.fs.azurebfs.oauth2.ClientCredsTokenProvider", hadoopStorageConfig.get("fs.azure.account.oauth.provider.type.onelake.dfs.fabric.microsoft.com"));
+        Assertions.assertEquals("5c64f06f-5289-5289-5289-5aa0820ee310", hadoopStorageConfig.get("fs.azure.account.oauth2.client.id.onelake.dfs.fabric.microsoft.com"));
+        Assertions.assertEquals("myAzureClientSecret", hadoopStorageConfig.get("fs.azure.account.oauth2.client.secret.onelake.dfs.fabric.microsoft.com"));
+        Assertions.assertEquals("https://login.microsoftonline.com/72f988bf-5289-5289-5289-2d7cd011db47/oauth2/token", hadoopStorageConfig.get("fs.azure.account.oauth2.client.endpoint.onelake.dfs.fabric.microsoft.com"));
+
+    }
 }
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertyUtilsTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertyUtilsTest.java
index dc6eb8ad74c015..cbc584d9a10b0f 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertyUtilsTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/AzurePropertyUtilsTest.java
@@ -43,6 +43,22 @@ public void testS3Uri() throws Exception {
         Assertions.assertEquals(expected, AzurePropertyUtils.validateAndNormalizeUri(input));
     }
 
+    @Test
+    public void testAbfssUri() throws Exception {
+        String input = "abfss://container@account.blob.core.windows.net/data/file.txt";
+        String expected = "s3://container/data/file.txt";
+        Assertions.assertEquals(expected, AzurePropertyUtils.validateAndNormalizeUri(input));
+        input = "abfs://container@account.blob.core.windows.net/data/file.txt";
+        expected = "s3://container/data/file.txt";
+        Assertions.assertEquals(expected, AzurePropertyUtils.validateAndNormalizeUri(input));
+        input = "abfss://1a2b3c4d-1234-5678-abcd-9876543210ef@onelake.dfs.fabric.microsoft.com/myworkspace/lakehouse/default/Files/data/test.parquet";
+        Assertions.assertEquals(input,
+                AzurePropertyUtils.validateAndNormalizeUri(input));
+        input = "abfs://1a2b3c4d-1234-5678-abcd-9876543210ef@onelake.dfs.fabric.microsoft.com/myworkspace/lakehouse/default/Files/data/test.parquet";
+        Assertions.assertEquals(input,
+                AzurePropertyUtils.validateAndNormalizeUri(input));
+    }
+
     @Test
     public void testAbfssUriWithoutPath() throws Exception {
         String input = "abfss://container@account.blob.core.windows.net";