Skip to content

Commit

Permalink
fix
Browse files Browse the repository at this point in the history
  • Loading branch information
morningman committed Dec 6, 2022
1 parent 8edb597 commit 117f578
Show file tree
Hide file tree
Showing 10 changed files with 0 additions and 55 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ suite("test_hdfs_tvf") {
String enabled = context.config.otherConfigs.get("enableHiveTest")
if (enabled != null && enabled.equalsIgnoreCase("true")) {
try {
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""

// test csv format
uri = "${defaultFS}" + "/user/doris/preinstalled_data/csv_format_test/all_types.csv"
Expand Down Expand Up @@ -193,7 +192,6 @@ suite("test_hdfs_tvf") {
assertTrue(result2[0][0] == 5, "Insert should update 5 rows")
qt_insert """ select * from test_hdfs_tvf order by id; """
} finally {
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,6 @@ suite("test_hive_orc", "all_types") {
String hms_port = context.config.otherConfigs.get("hms_port")
String catalog_name = "hive_test_orc"
sql """admin set frontend config ("enable_multi_catalog" = "true")"""
sql """admin set frontend config ("enable_new_load_scan_node" = "true");"""
sql """drop catalog if exists ${catalog_name}"""
sql """
create catalog if not exists ${catalog_name} properties (
Expand All @@ -90,7 +89,6 @@ suite("test_hive_orc", "all_types") {
only_partition_col()

} finally {
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,6 @@ suite("test_hive_parquet", "p0") {
String hms_port = context.config.otherConfigs.get("hms_port")
String catalog_name = "hive_test_parquet"
sql """admin set frontend config ("enable_multi_catalog" = "true")"""
sql """admin set frontend config ("enable_new_load_scan_node" = "true");"""
sql """drop catalog if exists ${catalog_name}"""
sql """
create catalog if not exists ${catalog_name} properties (
Expand Down Expand Up @@ -176,7 +175,6 @@ suite("test_hive_parquet", "p0") {
q19()
q20()
} finally {
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -202,13 +202,6 @@ suite("test_array_load", "load_p0") {

try {
for ( i in 0..1 ) {
// should be deleted after new_load_scan is ready
if (i == 1) {
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
} else {
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}

// case1: import array data in json format and enable vectorized engine
try {
sql "DROP TABLE IF EXISTS ${testTable}"
Expand Down Expand Up @@ -280,7 +273,6 @@ suite("test_array_load", "load_p0") {
}
}
} finally {
try_sql("""ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");""")
}


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,6 @@ suite("test_load_json_column_exclude_schema_without_jsonpath", "p0") {

def load_array_data = {new_json_reader_flag, table_name, strip_flag, read_flag, format_flag, exprs, json_paths,
json_root, where_expr, fuzzy_flag, column_sep, file_name ->
// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""

// load the json data
streamLoad {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,6 @@ suite("test_load_json_null_to_nullable", "p0") {

def load_array_data = {new_json_reader_flag, table_name, strip_flag, read_flag, format_flag, exprs, json_paths,
json_root, where_expr, fuzzy_flag, column_sep, file_name ->
// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""

// load the json data
streamLoad {
table table_name
Expand Down Expand Up @@ -77,9 +74,6 @@ suite("test_load_json_null_to_nullable", "p0") {
assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
}
}

// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}

def check_data_correct = {table_name ->
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,6 @@ suite("test_load_json_with_jsonpath", "p0") {

def load_array_data = {new_json_reader_flag, table_name, strip_flag, read_flag, format_flag, exprs, json_paths,
json_root, where_expr, fuzzy_flag, column_sep, file_name ->

// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""

// load the json data
streamLoad {
table table_name
Expand Down Expand Up @@ -78,9 +74,6 @@ suite("test_load_json_with_jsonpath", "p0") {
assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
}
}

// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}

def check_data_correct = {table_name ->
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,6 @@ suite("test_hdfs_json_load", "p0") {

def load_from_hdfs1 = {new_json_reader_flag, strip_flag, fuzzy_flag, testTablex, label, fileName,
fsPath, hdfsUser, exprs, jsonpaths, json_root, columns_parameter, where ->
// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""

def hdfsFilePath = "${fsPath}/user/doris/preinstalled_data/json_format_test/${fileName}"
def result1= sql """
LOAD LABEL ${label} (
Expand Down Expand Up @@ -76,9 +73,6 @@ suite("test_hdfs_json_load", "p0") {
assertTrue(result1.size() == 1)
assertTrue(result1[0].size() == 1)
assertTrue(result1[0][0] == 0, "Query OK, 0 rows affected")

// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}

def check_load_result = {checklabel, testTablex ->
Expand Down
13 changes: 0 additions & 13 deletions regression-test/suites/load_p0/stream_load/test_json_load.groovy
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,6 @@ suite("test_json_load", "p0") {

def load_json_data = {new_json_reader_flag, label, strip_flag, read_flag, format_flag, exprs, json_paths,
json_root, where_expr, fuzzy_flag, file_name, ignore_failure=false ->
// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""

// load the json data
streamLoad {
Expand Down Expand Up @@ -150,9 +148,6 @@ suite("test_json_load", "p0") {
assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
}
}

// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}

def load_from_hdfs1 = {testTablex, label, hdfsFilePath, format, brokerName, hdfsUser, hdfsPasswd ->
Expand Down Expand Up @@ -529,8 +524,6 @@ suite("test_json_load", "p0") {
try {
sql "DROP TABLE IF EXISTS ${testTable}"
create_test_table3.call(testTable)
// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
// load the json data
streamLoad {
table "${testTable}"
Expand All @@ -557,16 +550,12 @@ suite("test_json_load", "p0") {
assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
}
}
// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
sql "sync"
qt_select13 "select * from ${testTable} order by id"


sql "DROP TABLE IF EXISTS ${testTable}"
create_test_table3.call(testTable)
// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
// load the json data
streamLoad {
table "${testTable}"
Expand All @@ -593,8 +582,6 @@ suite("test_json_load", "p0") {
assertTrue(json.NumberLoadedRows > 0 && json.LoadBytes > 0)
}
}
// should be deleted after new_load_scan is ready
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
sql "sync"
qt_select13 "select * from ${testTable} order by id"

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -31,13 +31,6 @@ suite("test_txt_special_delimiter", "p0") {
PROPERTIES ("replication_allocation" = "tag.location.default: 1");
"""
for ( i in 0..1 ) {
// should be deleted after new_load_scan is ready
if (i == 1) {
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
} else {
sql """ADMIN SET FRONTEND CONFIG ("enable_new_load_scan_node" = "true");"""
}

// test special_delimiter success
streamLoad {
table "${tableName}"
Expand Down

0 comments on commit 117f578

Please sign in to comment.