Skip to content

Commit

Permalink
Rename function autoModifyParquetWriteLegacyFormatParameter to mayBeOverwriteParquetWriteLegacyFormatProp
Browse files Browse the repository at this point in the history
  • Loading branch information
xiarixiaoyao committed Dec 17, 2021
1 parent 1c350e0 commit 2bdeabf
Show file tree
Hide file tree
Showing 4 changed files with 7 additions and 7 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -315,7 +315,7 @@ public static HiveSyncConfig buildHiveSyncConfig(TypedProperties props, String b
// Now by default ParquetWriteSupport will write DecimalType to parquet as int32/int64 when the scale of decimalType < Decimal.MAX_LONG_DIGITS(),
// but AvroParquetReader, which is used by HoodieParquetReader, cannot read int32/int64 as DecimalType.
// Check whether the current schema contains such a DecimalType, and if so automatically set the value of "hoodie.parquet.writeLegacyFormat.enabled"
public static void autoModifyParquetWriteLegacyFormatParameter(Map<String, String> properties, StructType schema) {
public static void mayBeOverwriteParquetWriteLegacyFormatProp(Map<String, String> properties, StructType schema) {
if (DataTypeUtils.foundSmallPrecisionDecimalType(schema)
&& !Boolean.parseBoolean(properties.getOrDefault("hoodie.parquet.writeLegacyFormat.enabled", "false"))) {
properties.put("hoodie.parquet.writeLegacyFormat.enabled", "true");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@
import java.util.List;
import java.util.Map;

import static org.apache.hudi.DataSourceUtils.autoModifyParquetWriteLegacyFormatParameter;
import static org.apache.hudi.DataSourceUtils.mayBeOverwriteParquetWriteLegacyFormatProp;
import static org.apache.hudi.common.model.HoodieFileFormat.PARQUET;
import static org.apache.hudi.hive.ddl.HiveSyncMode.HMS;
import static org.hamcrest.CoreMatchers.containsString;
Expand Down Expand Up @@ -302,7 +302,7 @@ public void testAutoModifyParquetWriteLegacyFormatParameter(boolean smallDecimal
options.put("hoodie.parquet.writeLegacyFormat.enabled", String.valueOf(defaultWriteValue));

// start test
autoModifyParquetWriteLegacyFormatParameter(options, structType);
mayBeOverwriteParquetWriteLegacyFormatProp(options, structType);

// check result
boolean res = Boolean.parseBoolean(options.get("hoodie.parquet.writeLegacyFormat.enabled"));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@
import java.util.Map;
import java.util.Optional;

import static org.apache.hudi.DataSourceUtils.autoModifyParquetWriteLegacyFormatParameter;
import static org.apache.hudi.DataSourceUtils.mayBeOverwriteParquetWriteLegacyFormatProp;

/**
* DataSource V2 implementation for managing internal write logic. Only called internally.
Expand Down Expand Up @@ -69,7 +69,7 @@ public Optional<DataSourceWriter> createWriter(String writeUUID, StructType sche
Boolean.parseBoolean(HoodieTableConfig.POPULATE_META_FIELDS.defaultValue()));
Map<String, String> properties = options.asMap();
// Auto set the value of "hoodie.parquet.writeLegacyFormat.enabled"
autoModifyParquetWriteLegacyFormatParameter(properties, schema);
mayBeOverwriteParquetWriteLegacyFormatProp(properties, schema);
// 1st arg to createHoodieConfig is not really required to be set. but passing it anyways.
HoodieWriteConfig config = DataSourceUtils.createHoodieConfig(options.get(HoodieWriteConfig.AVRO_SCHEMA_STRING.key()).get(), path, tblName, properties);
boolean arePartitionRecordsSorted = HoodieInternalConfig.getBulkInsertIsPartitionRecordsSorted(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@

import java.util.Map;

import static org.apache.hudi.DataSourceUtils.autoModifyParquetWriteLegacyFormatParameter;
import static org.apache.hudi.DataSourceUtils.mayBeOverwriteParquetWriteLegacyFormatProp;

/**
* DataSource V2 implementation for managing internal write logic. Only called internally.
Expand All @@ -56,7 +56,7 @@ public Table getTable(StructType schema, Transform[] partitioning, Map<String, S
boolean arePartitionRecordsSorted = Boolean.parseBoolean(properties.getOrDefault(HoodieInternalConfig.BULKINSERT_ARE_PARTITIONER_RECORDS_SORTED,
Boolean.toString(HoodieInternalConfig.DEFAULT_BULKINSERT_ARE_PARTITIONER_RECORDS_SORTED)));
// Auto set the value of "hoodie.parquet.writeLegacyFormat.enabled"
autoModifyParquetWriteLegacyFormatParameter(properties, schema);
mayBeOverwriteParquetWriteLegacyFormatProp(properties, schema);
// 1st arg to createHoodieConfig is not really required to be set. but passing it anyways.
HoodieWriteConfig config = DataSourceUtils.createHoodieConfig(properties.get(HoodieWriteConfig.AVRO_SCHEMA_STRING.key()), path, tblName, properties);
return new HoodieDataSourceInternalTable(instantTime, config, schema, getSparkSession(),
Expand Down

0 comments on commit 2bdeabf

Please sign in to comment.