From fb986601f43e79c4ba7c6ad0b95e701b4e20b647 Mon Sep 17 00:00:00 2001 From: tisonkong <360548494@qq.com> Date: Sat, 29 Dec 2018 10:02:27 +0800 Subject: [PATCH 1/3] [CARBONDATA-3126]Correct some spell error in CarbonData --- .../core/constants/CarbonCommonConstants.java | 2 +- .../core/datastore/filesystem/CarbonFile.java | 2 +- .../core/datastore/filesystem/LocalCarbonFile.java | 2 +- .../carbondata/core/datastore/impl/FileFactory.java | 2 +- .../core/statusmanager/LoadMetadataDetails.java | 2 +- .../core/statusmanager/SegmentStatusManager.java | 2 +- .../apache/carbondata/core/util/CarbonProperties.java | 2 +- .../core/CarbonPropertiesValidationTest.java | 2 +- docs/hive-guide.md | 4 ++-- .../carbondata/examples/SparkSessionExample.scala | 6 +++--- .../apache/carbondata/examples/util/ExampleUtils.scala | 4 ++-- .../carbondata/hadoop/api/CarbonFileInputFormat.java | 2 +- .../carbondata/hadoop/api/CarbonTableInputFormat.java | 2 +- .../cluster/sdv/generated/ColumndictTestCase.scala | 4 ++-- .../spark/testsuite/datamap/TestDataMapCommand.scala | 2 +- .../StandardPartitionTableQueryTestCase.scala | 10 +++++----- .../scala/org/apache/carbondata/api/CarbonStore.scala | 8 ++++---- .../org/apache/spark/sql/test/TestQueryExecutor.scala | 2 +- .../org/apache/spark/sql/test/util/QueryTest.scala | 2 +- .../spark/sql/carbondata/datasource/TestUtil.scala | 2 +- .../execution/command/mutation/DeleteExecution.scala | 2 +- .../command/preaaggregate/PreAggregateListeners.scala | 2 +- .../spark/sql/test/Spark2TestQueryExecutor.scala | 4 ++-- .../carbondata/spark/util/AllDictionaryTestCase.scala | 4 ++-- .../spark/util/ExternalColumnDictionaryTestCase.scala | 4 ++-- .../processing/datatypes/StructDataType.java | 2 +- 26 files changed, 41 insertions(+), 41 deletions(-) diff --git a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java index 387bf3b90ff..0d7c8de6964 100644 --- a/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java +++ b/core/src/main/java/org/apache/carbondata/core/constants/CarbonCommonConstants.java @@ -1122,7 +1122,7 @@ private CarbonCommonConstants() { public static final int CARBON_SORT_STORAGE_INMEMORY_IN_MB_DEFAULT = 512; /* - * whether to enable prefetch for rowbatch to enhance row reconstruction during compaction + * whether to enable prefetch for rowBatch to enhance row reconstruction during compaction */ @CarbonProperty public static final String CARBON_COMPACTION_PREFETCH_ENABLE = diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/CarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/CarbonFile.java index ce50259e594..be083389170 100644 --- a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/CarbonFile.java +++ b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/CarbonFile.java @@ -36,7 +36,7 @@ public interface CarbonFile { CarbonFile[] listFiles(); - List listFiles(Boolean recurssive) throws IOException; + List listFiles(Boolean recursive) throws IOException; List listFiles(boolean recursive, CarbonFileFilter fileFilter) throws IOException; diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/LocalCarbonFile.java b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/LocalCarbonFile.java index 98d61f8f7fb..2cace558404 100644 --- 
a/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/LocalCarbonFile.java +++ b/core/src/main/java/org/apache/carbondata/core/datastore/filesystem/LocalCarbonFile.java @@ -168,7 +168,7 @@ public boolean delete() { } @Override - public List listFiles(Boolean recurssive) { + public List listFiles(Boolean recursive) { if (!file.isDirectory()) { return new ArrayList(); } diff --git a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java index 0d828ad877b..9ea6834757d 100644 --- a/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java +++ b/core/src/main/java/org/apache/carbondata/core/datastore/impl/FileFactory.java @@ -191,7 +191,7 @@ public static DataOutputStream getDataOutputStream(String path, FileType fileTyp * @param fileType * @param bufferSize * @param compressorName name of compressor to write this file - * @return data out put stram + * @return data out put stream * @throws IOException */ public static DataOutputStream getDataOutputStream(String path, FileType fileType, int bufferSize, diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java index b19e7743081..7a16379c991 100644 --- a/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java +++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/LoadMetadataDetails.java @@ -99,7 +99,7 @@ public void setIndexSize(String indexSize) { private static final Logger LOGGER = LogServiceFactory.getLogService(LoadMetadataDetails.class.getName()); - // dont remove static as the write will fail. + // don't remove static as the write will fail. private static final SimpleDateFormat parser = new SimpleDateFormat(CarbonCommonConstants.CARBON_TIMESTAMP_MILLIS); /** diff --git a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java index 32b1e78b493..4a5063fc264 100755 --- a/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java +++ b/core/src/main/java/org/apache/carbondata/core/statusmanager/SegmentStatusManager.java @@ -356,7 +356,7 @@ public static List updateDeletionStatus(AbsoluteTableIdentifier identifi if (listOfLoadFolderDetailsArray.length != 0) { updateDeletionStatus(identifier, loadIds, listOfLoadFolderDetailsArray, invalidLoadIds); if (invalidLoadIds.isEmpty()) { - // All or None , if anything fails then dont write + // All or None , if anything fails then don't write if (carbonTableStatusLock.lockWithRetries()) { LOG.info("Table status lock has been successfully acquired"); // To handle concurrency scenarios, always take latest metadata before writing diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java index 93d622db9d2..706d57a0dd9 100644 --- a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java +++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java @@ -84,7 +84,7 @@ public final class CarbonProperties { private static final CarbonProperties CARBONPROPERTIESINSTANCE = new CarbonProperties(); /** - * porpeties . 
+ * porpeties */ private Properties carbonProperties; diff --git a/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java b/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java index b2b03cdfc33..2500f71d799 100644 --- a/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java +++ b/core/src/test/java/org/apache/carbondata/core/CarbonPropertiesValidationTest.java @@ -37,7 +37,7 @@ public class CarbonPropertiesValidationTest extends TestCase { carbonProperties = CarbonProperties.getInstance(); } - @Test public void testvalidateLockType() + @Test public void testValidateLockType() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { Method validateMethodType = carbonProperties.getClass().getDeclaredMethod("validateLockType"); validateMethodType.setAccessible(true); diff --git a/docs/hive-guide.md b/docs/hive-guide.md index e67505797f3..0e52f1b3e93 100644 --- a/docs/hive-guide.md +++ b/docs/hive-guide.md @@ -58,9 +58,9 @@ import org.apache.spark.sql.CarbonSession._ val rootPath = "hdfs:///user/hadoop/carbon" val storeLocation = s"$rootPath/store" val warehouse = s"$rootPath/warehouse" -val metastoredb = s"$rootPath/metastore_db" +val metaStoreDB = s"$rootPath/metastore_db" -val carbon = SparkSession.builder().enableHiveSupport().config("spark.sql.warehouse.dir", warehouse).config(org.apache.carbondata.core.constants.CarbonCommonConstants.STORE_LOCATION, storeLocation).getOrCreateCarbonSession(storeLocation, metastoredb) +val carbon = SparkSession.builder().enableHiveSupport().config("spark.sql.warehouse.dir", warehouse).config(org.apache.carbondata.core.constants.CarbonCommonConstants.STORE_LOCATION, storeLocation).getOrCreateCarbonSession(storeLocation, metaStoreDB) carbon.sql("create table hive_carbon(id int, name string, scale decimal, country string, salary double) STORED BY 'carbondata'") carbon.sql("LOAD DATA INPATH '/sample.csv' INTO TABLE hive_carbon") diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala b/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala index 1164658d68f..66d4b71fabe 100644 --- a/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala +++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/SparkSessionExample.scala @@ -36,14 +36,14 @@ object SparkSessionExample { + "../../../..").getCanonicalPath val storeLocation = s"$rootPath/examples/spark2/target/store" val warehouse = s"$rootPath/examples/spark2/target/warehouse" - val metastoredb = s"$rootPath/examples/spark2/target/metastore_db" + val metaStoreDB = s"$rootPath/examples/spark2/target/metastore_db" // clean data folder if (true) { val clean = (path: String) => FileUtils.deleteDirectory(new File(path)) clean(storeLocation) clean(warehouse) - clean(metastoredb) + clean(metaStoreDB) } val sparksession = SparkSession @@ -53,7 +53,7 @@ object SparkSessionExample { .enableHiveSupport() .config("spark.sql.warehouse.dir", warehouse) .config("javax.jdo.option.ConnectionURL", - s"jdbc:derby:;databaseName=$metastoredb;create=true") + s"jdbc:derby:;databaseName=$metaStoreDB;create=true") .getOrCreate() CarbonProperties.getInstance() diff --git a/examples/spark2/src/main/scala/org/apache/carbondata/examples/util/ExampleUtils.scala b/examples/spark2/src/main/scala/org/apache/carbondata/examples/util/ExampleUtils.scala index 3064d69768e..bb9f4d0d984 100644 --- 
a/examples/spark2/src/main/scala/org/apache/carbondata/examples/util/ExampleUtils.scala +++ b/examples/spark2/src/main/scala/org/apache/carbondata/examples/util/ExampleUtils.scala @@ -35,7 +35,7 @@ object ExampleUtils { + "../../../..").getCanonicalPath val storeLocation = s"$rootPath/examples/spark2/target/store" val warehouse = s"$rootPath/examples/spark2/target/warehouse" - val metastoredb = s"$rootPath/examples/spark2/target" + val metaStoreDB = s"$rootPath/examples/spark2/target" CarbonProperties.getInstance() .addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd HH:mm:ss") @@ -57,7 +57,7 @@ object ExampleUtils { .config("spark.sql.warehouse.dir", warehouse) .config("spark.driver.host", "localhost") .config("spark.sql.crossJoin.enabled", "true") - .getOrCreateCarbonSession(storeLocation, metastoredb) + .getOrCreateCarbonSession(storeLocation, metaStoreDB) spark.sparkContext.setLogLevel("ERROR") spark diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java index dbfa4ecc73d..7c08dd91380 100644 --- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java +++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonFileInputFormat.java @@ -157,7 +157,7 @@ public List getSplits(JobContext job) throws IOException { List splits = new ArrayList<>(); boolean useBlockDataMap = job.getConfiguration().getBoolean("filter_blocks", true); // useBlockDataMap would be false in case of SDK when user has not provided any filter, In - // this case we dont want to load block/blocklet datamap. It would be true in all other + // this case we don't want to load block/blocklet datamap. It would be true in all other // scenarios if (useBlockDataMap) { // do block filtering and get split diff --git a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java index ea1bcd3f1b3..c56b1db3124 100644 --- a/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java +++ b/hadoop/src/main/java/org/apache/carbondata/hadoop/api/CarbonTableInputFormat.java @@ -634,7 +634,7 @@ segment needs refreshing. same thing need for select count(*) flow also. 
String segmentId = Segment.toSegment(blocklet.getSegmentId()).getSegmentNo(); String key = CarbonUpdateUtil.getSegmentBlockNameKey(segmentId, blockName); - // if block is invalid then dont add the count + // if block is invalid then don't add the count SegmentUpdateDetails details = updateStatusManager.getDetailsForABlock(key); if (null == details || !CarbonUpdateUtil.isBlockInvalid(details.getSegmentStatus())) { diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ColumndictTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ColumndictTestCase.scala index c8e8f1b4297..54740c6f4ea 100644 --- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ColumndictTestCase.scala +++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/ColumndictTestCase.scala @@ -28,7 +28,7 @@ import org.scalatest.BeforeAndAfterAll class ColumndictTestCase extends QueryTest with BeforeAndAfterAll { - //Load history data from CSV with/without header and specify/dont specify headers in command using external ALL_dictionary_PATH + //Load history data from CSV with/without header and specify/don't specify headers in command using external ALL_dictionary_PATH test("Columndict-TC001", Include) { sql(s"""drop table if exists t3""").collect sql(s"""CREATE TABLE IF NOT EXISTS t3 (ID Int, country String, name String, phonetype String, serialname String, salary Int,floatField float) STORED BY 'carbondata'""").collect @@ -37,7 +37,7 @@ class ColumndictTestCase extends QueryTest with BeforeAndAfterAll { } - //Load history data from CSV with/without header and specify/dont specify headers in command using external columndict + //Load history data from CSV with/without header and specify/don't specify headers in command using external columndict test("Columndict-TC002", Include) { sql(s"""CREATE TABLE IF NOT EXISTS t3 (ID Int, country String, name String, phonetype String, serialname String, salary Int,floatField float) STORED BY 'carbondata'""").collect sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/Data/columndict/data.csv' into table t3 options('COLUMNDICT'='country:$resourcesPath/Data/columndict/country.csv', 'SINGLE_PASS'='true')""").collect diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala index edd3e9cda15..ffe1977f508 100644 --- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala +++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datamap/TestDataMapCommand.scala @@ -302,7 +302,7 @@ class TestDataMapCommand extends QueryTest with BeforeAndAfterAll { test("create pre-agg table with path") { sql("drop table if exists main_preagg") sql("drop table if exists main ") - val warehouse = s"$metastoredb/warehouse" + val warehouse = s"$metaStoreDB/warehouse" val path = warehouse + "/" + System.nanoTime + "_preAggTestPath" sql( s""" diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala index 
c7957c1f3f5..c19c0b9dd0d 100644 --- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala +++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala @@ -304,7 +304,7 @@ test("Creation of partition table should fail if the colname in table schema and """create table partitionTable (id int,name String) partitioned by(email string) stored by 'carbondata' """.stripMargin) sql("insert into partitionTable select 1,'huawei','abc'") - val location = metastoredb +"/" +"def" + val location = metaStoreDB +"/" +"def" checkAnswer(sql("show partitions partitionTable"), Seq(Row("email=abc"))) sql(s"""alter table partitionTable add partition (email='def') location '$location'""") sql("insert into partitionTable select 1,'huawei','def'") @@ -323,7 +323,7 @@ test("Creation of partition table should fail if the colname in table schema and | PARTITIONED BY (empname String) | STORED BY 'org.apache.carbondata.format' """.stripMargin) - val location = metastoredb +"/" +"ravi" + val location = metaStoreDB +"/" +"ravi" sql(s"""alter table staticpartitionlocload add partition (empname='ravi') location '$location'""") sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE staticpartitionlocload partition(empname='ravi') OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""") val frame = sql("select count(empno) from staticpartitionlocload") @@ -348,7 +348,7 @@ test("Creation of partition table should fail if the colname in table schema and | PARTITIONED BY (empname String) | STORED BY 'org.apache.carbondata.format' """.stripMargin) - val location = metastoredb +"/" +"ravi1" + val location = metaStoreDB +"/" +"ravi1" sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE staticpartitionsetloc partition(empname='ravi') OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""") intercept[Exception] { sql(s"""alter table staticpartitionsetloc partition (empname='ravi') set location '$location'""") @@ -369,7 +369,7 @@ test("Creation of partition table should fail if the colname in table schema and | PARTITIONED BY (empname String) | STORED BY 'org.apache.carbondata.format' """.stripMargin) - val location = metastoredb +"/" +"ravi" + val location = metaStoreDB +"/" +"ravi" sql(s"""alter table staticpartitionlocloadother add partition (empname='ravi') location '$location'""") sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE staticpartitionlocloadother partition(empname='ravi') OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""") sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE staticpartitionlocloadother partition(empname='indra') OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""") @@ -405,7 +405,7 @@ test("Creation of partition table should fail if the colname in table schema and | PARTITIONED BY (empname String) | STORED BY 'org.apache.carbondata.format' """.stripMargin) - val location = metastoredb +"/" +"ravi1" + val location = metaStoreDB +"/" +"ravi1" sql(s"""alter table staticpartitionlocloadother_new add partition (empname='ravi') location '$location'""") sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE staticpartitionlocloadother_new partition(empname='ravi') OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""") sql(s"""LOAD DATA local inpath '$resourcesPath/data.csv' INTO TABLE staticpartitionlocloadother_new partition(empname='indra') OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""") diff 
--git a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala index 45d472e6781..da9d4c2bec9 100644 --- a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala +++ b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala @@ -192,9 +192,9 @@ object CarbonStore { } } finally { if (currentTablePartitions.equals(None)) { - cleanUpPartitionFoldersRecurssively(carbonTable, List.empty[PartitionSpec]) + cleanUpPartitionFoldersRecursively(carbonTable, List.empty[PartitionSpec]) } else { - cleanUpPartitionFoldersRecurssively(carbonTable, currentTablePartitions.get.toList) + cleanUpPartitionFoldersRecursively(carbonTable, currentTablePartitions.get.toList) } if (carbonCleanFilesLock != null) { @@ -204,12 +204,12 @@ object CarbonStore { } /** - * delete partition folders recurssively + * delete partition folders recursively * * @param carbonTable * @param partitionSpecList */ - def cleanUpPartitionFoldersRecurssively(carbonTable: CarbonTable, + def cleanUpPartitionFoldersRecursively(carbonTable: CarbonTable, partitionSpecList: List[PartitionSpec]): Unit = { if (carbonTable != null) { val loadMetadataDetails = SegmentStatusManager diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala index f69a14283c5..0af832b467c 100644 --- a/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala +++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/test/TestQueryExecutor.scala @@ -76,7 +76,7 @@ object TestQueryExecutor { // Otherwise point to respective target folder location localTarget } - val metastoredb = target + val metaStoreDB = target val location = s"$target/dbpath" val masterUrl = { val property = System.getProperty("spark.master.url") diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/test/util/QueryTest.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/test/util/QueryTest.scala index 5a26dd58922..411d5a35345 100644 --- a/integration/spark-common/src/main/scala/org/apache/spark/sql/test/util/QueryTest.scala +++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/test/util/QueryTest.scala @@ -118,7 +118,7 @@ class QueryTest extends PlanTest { lazy val storeLocation = CarbonProperties.getInstance(). 
getProperty(CarbonCommonConstants.STORE_LOCATION) val resourcesPath = TestQueryExecutor.resourcesPath - val metastoredb = TestQueryExecutor.metastoredb + val metaStoreDB = TestQueryExecutor.metaStoreDB val integrationPath = TestQueryExecutor.integrationPath val dblocation = TestQueryExecutor.location val defaultParallelism = sqlContext.sparkContext.defaultParallelism diff --git a/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/TestUtil.scala b/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/TestUtil.scala index 994ec436784..672e9725f17 100644 --- a/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/TestUtil.scala +++ b/integration/spark-datasource/src/test/scala/org/apache/spark/sql/carbondata/datasource/TestUtil.scala @@ -41,7 +41,7 @@ object TestUtil { + "../../../..").getCanonicalPath val warehouse1 = FileFactory.getPath(s"$rootPath/integration/spark-datasource/target/warehouse").toString val resource = s"$rootPath/integration/spark-datasource/src/test/resources" - val metastoredb1 = s"$rootPath/integration/spark-datasource/target" + val metaStoreDB1 = s"$rootPath/integration/spark-datasource/target" val spark = SparkSession .builder() .enableHiveSupport() diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala index 0f68004b908..a88a02b1f30 100644 --- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala +++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/mutation/DeleteExecution.scala @@ -283,7 +283,7 @@ object DeleteExecution { } catch { case e : MultipleMatchingException => LOGGER.error(e.getMessage) - // dont throw exception here. + // don't throw exception here. case e: Exception => val errorMsg = s"Delete data operation is failed for ${ database }.${ tableName }." LOGGER.error(errorMsg + e.getMessage) diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateListeners.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateListeners.scala index b2d21cc4811..eb982645adc 100644 --- a/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateListeners.scala +++ b/integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/preaaggregate/PreAggregateListeners.scala @@ -257,7 +257,7 @@ object AlterTableDropPartitionMetaListener extends OperationEventListener{ if (parentCarbonTable.hasAggregationDataMap) { // used as a flag to block direct drop partition on aggregate tables fired by the user operationContext.setProperty("isInternalDropCall", "true") - // Filter out all the tables which dont have the partition being dropped. + // Filter out all the tables which don't have the partition being dropped. 
val childTablesWithoutPartitionColumns = parentCarbonTable.getTableInfo.getDataMapSchemaList.asScala.filter { dataMapSchema => val childColumns = dataMapSchema.getChildSchema.getListOfColumns.asScala diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/test/Spark2TestQueryExecutor.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/test/Spark2TestQueryExecutor.scala index b341d6adab4..eaef9c1af1c 100644 --- a/integration/spark2/src/main/scala/org/apache/spark/sql/test/Spark2TestQueryExecutor.scala +++ b/integration/spark2/src/main/scala/org/apache/spark/sql/test/Spark2TestQueryExecutor.scala @@ -59,7 +59,7 @@ object Spark2TestQueryExecutor { FileFactory.getConfiguration. set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER") } - val metastoredb = s"$integrationPath/spark-common-cluster-test/target" + val metaStoreDB = s"$integrationPath/spark-common-cluster-test/target" val spark = SparkSession .builder().config(conf) .master(TestQueryExecutor.masterUrl) @@ -67,7 +67,7 @@ object Spark2TestQueryExecutor { .enableHiveSupport() .config("spark.sql.warehouse.dir", warehouse) .config("spark.sql.crossJoin.enabled", "true") - .getOrCreateCarbonSession(null, TestQueryExecutor.metastoredb) + .getOrCreateCarbonSession(null, TestQueryExecutor.metaStoreDB) if (warehouse.startsWith("hdfs://")) { System.setProperty(CarbonCommonConstants.HDFS_TEMP_LOCATION, warehouse) CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOCK_TYPE, diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala index 58e5665bbed..c7f080d3502 100644 --- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala +++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/AllDictionaryTestCase.scala @@ -122,7 +122,7 @@ class AllDictionaryTestCase extends Spark2QueryTest with BeforeAndAfterAll { def buildRelation() = { val warehouse = s"$resourcesPath/target/warehouse" val storeLocation = s"$resourcesPath/target/store" - val metastoredb = s"$resourcesPath/target" + val metaStoreDB = s"$resourcesPath/target" CarbonProperties.getInstance() .addProperty("carbon.custom.distribution", "true") CarbonProperties.getInstance() @@ -137,7 +137,7 @@ class AllDictionaryTestCase extends Spark2QueryTest with BeforeAndAfterAll { .config("spark.network.timeout", "600s") .config("spark.executor.heartbeatInterval", "600s") .config("carbon.enable.vector.reader","false") - .getOrCreateCarbonSession(storeLocation, metastoredb) + .getOrCreateCarbonSession(storeLocation, metaStoreDB) val catalog = CarbonEnv.getInstance(spark).carbonMetaStore sampleRelation = catalog.lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME), "sample")(spark).asInstanceOf[CarbonRelation] diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala index 9607bbcc2e8..e4dca67a554 100644 --- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala +++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/util/ExternalColumnDictionaryTestCase.scala @@ -116,7 +116,7 @@ class ExternalColumnDictionaryTestCase extends Spark2QueryTest with BeforeAndAft def buildRelation() = { val warehouse = 
s"$resourcesPath/target/warehouse" val storeLocation = s"$resourcesPath/target/store" - val metastoredb = s"$resourcesPath/target" + val metaStoreDB = s"$resourcesPath/target" CarbonProperties.getInstance() .addProperty("carbon.custom.distribution", "true") CarbonProperties.getInstance() @@ -131,7 +131,7 @@ class ExternalColumnDictionaryTestCase extends Spark2QueryTest with BeforeAndAft .config("spark.network.timeout", "600s") .config("spark.executor.heartbeatInterval", "600s") .config("carbon.enable.vector.reader","false") - .getOrCreateCarbonSession(storeLocation, metastoredb) + .getOrCreateCarbonSession(storeLocation, metaStoreDB) val catalog = CarbonEnv.getInstance(spark).carbonMetaStore extComplexRelation = catalog .lookupRelation(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME), diff --git a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java index 31f2234562a..d912a2558cb 100644 --- a/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java +++ b/processing/src/main/java/org/apache/carbondata/processing/datatypes/StructDataType.java @@ -190,7 +190,7 @@ public void setSurrogateIndex(int surrIndex) { children.get(i).writeByteArray(data[i], dataOutputStream, logHolder); } - // For other children elements which dont have data, write empty + // For other children elements which don't have data, write empty for (int i = data.length; i < children.size(); i++) { children.get(i).writeByteArray(null, dataOutputStream, logHolder); } From d7dbd70ec08de340a30ce6fab1699625a888f05d Mon Sep 17 00:00:00 2001 From: tisonkong <360548494@qq.com> Date: Sat, 29 Dec 2018 14:45:54 +0800 Subject: [PATCH 2/3] [CARBONDATA-3126]Correct some spell error in CarbonData --- .../org/apache/carbondata/common/Strings.java | 6 ++-- .../carbondata/common/StringsSuite.java | 2 +- .../metadata/schema/table/CarbonTable.java | 2 +- .../impl/DictionaryBasedResultCollector.java | 8 +++--- .../DictionaryBasedVectorResultCollector.java | 10 +++---- .../core/util/CarbonProperties.java | 2 +- .../carbondata/core/util/CarbonUtil.java | 2 +- .../sdv/generated/DataLoadingTestCase.scala | 2 +- .../complexType/TestComplexTypeQuery.scala | 20 ++++++------- .../strategy/CarbonLateDecodeStrategy.scala | 12 ++++---- .../loading/model/CarbonLoadModelBuilder.java | 28 +++++++++---------- 11 files changed, 47 insertions(+), 47 deletions(-) diff --git a/common/src/main/java/org/apache/carbondata/common/Strings.java b/common/src/main/java/org/apache/carbondata/common/Strings.java index 35c24ba542c..4bb9dc8186d 100644 --- a/common/src/main/java/org/apache/carbondata/common/Strings.java +++ b/common/src/main/java/org/apache/carbondata/common/Strings.java @@ -28,14 +28,14 @@ public class Strings { * Provide same function as mkString in Scala. * This is added to avoid JDK 8 dependency. 
*/ - public static String mkString(String[] strings, String delimeter) { + public static String mkString(String[] strings, String delimiter) { Objects.requireNonNull(strings); - Objects.requireNonNull(delimeter); + Objects.requireNonNull(delimiter); StringBuilder builder = new StringBuilder(); for (int i = 0; i < strings.length; i++) { builder.append(strings[i]); if (i != strings.length - 1) { - builder.append(delimeter); + builder.append(delimiter); } } return builder.toString(); diff --git a/common/src/test/java/org/apache/carbondata/common/StringsSuite.java b/common/src/test/java/org/apache/carbondata/common/StringsSuite.java index 65da32b50f7..ccabaf7adb9 100644 --- a/common/src/test/java/org/apache/carbondata/common/StringsSuite.java +++ b/common/src/test/java/org/apache/carbondata/common/StringsSuite.java @@ -28,7 +28,7 @@ public void testMkStringNullString() { } @Test(expected = NullPointerException.class) - public void testMkStringNullDelimeter() { + public void testMkStringNullDelimiter() { Strings.mkString(new String[]{"abc"}, null); } diff --git a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java index 82bf148b530..daaed9d017c 100644 --- a/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java +++ b/core/src/main/java/org/apache/carbondata/core/metadata/schema/table/CarbonTable.java @@ -391,7 +391,7 @@ private void fillDimensionsAndMeasuresForTables(TableSchema tableSchema) { } /** - * This method will add implict dimension into carbontable + * This method will add implicit dimension into carbontable * * @param dimensionOrdinal * @param dimensions diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java index 69b142f3e1d..cdabbc025a6 100644 --- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java +++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedResultCollector.java @@ -62,7 +62,7 @@ public class DictionaryBasedResultCollector extends AbstractScannedResultCollect boolean[] directDictionaryEncodingArray; - private boolean[] implictColumnArray; + private boolean[] implicitColumnArray; private boolean[] complexDataTypeArray; @@ -213,7 +213,7 @@ private void fillComplexColumnDataBufferForThisRow() { // complex dictionary columns comes. 
ByteBuffer buffer; if (!dictionaryEncodingArray[i]) { - if (implictColumnArray[i]) { + if (implicitColumnArray[i]) { throw new RuntimeException("Not Supported Column Type"); } else if (complexDataTypeArray[i]) { buffer = ByteBuffer.wrap(complexTypeKeyArray[complexTypeComplexColumnIndex++]); @@ -242,7 +242,7 @@ void fillDimensionData(BlockletScannedResult scannedResult, int[] surrogateResul byte[][] noDictionaryKeys, byte[][] complexTypeKeyArray, Map complexDimensionInfoMap, Object[] row, int i) { if (!dictionaryEncodingArray[i]) { - if (implictColumnArray[i]) { + if (implicitColumnArray[i]) { if (CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID .equals(queryDimensions[i].getColumnName())) { row[order[i]] = DataTypeUtil.getDataBasedOnDataType( @@ -347,7 +347,7 @@ void initDimensionAndMeasureIndexesForFillingData() { dictionaryEncodingArray = CarbonUtil.getDictionaryEncodingArray(queryDimensions); directDictionaryEncodingArray = CarbonUtil.getDirectDictionaryEncodingArray(queryDimensions); - implictColumnArray = CarbonUtil.getImplicitColumnArray(queryDimensions); + implicitColumnArray = CarbonUtil.getImplicitColumnArray(queryDimensions); complexDataTypeArray = CarbonUtil.getComplexDataTypeArray(queryDimensions); parentToChildColumnsMap.clear(); diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java index 430a5552fe3..2a712607bae 100644 --- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java +++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java @@ -60,7 +60,7 @@ public class DictionaryBasedVectorResultCollector extends AbstractScannedResultC ColumnVectorInfo[] allColumnInfo; - private ColumnVectorInfo[] implictColumnInfo; + private ColumnVectorInfo[] implicitColumnInfo; private boolean isDirectVectorFill; @@ -85,14 +85,14 @@ void prepareDimensionAndMeasureColumnVectors() { List dictInfoList = new ArrayList<>(); List noDictInfoList = new ArrayList<>(); List complexList = new ArrayList<>(); - List implictColumnList = new ArrayList<>(); + List implicitColumnList = new ArrayList<>(); for (int i = 0; i < queryDimensions.length; i++) { if (!dimensionInfo.getDimensionExists()[i]) { continue; } if (queryDimensions[i].getDimension().hasEncoding(Encoding.IMPLICIT)) { ColumnVectorInfo columnVectorInfo = new ColumnVectorInfo(); - implictColumnList.add(columnVectorInfo); + implicitColumnList.add(columnVectorInfo); columnVectorInfo.dimension = queryDimensions[i]; columnVectorInfo.ordinal = queryDimensions[i].getDimension().getOrdinal(); allColumnInfo[queryDimensions[i].getOrdinal()] = columnVectorInfo; @@ -145,7 +145,7 @@ void prepareDimensionAndMeasureColumnVectors() { dictionaryInfo = dictInfoList.toArray(new ColumnVectorInfo[dictInfoList.size()]); noDictionaryInfo = noDictInfoList.toArray(new ColumnVectorInfo[noDictInfoList.size()]); complexInfo = complexList.toArray(new ColumnVectorInfo[complexList.size()]); - implictColumnInfo = implictColumnList.toArray(new ColumnVectorInfo[implictColumnList.size()]); + implicitColumnInfo = implicitColumnList.toArray(new ColumnVectorInfo[implicitColumnList.size()]); Arrays.sort(dictionaryInfo); Arrays.sort(complexInfo); } @@ -194,7 +194,7 @@ void fillResultToColumnarBatch(BlockletScannedResult scannedResult, scannedResult.fillColumnarNoDictionaryBatch(noDictionaryInfo); 
scannedResult.fillColumnarMeasureBatch(measureColumnInfo, measureInfo.getMeasureOrdinals()); scannedResult.fillColumnarComplexBatch(complexInfo); - scannedResult.fillColumnarImplicitBatch(implictColumnInfo); + scannedResult.fillColumnarImplicitBatch(implicitColumnInfo); // it means fetched all data out of page so increment the page counter if (availableRows == requiredRows) { scannedResult.incrementPageCounter(); diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java index 706d57a0dd9..1caecad800d 100644 --- a/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java +++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonProperties.java @@ -84,7 +84,7 @@ public final class CarbonProperties { private static final CarbonProperties CARBONPROPERTIESINSTANCE = new CarbonProperties(); /** - * porpeties + * Properties */ private Properties carbonProperties; diff --git a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java index fc4704edb24..3fb54f0d255 100644 --- a/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java +++ b/core/src/main/java/org/apache/carbondata/core/util/CarbonUtil.java @@ -560,7 +560,7 @@ public static List convertToIntegerList(int[] array) { } /** - * From beeline if a delimeter is passed as \001, in code we get it as + * From beeline if a delimiter is passed as \001, in code we get it as * escaped string as \\001. So this method will unescape the slash again and * convert it back t0 \001 * diff --git a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/DataLoadingTestCase.scala b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/DataLoadingTestCase.scala index 24a5aa4c194..172cb64ded4 100644 --- a/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/DataLoadingTestCase.scala +++ b/integration/spark-common-cluster-test/src/test/scala/org/apache/carbondata/cluster/sdv/generated/DataLoadingTestCase.scala @@ -244,7 +244,7 @@ class DataLoadingTestCase extends QueryTest with BeforeAndAfterAll { } - //Show loads-->Delimeter_check + //Show loads-->Delimiter_check test("BadRecord_Dataload_021", Include) { sql( s"""CREATE TABLE bad_records_test5 (String_col string,integer_col int,decimal_column diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala index 6728cdf4596..e5f79e7ac49 100644 --- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala +++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/integration/spark/testsuite/complexType/TestComplexTypeQuery.scala @@ -47,8 +47,8 @@ class TestComplexTypeQuery extends QueryTest with BeforeAndAfterAll { sql("drop table if exists structusingstructHive") sql("drop table if exists structusingarraycarbon") sql("drop table if exists structusingarrayhive") - sql("drop table if exists complexcarbonwithspecialchardelimeter") - sql("drop table if exists complexhivewithspecialchardelimeter") + sql("drop table if exists complexcarbonwithspecialchardelimiter") + sql("drop 
table if exists complexhivewithspecialchardelimiter") sql( "create table complexcarbontable(deviceInformationId int, channelsId string, ROMSize " + "string, ROMName String, purchasedate string, mobile struct, MAC " + @@ -128,7 +128,7 @@ class TestComplexTypeQuery extends QueryTest with BeforeAndAfterAll { test( "Test ^ * special character data loading for complex types") { sql( - "create table complexcarbonwithspecialchardelimeter(deviceInformationId int, channelsId " + + "create table complexcarbonwithspecialchardelimiter(deviceInformationId int, channelsId " + "string, ROMSize string, ROMName String, purchasedate string, mobile struct, MAC array, locationinfo array>, gamePointId double,contractNumber double) STORED BY " + "'org.apache.carbondata.format'"); sql("LOAD DATA local inpath '" + resourcesPath + - "/complextypespecialchardelimiter.csv' INTO table complexcarbonwithspecialchardelimeter " + + "/complextypespecialchardelimiter.csv' INTO table complexcarbonwithspecialchardelimiter " + "OPTIONS('DELIMITER'=',', 'QUOTECHAR'='\"', 'FILEHEADER'='deviceInformationId,channelsId," + "ROMSize,ROMName,purchasedate,mobile,MAC,locationinfo,proddate,gamePointId," + "contractNumber', 'COMPLEX_DELIMITER_LEVEL_1'='^', 'COMPLEX_DELIMITER_LEVEL_2'='*')"); sql( - "create table complexhivewithspecialchardelimeter(deviceInformationId int, channelsId " + + "create table complexhivewithspecialchardelimiter(deviceInformationId int, channelsId " + "string, ROMSize string, ROMName String, purchasedate string, mobile struct, MAC array, locationinfo array val reference = AttributeReference(name, StringType, true)().withExprId(a.exprId) newProjectList :+= reference - implictsExisted = true + implicitExisted = true reference case a@Alias(s: ScalaUDF, name) if name.equalsIgnoreCase(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_SEGMENTID) => - implictsExisted = true + implicitExisted = true val reference = AttributeReference(CarbonCommonConstants.CARBON_IMPLICIT_COLUMN_TUPLEID, StringType, true)().withExprId(a.exprId) @@ -393,7 +393,7 @@ private[sql] class CarbonLateDecodeStrategy extends SparkStrategy { getRequestedColumns(relation, projectsAttr, filterSet, handledSet, newProjectList) var updateRequestedColumns = - if (!vectorPushRowFilters && !implictsExisted && !hasDictionaryFilterCols + if (!vectorPushRowFilters && !implicitExisted && !hasDictionaryFilterCols && !hasMoreDictionaryCols) { updateRequestedColumnsFunc( (projectSet ++ filterSet).map(relation.attributeMap).toSeq, @@ -406,7 +406,7 @@ private[sql] class CarbonLateDecodeStrategy extends SparkStrategy { supportBatchedDataSource(relation.relation.sqlContext, updateRequestedColumns.asInstanceOf[Seq[Attribute]]) && needDecoder.isEmpty - if (!vectorPushRowFilters && !supportBatch && !implictsExisted && !hasDictionaryFilterCols + if (!vectorPushRowFilters && !supportBatch && !implicitExisted && !hasDictionaryFilterCols && !hasMoreDictionaryCols) { // revert for row scan updateRequestedColumns = updateRequestedColumnsFunc(requestedColumns, table, needDecoder) @@ -423,7 +423,7 @@ private[sql] class CarbonLateDecodeStrategy extends SparkStrategy { updateRequestedColumns.asInstanceOf[Seq[Attribute]]) // Check whether spark should handle row filters in case of vector flow. 
if (!vectorPushRowFilters && scan.isInstanceOf[CarbonDataSourceScan] - && !implictsExisted && !hasDictionaryFilterCols && !hasMoreDictionaryCols) { + && !implicitExisted && !hasDictionaryFilterCols && !hasMoreDictionaryCols) { // Here carbon only do page pruning and row level pruning will be done by spark. scan.inputRDDs().head match { case rdd: CarbonScanRDD[InternalRow] => diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java index d02348d6121..51a7b3ae9ad 100644 --- a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java +++ b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java @@ -161,11 +161,11 @@ public void build( String global_sort_partitions = optionsFinal.get("global_sort_partitions"); String timestampformat = optionsFinal.get("timestampformat"); String dateFormat = optionsFinal.get("dateformat"); - String delimeter = optionsFinal.get("delimiter"); - String complex_delimeter_level1 = optionsFinal.get("complex_delimiter_level_1"); - String complex_delimeter_level2 = optionsFinal.get("complex_delimiter_level_2"); - String complex_delimeter_level3 = optionsFinal.get("complex_delimiter_level_3"); - String complex_delimeter_level4 = optionsFinal.get("complex_delimiter_level_4"); + String delimiter = optionsFinal.get("delimiter"); + String complex_delimiter_level1 = optionsFinal.get("complex_delimiter_level_1"); + String complex_delimiter_level2 = optionsFinal.get("complex_delimiter_level_2"); + String complex_delimiter_level3 = optionsFinal.get("complex_delimiter_level_3"); + String complex_delimiter_level4 = optionsFinal.get("complex_delimiter_level_4"); String all_dictionary_path = optionsFinal.get("all_dictionary_path"); String column_dict = optionsFinal.get("columndict"); validateDateTimeFormat(timestampformat, "TimestampFormat"); @@ -257,20 +257,20 @@ public void build( carbonLoadModel.setGlobalSortPartitions(global_sort_partitions); carbonLoadModel.setUseOnePass(Boolean.parseBoolean(single_pass)); - if (delimeter.equalsIgnoreCase(complex_delimeter_level1) || - complex_delimeter_level1.equalsIgnoreCase(complex_delimeter_level2) || - delimeter.equalsIgnoreCase(complex_delimeter_level2) || - delimeter.equalsIgnoreCase(complex_delimeter_level3)) { + if (delimiter.equalsIgnoreCase(complex_delimiter_level1) || + complex_delimiter_level1.equalsIgnoreCase(complex_delimiter_level2) || + delimiter.equalsIgnoreCase(complex_delimiter_level2) || + delimiter.equalsIgnoreCase(complex_delimiter_level3)) { throw new InvalidLoadOptionException("Field Delimiter and Complex types delimiter are same"); } else { - carbonLoadModel.setComplexDelimiter(complex_delimeter_level1); - carbonLoadModel.setComplexDelimiter(complex_delimeter_level2); - carbonLoadModel.setComplexDelimiter(complex_delimeter_level3); - carbonLoadModel.setComplexDelimiter(complex_delimeter_level4); + carbonLoadModel.setComplexDelimiter(complex_delimiter_level1); + carbonLoadModel.setComplexDelimiter(complex_delimiter_level2); + carbonLoadModel.setComplexDelimiter(complex_delimiter_level3); + carbonLoadModel.setComplexDelimiter(complex_delimiter_level4); } // set local dictionary path, and dictionary file extension carbonLoadModel.setAllDictPath(all_dictionary_path); - carbonLoadModel.setCsvDelimiter(CarbonUtil.unescapeChar(delimeter)); + 
carbonLoadModel.setCsvDelimiter(CarbonUtil.unescapeChar(delimiter)); carbonLoadModel.setCsvHeader(fileHeader); carbonLoadModel.setColDictFilePath(column_dict); From 2fbf3e6dc1bc35e4d6ba3ce321de6d1ecb5ac2b1 Mon Sep 17 00:00:00 2001 From: tisonkong <360548494@qq.com> Date: Sat, 29 Dec 2018 21:57:43 +0800 Subject: [PATCH 3/3] [CARBONDATA-3126] fixed the over-100 characters error. --- .../collector/impl/DictionaryBasedVectorResultCollector.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java index 2a712607bae..727e9692483 100644 --- a/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java +++ b/core/src/main/java/org/apache/carbondata/core/scan/collector/impl/DictionaryBasedVectorResultCollector.java @@ -145,7 +145,8 @@ void prepareDimensionAndMeasureColumnVectors() { dictionaryInfo = dictInfoList.toArray(new ColumnVectorInfo[dictInfoList.size()]); noDictionaryInfo = noDictInfoList.toArray(new ColumnVectorInfo[noDictInfoList.size()]); complexInfo = complexList.toArray(new ColumnVectorInfo[complexList.size()]); - implicitColumnInfo = implicitColumnList.toArray(new ColumnVectorInfo[implicitColumnList.size()]); + implicitColumnInfo = implicitColumnList.toArray( + new ColumnVectorInfo[implicitColumnList.size()]); Arrays.sort(dictionaryInfo); Arrays.sort(complexInfo); }
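A quick usage sketch of the Strings.mkString helper whose parameter is renamed in patch 2/3. The class and method signature (org.apache.carbondata.common.Strings#mkString(String[], String)) come from the diff above; the wrapper class MkStringExample and the sample values are illustrative only and are not part of the patch.

    import org.apache.carbondata.common.Strings;

    public class MkStringExample {
      public static void main(String[] args) {
        // mkString joins the array elements with the given delimiter,
        // mirroring Scala's mkString; prints "id,name,salary".
        String header = Strings.mkString(new String[]{"id", "name", "salary"}, ",");
        System.out.println(header);
      }
    }

Both arguments must be non-null: per the diff, the method calls Objects.requireNonNull on the array and on the delimiter, and the accompanying test (testMkStringNullDelimiter) expects a NullPointerException when the delimiter is null.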