diff --git a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
index eaf5e4b0ad3..8107cd5e1c8 100644
--- a/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
+++ b/integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/standardpartition/StandardPartitionTableQueryTestCase.scala
@@ -437,6 +437,20 @@ test("Creation of partition table should fail if the colname in table schema and
     sql("drop datamap if exists preaggTable on table partitionTable")
   }
 
+  test("validate data in partition table after dropping and adding a column") {
+    sql("drop table if exists par")
+    sql("create table par(name string) partitioned by (age double) stored by " +
+        "'carbondata'")
+    sql(s"load data local inpath '$resourcesPath/uniqwithoutheader.csv' into table par options" +
+        s"('header'='false')")
+    sql("alter table par drop columns(name)")
+    sql("alter table par add columns(name string)")
+    sql(s"load data local inpath '$resourcesPath/uniqwithoutheader.csv' into table par options" +
+        s"('header'='false')")
+    checkAnswer(sql("select name from par"), Seq(Row("a"), Row("b"), Row(null), Row(null)))
+    sql("drop table if exists par")
+  }
+
   private def verifyPartitionInfo(frame: DataFrame, partitionNames: Seq[String]) = {
     val plan = frame.queryExecution.sparkPlan
@@ -473,6 +487,7 @@ test("Creation of partition table should fail if the colname in table schema and
     sql("drop table if exists staticpartitionlocloadother")
     sql("drop table if exists staticpartitionextlocload_new")
     sql("drop table if exists staticpartitionlocloadother_new")
+    sql("drop table if exists par")
   }
 }
diff --git a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
index f5149e811dd..745eee77316 100644
--- a/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
+++ b/integration/spark-common/src/main/scala/org/apache/spark/sql/execution/command/carbonTableSchemaCommon.scala
@@ -302,6 +302,11 @@ class AlterTableColumnSchemaGenerator(
     allColumns = CarbonScalaUtil.reArrangeColumnSchema(allColumns)
 
+    if (tableInfo.getFactTable.getPartitionInfo != null) {
+      val par = tableInfo.getFactTable.getPartitionInfo.getColumnSchemaList
+      allColumns = allColumns.filterNot(b => par.contains(b)) ++= par.asScala
+    }
+
     def getLocalDictColumnList(tableProperties: scala.collection.mutable.Map[String, String],
         columns: scala.collection.mutable.ListBuffer[ColumnSchema]): (scala.collection.mutable
         .ListBuffer[ColumnSchema], scala.collection.mutable.ListBuffer[ColumnSchema]) = {
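The carbonTableSchemaCommon change above keeps partition columns at the tail of the schema after ALTER TABLE ADD/DROP COLUMNS rearranges it. A minimal, self-contained Scala sketch of that reordering, using hypothetical names rather than the CarbonData API:

    object PartitionReorderSketch {
      // Remove the partition columns from wherever the rearrangement left
      // them, then re-append them so they always close out the schema.
      def moveToEnd[A](all: Seq[A], partition: Seq[A]): Seq[A] =
        all.filterNot(partition.contains) ++ partition

      def main(args: Array[String]): Unit = {
        // Dropping and re-adding "name" around the partition column "age"
        // must still yield the data column first, the partition column last.
        println(moveToEnd(Seq("age", "name"), Seq("age"))) // List(name, age)
      }
    }

The patch does the same filterNot-and-append on the mutable ListBuffer of ColumnSchema, so equality of the partition ColumnSchema instances is what anchors the ordering.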
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
index 0c3b7dca849..ad3eb72600b 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonRelation.scala
@@ -98,8 +98,15 @@ case class CarbonRelation(
   override val output = {
     val columns = carbonTable.getCreateOrderColumn(carbonTable.getTableName)
       .asScala
+    val partitionColumnSchemas = if (carbonTable.getPartitionInfo() != null) {
+      carbonTable.getPartitionInfo.getColumnSchemaList.asScala
+    } else {
+      Nil
+    }
+    val otherColumns = columns.filterNot(a => partitionColumnSchemas.contains(a.getColumnSchema))
+    val partitionColumns = columns.filter(a => partitionColumnSchemas.contains(a.getColumnSchema))
     // convert each column to Attribute
-    columns.filter(!_.isInvisible).map { column: CarbonColumn =>
+    (otherColumns ++= partitionColumns).filter(!_.isInvisible).map { column: CarbonColumn =>
       if (column.isDimension()) {
         val output: DataType = column.getDataType.getName.toLowerCase match {
           case "array" =>
diff --git a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
index f69f0afa7c7..4a293044957 100644
--- a/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
+++ b/processing/src/main/java/org/apache/carbondata/processing/loading/model/CarbonLoadModelBuilder.java
@@ -206,11 +206,18 @@ public void build(
     } else {
       if (StringUtils.isEmpty(fileHeader)) {
         List<CarbonColumn> columns = table.getCreateOrderColumn(table.getTableName());
-        String[] columnNames = new String[columns.size()];
-        for (int i = 0; i < columnNames.length; i++) {
-          columnNames[i] = columns.get(i).getColName();
+        List<String> columnNames = new ArrayList<>();
+        List<String> partitionColumns = new ArrayList<>();
+        for (int i = 0; i < columns.size(); i++) {
+          if (table.getPartitionInfo() != null && table.getPartitionInfo().getColumnSchemaList()
+              .contains(columns.get(i).getColumnSchema())) {
+            partitionColumns.add(columns.get(i).getColName());
+          } else {
+            columnNames.add(columns.get(i).getColName());
+          }
         }
-        fileHeader = Strings.mkString(columnNames, ",");
+        columnNames.addAll(partitionColumns);
+        fileHeader = Strings.mkString(columnNames.toArray(new String[columnNames.size()]), ",");
       }
     }
   }
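CarbonRelation, CarbonLoadModelBuilder, and the alter-table path must all agree on one ordering: data columns first, partition columns last. A hedged Scala sketch of the default file-header construction, using an illustrative helper rather than the builder's real API:

    object FileHeaderSketch {
      // Split the create-order column names into partition and non-partition
      // groups, then join them with the partition columns last. This mirrors
      // the default CSV header built for LOAD DATA when no header is given.
      def buildHeader(columns: Seq[String], partitionColumns: Set[String]): String = {
        val (partition, others) = columns.partition(partitionColumns.contains)
        (others ++ partition).mkString(",")
      }

      def main(args: Array[String]): Unit = {
        // Table "par" from the new test: data column "name", partition column "age".
        println(buildHeader(Seq("age", "name"), Set("age"))) // name,age
      }
    }

If the inferred header order diverged from the relation's output order, the second LOAD in the new test would bind CSV values to the wrong columns; the checkAnswer expecting nulls for the re-added column appears to be what guards against that regression.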