From 3c335979b07a203213e6f66cfad122bdf88ce3b1 Mon Sep 17 00:00:00 2001
From: dhatchayani
Date: Fri, 24 Nov 2017 11:26:10 +0530
Subject: [PATCH] [CARBONDATA-1803] Changing format of Show segments

Change the SHOW SEGMENTS output format for backward compatibility:
(1) "File Format" is a newly added column that had been inserted in the
    middle of the output, so it is moved to the last position.
(2) "Segment Id" is changed back to "SegmentSequenceId".

This closes #1558
---
 .../main/scala/org/apache/carbondata/api/CarbonStore.scala  | 4 ++--
 .../apache/carbondata/spark/util/GlobalDictionaryUtil.scala | 2 +-
 .../org/apache/spark/sql/CarbonCatalystOperators.scala      | 6 +++---
 .../spark/testsuite/segmentreading/TestSegmentReading.scala | 2 +-
 .../spark/carbondata/TestStreamingTableOperation.scala      | 2 +-
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
index 73325a6b5d4..44cbb500beb 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/api/CarbonStore.scala
@@ -92,8 +92,8 @@ object CarbonStore {
           load.getSegmentStatus.getMessage,
           startTime,
           endTime,
-          load.getFileFormat.toString,
-          mergedTo)
+          mergedTo,
+          load.getFileFormat.toString)
       }.toSeq
     } else {
       Seq.empty
diff --git a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
index d8038d5b49c..f6170e84d4b 100644
--- a/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
+++ b/integration/spark-common/src/main/scala/org/apache/carbondata/spark/util/GlobalDictionaryUtil.scala
@@ -374,7 +374,7 @@ object GlobalDictionaryUtil {
       classOf[CSVInputFormat],
       classOf[NullWritable],
       classOf[StringArrayWritable],
-      hadoopConf).setName("global dictionary").map[Row] { currentRow =>
+      jobConf).setName("global dictionary").map[Row] { currentRow =>
       row.setValues(currentRow._2.get())
     }
     sqlContext.createDataFrame(rdd, schema)
diff --git a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
index ee285e7814a..e02df9af980 100644
--- a/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
+++ b/integration/spark2/src/main/scala/org/apache/spark/sql/CarbonCatalystOperators.scala
@@ -122,12 +122,12 @@ case class ShowLoadsCommand(
   extends Command {
 
   override def output: Seq[Attribute] = {
-    Seq(AttributeReference("Segment Id", StringType, nullable = false)(),
+    Seq(AttributeReference("SegmentSequenceId", StringType, nullable = false)(),
       AttributeReference("Status", StringType, nullable = false)(),
       AttributeReference("Load Start Time", TimestampType, nullable = false)(),
       AttributeReference("Load End Time", TimestampType, nullable = true)(),
-      AttributeReference("File Format", StringType, nullable = false)(),
-      AttributeReference("Merged To", StringType, nullable = false)())
+      AttributeReference("Merged To", StringType, nullable = false)(),
+      AttributeReference("File Format", StringType, nullable = false)())
   }
 }
 
diff --git a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala
index 9e12f43b90f..19201e35f84 100644
--- a/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala
+++ b/integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/segmentreading/TestSegmentReading.scala
@@ -248,7 +248,7 @@ class TestSegmentReading extends QueryTest with BeforeAndAfterAll {
         |('DELIMITER'= ',', 'QUOTECHAR'= '\"')""".stripMargin)
     val df = sql("SHOW SEGMENTS for table carbon_table_show_seg")
     val col = df.collect().map{
-      row => Row(row.getString(0),row.getString(1),row.getString(5))
+      row => Row(row.getString(0),row.getString(1),row.getString(4))
     }.toSeq
     assert(col.equals(Seq(Row("2","Success","NA"),
       Row("1","Compacted","0.1"),
diff --git a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala
index 7cbec0499f2..d9591c4ba68 100644
--- a/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala
+++ b/integration/spark2/src/test/scala/org/apache/spark/carbondata/TestStreamingTableOperation.scala
@@ -570,7 +570,7 @@ class TestStreamingTableOperation extends QueryTest with BeforeAndAfterAll {
     result.foreach { row =>
       if (row.getString(0).equals("1")) {
         assertResult(SegmentStatus.STREAMING.getMessage)(row.getString(1))
-        assertResult(FileFormat.ROW_V1.toString)(row.getString(4))
+        assertResult(FileFormat.ROW_V1.toString)(row.getString(5))
       }
     }
   }
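
A minimal sketch of how the reordered SHOW SEGMENTS output is consumed
after this patch, assuming the QueryTest `sql` helper used in the tests
above; the table name `t` is hypothetical:

    // New column order: 0 = SegmentSequenceId, 1 = Status,
    // 2 = Load Start Time, 3 = Load End Time, 4 = Merged To, 5 = File Format
    val segments = sql("SHOW SEGMENTS FOR TABLE t").collect()
    segments.foreach { row =>
      val segmentId = row.getString(0)  // renamed back from "Segment Id"
      val mergedTo  = row.getString(4)  // moved up from index 5
      val format    = row.getString(5)  // "File Format" is now the last column
      println(s"segment $segmentId: mergedTo=$mergedTo, format=$format")
    }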