
Commit 9b87903: Fixes styling issue and reverts unnecessary changes

liancheng committed Jul 6, 2015 (parent commit: 3581497)
Showing 3 changed files with 6 additions and 8 deletions.
@@ -29,10 +29,11 @@ import java.util.{Calendar, TimeZone}
  * precision.
  */
 object DateTimeUtils {
-  final val MILLIS_PER_DAY = SECONDS_PER_DAY * 1000L
+
   // see http://stackoverflow.com/questions/466321/convert-unix-timestamp-to-julian
   final val JULIAN_DAY_OF_EPOCH = 2440587 // and .5
   final val SECONDS_PER_DAY = 60 * 60 * 24L
+  final val MILLIS_PER_DAY = SECONDS_PER_DAY * 1000L
   final val HUNDRED_NANOS_PER_SECOND = 1000L * 1000L * 10L
   final val NANOS_PER_SECOND = HUNDRED_NANOS_PER_SECOND * 100
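The relocation above lets MILLIS_PER_DAY follow the SECONDS_PER_DAY definition it depends on. Since both are final vals with constant initializers, the forward reference happened to resolve at compile time, which is presumably why the commit message calls this a styling fix rather than a bug fix; with ordinary vals, an object's fields initialize in declaration order, and a forward reference reads the field's not-yet-assigned default value. A minimal sketch of that pitfall, with hypothetical names:

```scala
// Plain (non-constant) vals in an object initialize top to bottom, so a
// forward reference observes the default value of the later field.
object InitOrderDemo {
  val millisPerDay: Long = secondsPerDay * 1000L // secondsPerDay is still 0L here
  val secondsPerDay: Long = 60 * 60 * 24L
}

object InitOrderDemoApp extends App {
  println(InitOrderDemo.millisPerDay)  // 0, not 86400000
  println(InitOrderDemo.secondsPerDay) // 86400
}
```

Keeping constants in dependency order sidesteps the question entirely.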
@@ -71,10 +71,7 @@ class ParquetIOSuiteBase extends QueryTest with ParquetTest {
    * Writes `data` to a Parquet file, reads it back and check file contents.
    */
   protected def checkParquetFile[T <: Product : ClassTag: TypeTag](data: Seq[T]): Unit = {
-    withParquetDataFrame(data) { r =>
-      r.foreach(println)
-      checkAnswer(r, data.map(Row.fromTuple))
-    }
+    withParquetDataFrame(data)(r => checkAnswer(r, data.map(Row.fromTuple)))
   }
 
   test("basic data types (without binary)") {
@@ -151,7 +148,7 @@ class ParquetIOSuiteBase extends QueryTest with ParquetTest {
   }
 
   test("map") {
-    val data = (1 to 4).map(i => Tuple1(Map(i -> (i + 1), (i + 1) -> (i + 2))))
+    val data = (1 to 4).map(i => Tuple1(Map(i -> s"val_$i")))
     checkParquetFile(data)
   }
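The map test reverts to string values here. Each Map is wrapped in Tuple1 so the element type satisfies checkParquetFile's T <: Product bound and round-trips as a single-column row. A quick self-contained check (plain Scala, no Spark required) of what the reverted test data evaluates to:

```scala
object MapTestDataDemo extends App {
  // Four single-field tuples, each holding a one-entry Map[Int, String].
  val data = (1 to 4).map(i => Tuple1(Map(i -> s"val_$i")))
  data.foreach(println)
  // (Map(1 -> val_1))
  // (Map(2 -> val_2))
  // (Map(3 -> val_3))
  // (Map(4 -> val_4))
}
```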
@@ -21,6 +21,7 @@ import java.io.File
 
 import org.scalatest.BeforeAndAfterAll
 
+import org.apache.spark.sql._
 import org.apache.spark.sql.execution.{ExecutedCommand, PhysicalRDD}
 import org.apache.spark.sql.hive.execution.HiveTableScan
 import org.apache.spark.sql.hive.test.TestHive
@@ -30,7 +31,6 @@ import org.apache.spark.sql.parquet.{ParquetRelation2, ParquetTableScan}
 import org.apache.spark.sql.sources.{InsertIntoDataSource, InsertIntoHadoopFsRelation, LogicalRelation}
 import org.apache.spark.sql.test.SQLTestUtils
 import org.apache.spark.sql.types._
-import org.apache.spark.sql.{DataFrame, QueryTest, Row, SQLConf, SaveMode}
 import org.apache.spark.util.Utils
 
 // The data where the partitioning key exists only in the directory structure.
@@ -790,7 +790,7 @@ class ParquetDataSourceOffSourceSuite extends ParquetSourceSuiteBase {
  * A collection of tests for parquet data with various forms of partitioning.
  */
 abstract class ParquetPartitioningTest extends QueryTest with SQLTestUtils with BeforeAndAfterAll {
-  override def sqlContext = TestHive
+  override def sqlContext: SQLContext = TestHive
 
   var partitionedTableDir: File = null
   var normalTableDir: File = null
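The added return type pins the override's public signature to SQLContext. Without the annotation, Scala infers the most specific result type, here the singleton type TestHive.type, leaking the concrete test context through the suite's API; needing the SQLContext name in scope is presumably also why the first hunk of this file widens the selective import to org.apache.spark.sql._. A minimal sketch of the inference difference, with hypothetical names:

```scala
class Context
object TestContext extends Context

trait Suite {
  def context: Context
}

class InferredSuite extends Suite {
  // Without an annotation, the inferred result type is the singleton
  // type TestContext.type, narrower than the Context interface.
  override def context = TestContext
}

class AnnotatedSuite extends Suite {
  // The explicit annotation keeps the public signature at the interface type.
  override def context: Context = TestContext
}
```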
