This repository has been archived by the owner on Mar 30, 2021. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 92
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Harish Butani
committed
Jul 28, 2016
1 parent
9c293d4
commit d592ee6
Showing
2 changed files
with
83 additions
and
44 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,65 +1,96 @@ | ||
name := "spark-druid-olap" | ||
|
||
version := "0.1.0" | ||
scalaVersion := "2.10.5" | ||
|
||
organization := "SparklineData" | ||
crossScalaVersions := Seq("2.10.5", "2.11.6") | ||
|
||
scalaVersion := "2.10.4" | ||
parallelExecution in Test := false | ||
|
||
javaOptions += " -Xms1g -Xmx2g -Duser.timezone=UTC " | ||
val nscalaVersion = "1.6.0" | ||
val scalatestVersion = "2.2.4" | ||
val httpclientVersion = "4.5" | ||
val json4sVersion = "3.2.10" | ||
val sparkdateTimeVersion = "0.0.2" | ||
val scoptVersion = "3.3.0" | ||
val sparkVersion = "1.6.0" | ||
|
||
val coreDependencies = Seq( | ||
"com.github.nscala-time" %% "nscala-time" % nscalaVersion, | ||
"org.apache.spark" %% "spark-core" % sparkVersion % "provided", | ||
"org.apache.spark" %% "spark-sql" % sparkVersion % "provided", | ||
"org.apache.spark" %% "spark-hive" % sparkVersion % "provided", | ||
"org.apache.spark" %% "spark-hive-thriftserver" % sparkVersion % "provided", | ||
"org.apache.httpcomponents" % "httpclient" % httpclientVersion, | ||
//"org.json4s" %% "json4s-native" % json4sVersion, | ||
"org.json4s" %% "json4s-ext" % json4sVersion, | ||
"com.sparklinedata" %% "spark-datetime" % sparkdateTimeVersion, | ||
"com.github.scopt" %% "scopt" % scoptVersion, | ||
"org.scalatest" %% "scalatest" % scalatestVersion % "test" | ||
) | ||
|
||
parallelExecution in Test := false | ||
val coreTestDependencies = Seq( | ||
"org.scalatest" %% "scalatest" % scalatestVersion % "test", | ||
"com.databricks" %% "spark-csv" % "1.1.0" % "test" | ||
) | ||
|
||
crossScalaVersions := Seq("2.10.4", "2.11.6") | ||
lazy val commonSettings = Seq( | ||
organization := "com.sparklinedata", | ||
|
||
sparkVersion := "1.6.0" | ||
version := "0.1.0", | ||
|
||
spName := "SparklineData/spark-druid-olap" | ||
javaOptions := Seq("-Xms1g", "-Xmx2g", "-Duser.timezone=UTC", "-XX:MaxPermSize=256M"), | ||
|
||
//spAppendScalaVersion := true | ||
// Target Java 7 | ||
scalacOptions += "-target:jvm-1.7", | ||
javacOptions in compile ++= Seq("-source", "1.7", "-target", "1.7"), | ||
|
||
scalacOptions += "-feature" | ||
scalacOptions := Seq("-feature", "-deprecation"), | ||
|
||
licenses := Seq("Apache License, Version 2.0" -> url("http://www.apache.org/licenses/LICENSE-2.0")), | ||
|
||
// All Spark Packages need a license | ||
licenses := Seq("Apache-2.0" -> url("http://opensource.org/licenses/Apache-2.0")) | ||
homepage := Some(url("https://github.com/SparklineData/spark-datetime")), | ||
|
||
publishMavenStyle := true, | ||
|
||
// Add Spark components this package depends on, e.g, "mllib", .... | ||
sparkComponents ++= Seq("sql", "hive", "hive-thriftserver") | ||
publishTo := Some("releases" at "https://oss.sonatype.org/service/local/staging/deploy/maven2/"), | ||
|
||
credentials += Credentials(Path.userHome / ".github.cred") | ||
publishArtifact in Test := false, | ||
|
||
// uncomment and change the value below to change the directory where your zip artifact will be created | ||
// spDistDirectory := target.value | ||
pomIncludeRepository := { _ => false }, | ||
|
||
// add any Spark Package dependencies using spDependencies. | ||
// e.g. spDependencies += "databricks/spark-avro:0.1" | ||
test in assembly := {}, | ||
|
||
val httpclientVersion = "4.5" | ||
val json4sVersion = "3.2.10" | ||
val scalatestVersion = "2.2.4" | ||
val sparkdateTimeVersion = "0.0.2" | ||
val scoptVersion = "3.3.0" | ||
useGpg := true, | ||
|
||
libraryDependencies ++= Seq( | ||
"org.apache.httpcomponents" % "httpclient" % httpclientVersion, | ||
//"org.json4s" %% "json4s-native" % json4sVersion, | ||
"org.json4s" %% "json4s-ext" % json4sVersion, | ||
"com.sparklinedata" %% "spark-datetime" % sparkdateTimeVersion, | ||
"com.github.scopt" %% "scopt" % scoptVersion, | ||
"org.scalatest" %% "scalatest" % scalatestVersion % "test", | ||
"com.databricks" %% "spark-csv" % "1.1.0" % "test" | ||
) | ||
usePgpKeyHex("C922EB45"), | ||
|
||
pomExtra := ( | ||
<scm> | ||
<url>https://github.com/SparklineData/spark-datetime.git</url> | ||
<connection>scm:git:git@github.com:SparklineData/spark-datetime.git</connection> | ||
</scm> | ||
<developers> | ||
<developer> | ||
<name>Harish Butani</name> | ||
<organization>SparklineData</organization> | ||
<organizationUrl>http://sparklinedata.com/</organizationUrl> | ||
</developer> | ||
</developers>), | ||
|
||
assemblyOption in assembly := (assemblyOption in assembly).value.copy(includeScala = false) | ||
fork in Test := true | ||
) ++ releaseSettings ++ Seq( | ||
ReleaseKeys.publishArtifactsAction := PgpKeys.publishSigned.value | ||
) | ||
|
||
test in assembly := {} | ||
|
||
spShortDescription := "Spark Druid Package" // Your one line description of your package | ||
lazy val root = project.in(file(".")) | ||
.settings(commonSettings: _*) | ||
.settings(name := "accelerator") | ||
.settings(libraryDependencies ++= (coreDependencies ++ coreTestDependencies)) | ||
.settings(assemblyOption in assembly := (assemblyOption in assembly).value.copy(includeScala = false)) | ||
.settings( | ||
artifact in (Compile, assembly) ~= { art => | ||
art.copy(`classifier` = Some("assembly")) | ||
} | ||
) | ||
.settings(addArtifact(artifact in (Compile, assembly), assembly).settings: _*) | ||
|
||
spDescription := """Spark-Druid package enables 'Logical Plans' written against a raw event dataset | ||
to be rewritten to take advantage of a Druid Index of the Event data. It | ||
comprises a 'Druid DataSource' that wraps the 'raw event dataset', and a | ||
'Druid Planner' that contains a set of Rewrite Rules to convert | ||
'Project-Filter-Aggregation-Having-Sort-Limit' plans to Druid Index Rest calls.""".stripMargin |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters