Add config files, "re-start" so jobserver can start easily in local env

1 parent a52a895 commit c39e403071227be5277ea4f2a01c8415fd434762 Evan Chan committed Nov 30, 2013
20 jobserver/README.md
@@ -1,5 +1,17 @@
spark-job-server provides a RESTful interface for submitting and managing Spark jobs, jars, and job contexts.
+## Quick start / development mode
+
+From the SBT shell, simply type "re-start". This uses a default configuration file. An optional argument is a
+path to an alternative config file. You can also specify JVM parameters after "---". Using all of the
+options looks like this:
+
+ re-start /path/to/my.conf --- -Xmx8g
+
+Note that re-start (SBT Revolver) forks the job server in a separate process. If you make a code change, simply
+type re-start again at the SBT shell prompt; it will recompile your changes and restart the job server, enabling
+very fast turnaround cycles.
+
## Features
- *"Spark as a Service"*: Simple REST interface for all aspects of job, context management
@@ -81,14 +93,6 @@ serialized properly:
If we encounter a data type that is not supported, then the entire result will be serialized to a string.
-## Running a local job server
-
-From SBT shell, simply type "re-start". This uses a default configuration file. An optional argument is a
-path to an alternative config file. You can also specify JVM parameters after "---". Including all the
-options looks like this:
-
- re-start /path/to/my.conf --- -Xmx8g
-
## Development hints
- Please run scalastyle to ensure your code changes don't break the style guide
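
A note on the new quick-start section: the alternative config file passed to re-start presumably only needs to override the keys that differ from the bundled defaults, since Typesafe Config merges configurations via withFallback (see the loading sketch under application.conf below). Under that assumption, a minimal hypothetical my.conf could be as small as:

    # my.conf (hypothetical) -- override only what differs from application.conf
    spark {
      master = "local[2]"      # use fewer local cores than the default local[4]
      jobserver.port = 9999    # run the REST interface on a non-default port
    }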
20 jobserver/config/logback-local.xml
@@ -0,0 +1,20 @@
+<configuration debug="true">
+
+ <logger name="spark.jobserver" level="DEBUG" />
+
+ <appender name="FILE" class="ch.qos.logback.core.FileAppender">
+ <file>spark-jobserver.log</file>
+
+ <!-- truncate the logfile every time we start the app -->
+ <append>false</append>
+
+ <encoder>
+ <pattern>[%date{ISO8601}] %-5level %logger{26} [%X{jobId}] [%X{akkaSource}] - %msg%n</pattern>
+ </encoder>
+
+ </appender>
+
+ <root level="info">
+ <appender-ref ref="FILE" />
+ </root>
+</configuration>
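
For reference, a line written by this FILE appender's pattern would look roughly like the following; the logger name, jobId, and message are made up, and the bracketed MDC fields (%X{jobId}, %X{akkaSource}) render empty when unset:

    [2013-11-30 10:15:42,123] DEBUG spark.jobserver.JobManager [job-1] [] - starting job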
31 jobserver/config/logback-server.xml
@@ -0,0 +1,31 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Logging config for server deploys, with automatic date based log rollover -->
+
+<configuration>
+ <!-- Enable JMX-based changing of log levels, reloading of config, etc. -->
+ <jmxConfigurator />
+
+ <appender name="file" class="ch.qos.logback.core.rolling.RollingFileAppender">
+ <!--See http://logback.qos.ch/manual/appenders.html#RollingFileAppender-->
+ <!--and http://logback.qos.ch/manual/appenders.html#TimeBasedRollingPolicy-->
+ <!--for further documentation-->
+
+ <!-- This is where the current log file is written -->
+ <file>${LOG_DIR}/spark-job-server.log</file>
+
+ <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
+ <!-- daily rollover -->
+ <fileNamePattern>${LOG_DIR}/spark-job-server.%d{yyyy-MM-dd}.log</fileNamePattern>
+
+ <!-- keep 30 days' worth of history -->
+ <maxHistory>30</maxHistory>
+ </rollingPolicy>
+
+ <encoder>
+ <pattern>[%date{ISO8601}] %-5level %logger{20} [%X{jobId}] [%X{akkaSource}] - %msg%n</pattern>
+ </encoder>
+ </appender>
+ <root level="info">
+ <appender-ref ref="file"/>
+ </root>
+</configuration>
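
One thing to note: ${LOG_DIR} is not defined anywhere in this file. Logback resolves such variables from JVM system properties (falling back to the OS environment), so a server launch script would need to supply it. A hypothetical launch command, with the main class and classpath purely illustrative:

    # hypothetical launch -- LOG_DIR and the logback config passed as system properties
    java -DLOG_DIR=/var/log/spark-jobserver \
         -Dlogback.configurationFile=config/logback-server.xml \
         -cp "$JOBSERVER_CLASSPATH" spark.jobserver.JobServer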
56 jobserver/src/main/resources/application.conf
@@ -0,0 +1,56 @@
+# Settings for safe local mode development of job server
+# These are used as defaults when you do "re-start" within sbt
+spark {
+ master = "local[4]"
+
+ jobserver {
+ port = 8090
+
+ # Number of job results to keep per JobResultActor/context
+ job-result-cache-size = 5000
+
+ jobdao = spark.jobserver.io.JobFileDAO
+
+ filedao {
+ rootdir = /tmp/spark-jobserver/filedao/data
+ }
+
+ # Timeout for the job server to wait while creating contexts
+ context-creation-timeout = 15 s
+ }
+
+ # predefined Spark contexts
+ # Below is an example, but do not uncomment it. Everything defined here is carried over to
+ # deploy-time configs, so they will be created in all environments. :(
+ contexts {
+ # olap-demo {
+ # num-cpu-cores = 4 # Number of cores to allocate. Required.
+ # memory-per-node = 1024m # Executor memory per node, -Xmx style eg 512m, 1G, etc.
+ # }
+ # define additional contexts here
+ }
+
+ # Default settings for ad hoc as well as manually created contexts
+ context-settings {
+ num-cpu-cores = 4 # Number of cores to allocate. Required.
+ memory-per-node = 512m # Executor memory per node, -Xmx style eg 512m, 1G, etc.
+ # dependent-jars = ["rookery-spark"] # appName for dependent jars to load for entire context
+ # max-jobs-per-context = 4 # Max # of jobs to run at the same time
+ # coarse-mesos-mode = true # per-context, rather than per-job, resource allocation
+ # rdd-ttl = 24 h # time-to-live for RDDs in a SparkContext. Don't specify = forever
+ }
+}
+
+akka {
+ # Use SLF4J/logback for deployed environment logging
+ event-handlers = ["akka.event.slf4j.Slf4jEventHandler"]
+}
+
+# check the reference.conf in spray-can/src/main/resources for all defined settings
+spray.can.server {
+ # uncomment the next line for making this an HTTPS example
+ # ssl-encryption = on
+ idle-timeout = 5 s
+ request-timeout = 15 s
+ pipelining-limit = 2 # for maximum performance (prevents StopReading / ResumeReading messages to the IOBridge)
+}
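
The job server presumably reads this file through the Typesafe Config library added in SparkBuild.scala below. Here is a minimal sketch of how an optional override file could be merged over these classpath defaults; loadConfig and its signature are illustrative, not the actual job server code:

    import java.io.File
    import com.typesafe.config.{Config, ConfigFactory}

    // Hypothetical sketch: merge a user-supplied config file (if any) over the
    // bundled application.conf defaults that ConfigFactory.load() picks up.
    def loadConfig(configPath: Option[String]): Config = {
      val defaults = ConfigFactory.load()  // application.conf + reference.conf from classpath
      configPath match {
        case Some(path) =>
          ConfigFactory.parseFile(new File(path)).withFallback(defaults).resolve()
        case None => defaults
      }
    }

    // e.g. loadConfig(None).getInt("spark.jobserver.port")  // => 8090 from the defaults above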
3 project/SparkBuild.scala
@@ -319,12 +319,13 @@ object SparkBuild extends Build {
)
)
- def jobServerSettings = sharedSettings ++ Seq(
+ def jobServerSettings = sharedSettings ++ Revolver.settings ++ Seq(
name := "spark-job-server",
scalacOptions += "-Ydependent-method-types", // Needed for Spray with Scala 2.9
resolvers += "spray repo" at "http://repo.spray.io",
libraryDependencies ++= Seq(
"ch.qos.logback" % "logback-classic" % "1.0.7",
+ "org.slf4j" % "log4j-over-slf4j" % "1.6.6", // akka-app uses logback, so no log4j
"com.typesafe" % "config" % "1.0.0",
"org.joda" % "joda-convert" % "1.2",
"joda-time" % "joda-time" % "2.1",
