Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with the HTTPS clone URL or the Subversion checkout URL.
Download ZIP
Browse files

Merge branch 'master' of github.com:twitter/zipkin into b3slow

  • Loading branch information...
commit 75a8da929842900111a9277c264df27984a65dd6 2 parents 8d39b63 + f417c9a
@johanoskarsson johanoskarsson authored
Showing with 142 additions and 93 deletions.
  1. +1 −1  README.md
  2. +15 −5 bin/git-pull-request.rb
  3. +2 −2 zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/DependencyTree.scala
  4. +1 −1  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/ExpensiveEndpoints.scala
  5. +1 −1  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/MemcacheRequest.scala
  6. +0 −42 zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/MostCommonCalls.scala
  7. +1 −1  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/PopularAnnotations.scala
  8. +1 −1  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/PopularKeys.scala
  9. +1 −1  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/ServerResponsetime.scala
  10. +1 −1  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/Timeouts.scala
  11. +73 −0 zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/WhaleReport.scala
  12. +1 −1  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/WorstRuntimes.scala
  13. +0 −1  zipkin-hadoop/src/scripts/run.sh
  14. +1 −1  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/DependencyTreeSpec.scala
  15. +1 −1  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/ExpensiveEndpointsSpec.scala
  16. +1 −1  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/MemcacheRequestSpec.scala
  17. +1 −1  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/PopularAnnotationsSpec.scala
  18. +1 −1  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/PopularKeysSpec.scala
  19. +1 −2  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/ServerResponsetimeSpec.scala
  20. +1 −1  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/TimeoutsSpec.scala
  21. +2 −2 zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/UtilSpec.scala
  22. +34 −24 ...hadoop/src/test/scala/com/twitter/zipkin/hadoop/{CommonServiceCallsSpec.scala → WhaleReportSpec.scala}
  23. +1 −1  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/WorstRuntimesSpec.scala
View
2  README.md
@@ -89,7 +89,7 @@ Once the trace data arrives at the Zipkin collector daemon we check that it's va
We settled on Cassandra for storage. It's scalable, has a flexible schema and is heavily used within Twitter. We did try to make this component pluggable though, so should not be hard to put in something else here.
### Zipkin query daemon
-Once the data is stored and indexed we need a way to extract it. This is where the query daemon comes in, providing the users with a simple Thrift api for finding and retrieving traces. See <a href="https://github.com/twitter/zipkin/blob/master/zipkin-thrift/src/main/thrift/zipkin.thrift">the Thrift file</a>.
+Once the data is stored and indexed we need a way to extract it. This is where the query daemon comes in, providing the users with a simple Thrift api for finding and retrieving traces. See <a href="https://github.com/twitter/zipkin/blob/master/zipkin-thrift/src/main/thrift/zipkinQuery.thrift">the Thrift file</a>.
### UI
Most of our users access the data via our UI. It's a Rails app that uses <a href="http://d3js.org/">D3</a> to visualize the trace data. Note that there is no built in authentication in the UI.
View
20 bin/git-pull-request.rb
@@ -190,8 +190,16 @@ def merge(number)
puts " into " + color(base['ref'] + ": " + base['sha'], :green)
with_temporary_branch(base) do |tmp|
- puts "Merging head to temporary branch"
- Git.run("merge --squash #{head['sha']}")
+ is_fork = head['repo']['fork']
+
+ if is_fork:
+ remote = head['repo']['git_url']
+ ref = head['ref']
+ Git.run("pull --squash #{remote} #{ref}")
+ else
+ puts "Merging head to temporary branch"
+ Git.run("merge --squash #{head['sha']}")
+ end
commit_msg = merge_commit_msg(pull_request, issue)
@@ -205,9 +213,11 @@ def merge(number)
puts "Pushing to origin"
Git.run("push origin master")
- puts "Deleting local and remote branches"
- Git.run("push origin :#{head['ref']}")
- Git.run("branch -D #{head['ref']}")
+ if !is_fork:
+ puts "Deleting local and remote branches"
+ Git.run("push origin :#{head['ref']}")
+ Git.run("branch -D #{head['ref']}")
+ end
end
end
View
4 zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/DependencyTree.scala
@@ -19,14 +19,13 @@ package com.twitter.zipkin.hadoop
import com.twitter.scalding._
import cascading.pipe.joiner._
import com.twitter.zipkin.gen.{SpanServiceName, BinaryAnnotation, Span, Annotation}
-import sources.{PrepTsvSource, PreprocessedSpanSourceTest, PreprocessedSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PrepTsvSource, PreprocessedSpanSourceTest, PreprocessedSpanSource, Util}
/**
* Find out how often services call each other throughout the entire system
*/
class DependencyTree(args: Args) extends Job(args) with DefaultDateRangeJob {
-
val spanInfo = PreprocessedSpanSource()
.read
.filter(0) { s : SpanServiceName => s.isSetParent_id() }
@@ -40,5 +39,6 @@ class DependencyTree(args: Args) extends Job(args) with DefaultDateRangeJob {
val spanInfoWithParent = spanInfo
.joinWithSmaller('parent_id -> 'id_1, idName, joiner = new LeftJoin)
.groupBy('service, 'name_1){ _.size('count) }
+ .groupBy('service){ _.sortBy('count) }
.write(Tsv(args("output")))
}
View
2  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/ExpensiveEndpoints.scala
@@ -19,7 +19,7 @@ package com.twitter.zipkin.hadoop
import com.twitter.zipkin.gen.{Constants, SpanServiceName, Annotation}
import cascading.pipe.joiner.LeftJoin
import com.twitter.scalding.{Tsv, DefaultDateRangeJob, Job, Args}
-import sources.{PrepTsvSource, Util, PreprocessedSpanSource}
+import com.twitter.zipkin.hadoop.sources.{PrepTsvSource, Util, PreprocessedSpanSource}
/**
* Per service call (i.e. pair of services), finds the average run time (in microseconds) of that service call
View
2  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/MemcacheRequest.scala
@@ -20,7 +20,7 @@ import com.twitter.scalding._
import java.nio.ByteBuffer
import java.util.Arrays
import com.twitter.zipkin.gen.{BinaryAnnotation, Span, Constants, Annotation}
-import sources.{PrepNoNamesSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PrepNoNamesSpanSource, Util}
/**
* Find out how often each service does memcache accesses
View
42 zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/MostCommonCalls.scala
@@ -1,42 +0,0 @@
-/*
- * Copyright 2012 Twitter Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.twitter.zipkin.hadoop
-
-import com.twitter.scalding._
-import cascading.pipe.joiner.LeftJoin
-import com.twitter.zipkin.gen.{SpanServiceName}
-import sources.{PrepTsvSource, PreprocessedSpanSource, Util}
-
-/**
- * For each service finds the services that it most commonly calls
- */
-
-class MostCommonCalls(args : Args) extends Job(args) with DefaultDateRangeJob {
- val spanInfo = PreprocessedSpanSource()
- .read
- .mapTo(0 -> ('id, 'parent_id, 'service))
- { s: SpanServiceName => (s.id, s.parent_id, s.service_name) }
-
- val idName = PrepTsvSource()
- .read
-
- val result = spanInfo
- .joinWithSmaller('parent_id -> 'id_1, idName, joiner = new LeftJoin)
- .groupBy('service, 'name_1){ _.size('count) }
- .groupBy('service){ _.sortBy('count) }
- .write(Tsv(args("output")))
-}
View
2  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/PopularAnnotations.scala
@@ -18,7 +18,7 @@ package com.twitter.zipkin.hadoop
import com.twitter.scalding._
-import sources.PreprocessedSpanSource
+import com.twitter.zipkin.hadoop.sources.PreprocessedSpanSource
import com.twitter.zipkin.gen.{SpanServiceName, Annotation}
/**
View
2  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/PopularKeys.scala
@@ -18,7 +18,7 @@ package com.twitter.zipkin.hadoop
import com.twitter.scalding._
-import sources.{PreprocessedSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PreprocessedSpanSource, Util}
import com.twitter.zipkin.gen.{SpanServiceName, BinaryAnnotation, Span}
/**
View
2  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/ServerResponsetime.scala
@@ -17,7 +17,7 @@ package com.twitter.zipkin.hadoop
import com.twitter.scalding._
-import sources.SpanSource
+import com.twitter.zipkin.hadoop.sources.SpanSource
import com.twitter.zipkin.gen.{Span, Constants, Annotation}
import scala.collection.JavaConverters._
import java.nio.ByteBuffer
View
2  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/Timeouts.scala
@@ -19,7 +19,7 @@ package com.twitter.zipkin.hadoop
import com.twitter.scalding._
import cascading.pipe.joiner.LeftJoin
import com.twitter.zipkin.gen.{SpanServiceName, Annotation}
-import sources.{PrepTsvSource, PreprocessedSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PrepTsvSource, PreprocessedSpanSource, Util}
/**
* Find which services timeout the most
View
73 zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/WhaleReport.scala
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012 Twitter Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.twitter.zipkin.hadoop
+
+import com.twitter.zipkin.gen.{BinaryAnnotation, Constants, SpanServiceName, Annotation}
+import com.twitter.scalding.{Tsv, DefaultDateRangeJob, Job, Args}
+import com.twitter.zipkin.hadoop.sources.{Util, PreprocessedSpanSource}
+import java.nio.ByteBuffer
+
+
+/**
+ * Finds traces that have 500 Internal Server Errors and finds the spans in those traces that have retries or timeouts
+ */
+
+class WhaleReport(args: Args) extends Job(args) with DefaultDateRangeJob {
+
+ val ERRORS = List("finagle.timeout", "finagle.retry")
+
+ val spanInfo = PreprocessedSpanSource()
+ .read
+ .mapTo(0 ->('trace_id, 'id, 'service, 'annotations, 'binary_annotations))
+ { s: SpanServiceName => (s.trace_id, s.id, s.service_name, s.annotations.toList, s.binary_annotations.toList)
+ }
+
+ val errorTraces = spanInfo
+ .project('trace_id, 'binary_annotations)
+ .filter('binary_annotations) {
+ bal: List[BinaryAnnotation] =>
+ bal.exists({ ba: BinaryAnnotation => {
+ ba != null && ba.value != null && cleanString(ba.value) == WhaleReport.ERROR_MESSAGE
+ }
+ })
+ }
+ .project('trace_id)
+ .rename('trace_id -> 'trace_id_1)
+
+ val filtered = spanInfo
+ .flatMap('annotations -> 'error) { al : List[Annotation] => { al.find { a : Annotation => ERRORS.contains(a.value) } } }
+ .joinWithSmaller('trace_id -> 'trace_id_1, errorTraces)
+ .discard('trace_id_1)
+ .groupBy('trace_id) { _.toList[String]('service -> 'serviceList) }
+ .write(Tsv(args("output")))
+
+ // When converting from ByteBuffer to String some null values seem to be passed along, so we clean them
+ private def cleanString(bb : ByteBuffer) : String = {
+ val chars = (new String(Util.getArrayFromBuffer(bb))).toCharArray
+ var result = ""
+ for (char <- chars) {
+ if (char.asInstanceOf[Int] != 0) {
+ result += char
+ }
+ }
+ result
+ }
+}
+
+object WhaleReport {
+ val ERROR_MESSAGE = "500 Internal Server Error"
+}
View
2  zipkin-hadoop/src/main/scala/com/twitter/zipkin/hadoop/WorstRuntimes.scala
@@ -18,7 +18,7 @@ package com.twitter.zipkin.hadoop
import com.twitter.scalding._
import com.twitter.zipkin.gen.{Span, Constants, Annotation}
-import sources.{PrepNoNamesSpanSource}
+import com.twitter.zipkin.hadoop.sources.{PrepNoNamesSpanSource}
/**
* Obtain the IDs and the durations of the one hundred service calls which take the longest per service
View
1  zipkin-hadoop/src/scripts/run.sh
@@ -66,6 +66,5 @@ $DIR/run_job.sh -j PopularKeys -d $ENDTIME -o $OUTPUT/PopularKeys &
$DIR/run_job.sh -j PopularAnnotations -d $ENDTIME -o $OUTPUT/PopularAnnotations &
$DIR/run_job.sh -j FindIDtoName -p -d $ENDTIME
$DIR/run_job.sh -j DependencyTree -d $ENDTIME -o $OUTPUT/DependencyTree &
-$DIR/run_job.sh -j MostCommonCalls -d $ENDTIME -o $OUTPUT/MostCommonCalls &
$DIR/run_job.sh -j Timeouts -s "--error_type finagle.timeout" -o $OUTPUT/Timeouts -d $ENDTIME &
$DIR/run_job.sh -j Timeouts -s "--error_type finagle.retry" -o $OUTPUT/Retries -d $ENDTIME &
View
2  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/DependencyTreeSpec.scala
@@ -22,7 +22,7 @@ import com.twitter.scalding._
import gen.AnnotationType
import scala.collection.JavaConverters._
import collection.mutable.HashMap
-import sources.{PrepTsvSource, PreprocessedSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PrepTsvSource, PreprocessedSpanSource, Util}
/**
* Tests that DependencyTree finds all service calls and how often per pair
View
2  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/ExpensiveEndpointsSpec.scala
@@ -29,7 +29,7 @@ import com.twitter.scalding.RichDate
import com.twitter.zipkin.gen.AnnotationType
import com.twitter.scalding.JobTest
import com.twitter.scalding.Tsv
-import sources.{PrepTsvSource, PreprocessedSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PrepTsvSource, PreprocessedSpanSource, Util}
/**
* Tests that ExpensiveEndpointSpec finds the average run time of each service
View
2  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/MemcacheRequestSpec.scala
@@ -20,7 +20,7 @@ import org.specs.Specification
import com.twitter.zipkin.gen
import com.twitter.scalding._
import gen.AnnotationType
-import sources.{PrepNoNamesSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PrepNoNamesSpanSource, Util}
import scala.collection.JavaConverters._
import collection.mutable.HashMap
import java.nio.ByteBuffer
View
2  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/PopularAnnotationsSpec.scala
@@ -20,7 +20,7 @@ import org.specs.Specification
import com.twitter.zipkin.gen
import com.twitter.scalding._
import gen.AnnotationType
-import sources.{PreprocessedSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PreprocessedSpanSource, Util}
import scala.collection.JavaConverters._
import scala.collection.mutable._
View
2  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/PopularKeysSpec.scala
@@ -20,7 +20,7 @@ import org.specs.Specification
import com.twitter.zipkin.gen
import com.twitter.scalding._
import gen.AnnotationType
-import sources.{PreprocessedSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PreprocessedSpanSource, Util}
import scala.collection.JavaConverters._
import scala.collection.mutable._
View
3  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/ServerResponsetimeSpec.scala
@@ -4,8 +4,7 @@ package com.twitter.zipkin.hadoop
import org.specs.Specification
import com.twitter.zipkin.gen
import com.twitter.scalding._
-import gen.AnnotationType
-import sources.{SpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{SpanSource, Util}
import scala.collection.JavaConverters._
class ServerResponsetimeSpec extends Specification with TupleConversions {
View
2  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/TimeoutsSpec.scala
@@ -23,7 +23,7 @@ import com.twitter.scalding._
import gen.AnnotationType
import scala.collection.JavaConverters._
import scala.collection.mutable._
-import sources.{PrepTsvSource, PreprocessedSpanSource, Util}
+import com.twitter.zipkin.hadoop.sources.{PrepTsvSource, PreprocessedSpanSource, Util}
/**
* Tests that Timeouts finds the service calls where timeouts occur and how often
View
4 zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/UtilSpec.scala
@@ -1,9 +1,9 @@
package com.twitter.zipkin.hadoop
import org.specs.Specification
-import sources.Util
+import com.twitter.zipkin.hadoop.sources.Util
import com.twitter.zipkin.gen
-import gen.{AnnotationType, Annotation}
+import com.twitter.zipkin.gen.{AnnotationType, Annotation}
import scala.collection.JavaConverters._
class UtilSpec extends Specification {
View
58 ...ipkin/hadoop/CommonServiceCallsSpec.scala → ...itter/zipkin/hadoop/WhaleReportSpec.scala
@@ -21,53 +21,63 @@ import com.twitter.zipkin.gen
import com.twitter.scalding._
import gen.AnnotationType
import scala.collection.JavaConverters._
-import collection.mutable.HashMap
-import sources.{PrepTsvSource, PreprocessedSpanSource, Util}
+import collection.mutable.{HashMap, HashSet}
+import com.twitter.zipkin.hadoop.sources.{PrepTsvSource, PreprocessedSpanSource, Util}
+import java.nio.ByteBuffer
+import java.util.Arrays
/**
-* Tests that MostCommonCalls finds the most commonly called services per service
-*/
+ * Tests that WhaleReport finds traces with 500 Internal Server Errors and finds the spans in those traces with finagle.retry or finagle.timeout annotations.
+ */
-class CommonServiceCallsSpec extends Specification with TupleConversions {
+class WhaleReportSpec extends Specification with TupleConversions {
noDetailedDiffs()
implicit val dateRange = DateRange(RichDate(123), RichDate(321))
+ val buf = ByteBuffer.allocate(100);
+
+ // Create a character ByteBuffer
+ val cbuf = buf.asCharBuffer();
+
+ // Write a string
+ cbuf.put(WhaleReport.ERROR_MESSAGE);
+
val endpoint = new gen.Endpoint(123, 666, "service")
val endpoint1 = new gen.Endpoint(123, 666, "service1")
val endpoint2 = new gen.Endpoint(123, 666, "service2")
val span = new gen.SpanServiceName(12345, "methodcall", 666,
- List(new gen.Annotation(1000, "cs").setHost(endpoint), new gen.Annotation(2000, "sr").setHost(endpoint), new gen.Annotation(3000, "ss").setHost(endpoint), new gen.Annotation(4000, "cr").setHost(endpoint)).asJava,
- List[gen.BinaryAnnotation]().asJava, "service")
- val span1 = new gen.SpanServiceName(123456, "methodcall", 666,
+ List(new gen.Annotation(1000, "finagle.timeout").setHost(endpoint), new gen.Annotation(1001, "sr").setHost(endpoint), new gen.Annotation(1002, "ss").setHost(endpoint), new gen.Annotation(1003, "cr").setHost(endpoint)).asJava,
+ List[gen.BinaryAnnotation]( new gen.BinaryAnnotation("http.responsecode", buf, AnnotationType.BOOL ) ).asJava, "service")
+ val span1 = new gen.SpanServiceName(12345, "methodcall", 666,
List(new gen.Annotation(1000, "cs").setHost(endpoint2), new gen.Annotation(2000, "sr").setHost(endpoint2), new gen.Annotation(4000, "ss").setHost(endpoint2), new gen.Annotation(5000, "cr").setHost(endpoint2)).asJava,
List(new gen.BinaryAnnotation("bye", null, AnnotationType.BOOL)).asJava, "service2")
- val span2 = new gen.SpanServiceName(1234567, "methodcall", 666,
- List(new gen.Annotation(1000, "cs").setHost(endpoint2), new gen.Annotation(3000, "cr").setHost(endpoint2)).asJava,
+ val span2 = new gen.SpanServiceName(12345, "methodcall", 666,
+ List(new gen.Annotation(1000, "finagle.retry").setHost(endpoint2), new gen.Annotation(3000, "cr").setHost(endpoint2)).asJava,
List(new gen.BinaryAnnotation("bye", null, AnnotationType.BOOL)).asJava, "service2")
- val spans = (Util.repeatSpan(span, 30, 32, 1) ++ Util.repeatSpan(span1, 50, 100, 32))
+ val spans = (Util.repeatSpan(span, 0, 32, 1) ++ Util.repeatSpan(span1, 0, 100, 32) ++ Util.repeatSpan(span2, 0, 200, 100))
- "MostCommonCalls" should {
- "Return the most common service calls" in {
- JobTest("com.twitter.zipkin.hadoop.MostCommonCalls").
+ "WhaleReport" should {
+ "Return fail whales!" in {
+ JobTest("com.twitter.zipkin.hadoop.WhaleReport").
arg("input", "inputFile").
arg("output", "outputFile").
arg("date", "2012-01-01T01:00").
source(PreprocessedSpanSource(), spans).
source(PrepTsvSource(), Util.getSpanIDtoNames(spans)).
- sink[(String, String, Long)](Tsv("outputFile")) {
- val result = new HashMap[String, Long]()
- result("service, null") = 0
- result("service2, null") = 0
- result("service2, service1") = 0
+ sink[(Long, List[String])](Tsv("outputFile")) {
+ var result = new HashSet[String]()
+ var actual = new HashSet[String]()
+ result += "service"
+ result += "service2"
outputBuffer => outputBuffer foreach { e =>
- result(e._1 + ", " + e._2) = e._3
+ e._1 mustEqual 12345
+ for (name <- e._2)
+ actual += name
}
- result("service, null") mustEqual 31
- result("service2, null") mustEqual 20
- result("service2, service") mustEqual 31
- }
+ actual mustEqual result
}.run.finish
}
}
+}
View
2  zipkin-hadoop/src/test/scala/com/twitter/zipkin/hadoop/WorstRuntimesSpec.scala
@@ -22,7 +22,7 @@ import com.twitter.scalding._
import scala.collection.JavaConverters._
import collection.mutable.HashMap
import com.twitter.zipkin.gen.AnnotationType
-import sources.{Util, PrepNoNamesSpanSource}
+import com.twitter.zipkin.hadoop.sources.{Util, PrepNoNamesSpanSource}
/**
* Tests that WorstRuntimes finds the spans which take the longest to run
Please sign in to comment.
Something went wrong with that request. Please try again.