Skip to content
This repository has been archived by the owner on Sep 20, 2022. It is now read-only.

Commit

Permalink
Close #28: [HIVEMALL-30] Temporarily ignore a streaming test in Spark
Browse files Browse the repository at this point in the history
  • Loading branch information
maropu committed Jan 26, 2017
1 parent 79f92f4 commit e86c8a0
Showing 1 changed file with 33 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ private[this] class TestInputStream[T: ClassTag](
}
}

final class HivemallFeatureOpsSuite extends HivemallFeatureQueryTest {
final class HivemallOpsWithFeatureSuite extends HivemallFeatureQueryTest {

  // This implicit value is used in `HivemallStreamingOps`
implicit val sqlCtx = hiveContext
Expand All @@ -83,7 +83,38 @@ final class HivemallFeatureOpsSuite extends HivemallFeatureQueryTest {
}
}

test("streaming") {
// scalastyle:off line.size.limit

/**
 * The test below sometimes fails (it is too flaky), so we temporarily ignore it.
* The stacktrace of this failure is:
*
* HivemallOpsWithFeatureSuite:
* Exception in thread "broadcast-exchange-60" java.lang.OutOfMemoryError: Java heap space
* at java.nio.HeapByteBuffer.<init>(HeapByteBuffer.java:57)
* at java.nio.ByteBuffer.allocate(ByteBuffer.java:331)
* at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$4.apply(TorrentBroadcast.scala:231)
* at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$4.apply(TorrentBroadcast.scala:231)
* at org.apache.spark.util.io.ChunkedByteBufferOutputStream.allocateNewChunkIfNeeded(ChunkedByteBufferOutputStream.scala:78)
* at org.apache.spark.util.io.ChunkedByteBufferOutputStream.write(ChunkedByteBufferOutputStream.scala:65)
* at net.jpountz.lz4.LZ4BlockOutputStream.flushBufferedData(LZ4BlockOutputStream.java:205)
* at net.jpountz.lz4.LZ4BlockOutputStream.finish(LZ4BlockOutputStream.java:235)
* at net.jpountz.lz4.LZ4BlockOutputStream.close(LZ4BlockOutputStream.java:175)
* at java.io.ObjectOutputStream$BlockDataOutputStream.close(ObjectOutputStream.java:1827)
* at java.io.ObjectOutputStream.close(ObjectOutputStream.java:741)
* at org.apache.spark.serializer.JavaSerializationStream.close(JavaSerializer.scala:57)
* at org.apache.spark.broadcast.TorrentBroadcast$$anonfun$blockifyObject$1.apply$mcV$sp(TorrentBroadcast.scala:238)
* at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1296)
* at org.apache.spark.broadcast.TorrentBroadcast$.blockifyObject(TorrentBroadcast.scala:237)
* at org.apache.spark.broadcast.TorrentBroadcast.writeBlocks(TorrentBroadcast.scala:107)
* at org.apache.spark.broadcast.TorrentBroadcast.<init>(TorrentBroadcast.scala:86)
* at org.apache.spark.broadcast.TorrentBroadcastFactory.newBroadcast(TorrentBroadcastFactory.scala:34)
* ...
*/

// scalastyle:on line.size.limit

ignore("streaming") {
import sqlCtx.implicits._

// We assume we build a model in advance
Expand Down

0 comments on commit e86c8a0

Please sign in to comment.