-
Notifications
You must be signed in to change notification settings - Fork 3.8k
Description
Please provide a detailed title (e.g. "Broker crashes when using TopN query with Bound filter" instead of just "Broker crashes").
Affected Version
Druid 0.15.0
Description
Exception
`2019-09-02T11:06:23,884 WARN [main] org.apache.curator.retry.ExponentialBackoffRetry - maxRetries too large (30). Pinning to 29
2019-09-02T11:06:32,510 WARN [main] com.sun.jersey.spi.inject.Errors - The following warnings have been detected with resource and/or provider classes:
WARNING: A HTTP GET method, public void org.apache.druid.server.http.SegmentListerResource.getSegments(long,long,long,javax.servlet.http.HttpServletRequest) throws java.io.IOException, MUST return a non-void type.
2019-09-02T11:06:38,980 WARN [task-runner-0-priority-0] org.apache.druid.segment.realtime.appenderator.BaseAppenderatorDriver - Cannot allocate segment for timestamp[2019-09-15T15:59:59.999Z], sequenceName[index_kafka_org_month_durations_e3cfbd595efc56f_0].
2019-09-02T11:06:38,982 ERROR [task-runner-0-priority-0] org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner - Encountered exception in run() before persisting.
org.apache.druid.java.util.common.ISE: Could not allocate segment for row with timestamp[2019-09-15T15:59:59.999Z]
at org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner.runInternal(SeekableStreamIndexTaskRunner.java:605) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner.run(SeekableStreamIndexTaskRunner.java:246) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at org.apache.druid.indexing.seekablestream.SeekableStreamIndexTask.run(SeekableStreamIndexTask.java:167) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at org.apache.druid.indexing.overlord.SingleTaskBackgroundRunner$SingleTaskBackgroundRunnerCallable.call(SingleTaskBackgroundRunner.java:419) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at org.apache.druid.indexing.overlord.SingleTaskBackgroundRunner$SingleTaskBackgroundRunnerCallable.call(SingleTaskBackgroundRunner.java:391) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at java.util.concurrent.FutureTask.run(FutureTask.java:266) [?:1.8.0_191]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_191]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_191]
at java.lang.Thread.run(Thread.java:748) [?:1.8.0_191]
2019-09-02T11:06:39,087 ERROR [task-runner-0-priority-0] org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner - Encountered exception while running task.
org.apache.druid.java.util.common.ISE: Could not allocate segment for row with timestamp[2019-09-15T15:59:59.999Z]
at org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner.runInternal(SeekableStreamIndexTaskRunner.java:605) ~[druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at org.apache.druid.indexing.seekablestream.SeekableStreamIndexTaskRunner.run(SeekableStreamIndexTaskRunner.java:246) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at org.apache.druid.indexing.seekablestream.SeekableStreamIndexTask.run(SeekableStreamIndexTask.java:167) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at org.apache.druid.indexing.overlord.SingleTaskBackgroundRunner$SingleTaskBackgroundRunnerCallable.call(SingleTaskBackgroundRunner.java:419) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at org.apache.druid.indexing.overlord.SingleTaskBackgroundRunner$SingleTaskBackgroundRunnerCallable.call(SingleTaskBackgroundRunner.java:391) [druid-indexing-service-0.15.0-incubating.jar:0.15.0-incubating]
at java.util.concurrent.FutureTask.run(FutureTask.java:266) [?:1.8.0_191]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) [?:1.8.0_191]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) [?:1.8.0_191]
at java.lang.Thread.run(Thread.java:748) [?:1.8.0_191]
Finished peon task
2019-09-02 11:06:39,238 pool-4-thread-1 ERROR Unable to register shutdown hook because JVM is shutting down. java.lang.IllegalStateException: Not started
at org.apache.druid.common.config.Log4jShutdown.addShutdownCallback(Log4jShutdown.java:47)
at org.apache.logging.log4j.core.impl.Log4jContextFactory.addShutdownCallback(Log4jContextFactory.java:273)
at org.apache.logging.log4j.core.LoggerContext.setUpShutdownHook(LoggerContext.java:256)
at org.apache.logging.log4j.core.LoggerContext.start(LoggerContext.java:216)
at org.apache.logging.log4j.core.impl.Log4jContextFactory.getContext(Log4jContextFactory.java:145)
at org.apache.logging.log4j.core.impl.Log4jContextFactory.getContext(Log4jContextFactory.java:41)
at org.apache.logging.log4j.LogManager.getContext(LogManager.java:182)
at org.apache.logging.log4j.spi.AbstractLoggerAdapter.getContext(AbstractLoggerAdapter.java:103)
at org.apache.logging.slf4j.Log4jLoggerFactory.getContext(Log4jLoggerFactory.java:43)
at org.apache.logging.log4j.spi.AbstractLoggerAdapter.getLogger(AbstractLoggerAdapter.java:42)
at org.apache.logging.slf4j.Log4jLoggerFactory.getLogger(Log4jLoggerFactory.java:29)
at org.slf4j.LoggerFactory.getLogger(LoggerFactory.java:358)
at org.slf4j.LoggerFactory.getLogger(LoggerFactory.java:383)
at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.<clinit>(LeaseRenewer.java:77)
at org.apache.hadoop.hdfs.DFSClient.getLeaseRenewer(DFSClient.java:480)
at org.apache.hadoop.hdfs.DFSClient.close(DFSClient.java:620)
at org.apache.hadoop.hdfs.DistributedFileSystem.close(DistributedFileSystem.java:1189)
at org.apache.hadoop.fs.FileSystem$Cache.closeAll(FileSystem.java:2910)
at org.apache.hadoop.fs.FileSystem$Cache$ClientFinalizer.run(FileSystem.java:2927)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
`
payload
{ "task": "index_kafka_org_month_durations_e3cfbd595efc56f_koghokai", "payload": { "type": "index_kafka", "id": "index_kafka_org_month_durations_e3cfbd595efc56f_koghokai", "resource": { "availabilityGroup": "index_kafka_org_month_durations_e3cfbd595efc56f", "requiredCapacity": 1 }, "dataSchema": { "dataSource": "org_month_durations", "parser": { "type": "string", "parseSpec": { "format": "json", "timestampSpec": { "column": "timestamp", "format": "millis" }, "dimensionsSpec": { "dimensions": [ { "name": "platform", "type": "string" }, { "name": "domain_id", "type": "string" }, { "name": "org_code", "type": "string" }, { "name": "org_id", "type": "string" } ] } } }, "metricsSpec": [ { "type": "count", "name": "count" }, { "type": "longSum", "name": "durations", "fieldName": "durations", "expression": null } ], "granularitySpec": { "type": "uniform", "segmentGranularity": "DAY", "queryGranularity": "MONTH", "rollup": true, "intervals": null }, "transformSpec": { "filter": null, "transforms": [] } }, "tuningConfig": { "type": "KafkaTuningConfig", "maxRowsInMemory": 1000000, "maxBytesInMemory": 0, "maxRowsPerSegment": 5000000, "maxTotalRows": null, "intermediatePersistPeriod": "PT10M", "basePersistDirectory": "/var/druid-tmp/tmp/1567421434649-0", "maxPendingPersists": 0, "indexSpec": { "bitmap": { "type": "concise" }, "dimensionCompression": "lz4", "metricCompression": "lz4", "longEncoding": "longs" }, "buildV9Directly": true, "reportParseExceptions": false, "handoffConditionTimeout": 0, "resetOffsetAutomatically": false, "segmentWriteOutMediumFactory": null, "intermediateHandoffPeriod": "P2147483647D", "logParseExceptions": false, "maxParseExceptions": 2147483647, "maxSavedParseExceptions": 0, "skipSequenceNumberAvailabilityCheck": false }, "ioConfig": { "type": "kafka", "taskGroupId": 0, "baseSequenceName": "index_kafka_org_month_durations_e3cfbd595efc56f", "startPartitions": { "type": "end", "stream": "org_month_durations", "topic": "org_month_durations", 
"partitionSequenceNumberMap": { "0": 648 }, "partitionOffsetMap": { "0": 648 } }, "endPartitions": { "type": "end", "stream": "org_month_durations", "topic": "org_month_durations", "partitionSequenceNumberMap": { "0": 9223372036854775807 }, "partitionOffsetMap": { "0": 9223372036854775807 } }, "startSequenceNumbers": { "type": "start", "stream": "org_month_durations", "topic": "org_month_durations", "partitionSequenceNumberMap": { "0": 648 }, "partitionOffsetMap": { "0": 648 }, "exclusivePartitions": [] }, "endSequenceNumbers": { "type": "end", "stream": "org_month_durations", "topic": "org_month_durations", "partitionSequenceNumberMap": { "0": 9223372036854775807 }, "partitionOffsetMap": { "0": 9223372036854775807 } }, "consumerProperties": { "bootstrap.servers": "172.16.1.23:9092" }, "pollTimeout": 100, "useTransaction": true, "minimumMessageTime": null, "maximumMessageTime": null }, "context": { "checkpoints": "{\"0\":{\"0\":648}}", "IS_INCREMENTAL_HANDOFF_SUPPORTED": true }, "groupId": "index_kafka_org_month_durations", "dataSource": "org_month_durations" } }
data
OrgDuration(1568563199999,ios,workplus1,a9a4d18231a14c3f974dcec0624b4b06,zcQ7KReg0Xe7sfCI5nZvkY,300000), OrgDuration(1568563199999,ios,workplus1,a9a4d18231a14c3f974dcec0624b4b06,zcQ7KReg0Xe7sfCI5nZvkY,300000), OrgDuration(1568563199999,ios,workplus1,a9a4d18231a14c3f974dcec0624b4b06,zcQ7KReg0Xe7sfCI5nZvkY,300000)