Exceptions and shutdown of prometheus application #3454
Please update to 2.0.0. There have been many bug fixes in the following beta and release candidate versions.

On Sat, Nov 11, 2017, 11:50 shubhamitc wrote:
What did you do?
Checked the log file and found the exception.

What did you expect to see?
Prometheus should not shut down.

What did you see instead? Under which circumstances?
After running Prometheus for a few days, we found this exception.

Environment
Production
- System information:

  [root@prometheus1 prometheus]# uname -srm
  Linux 2.6.32-642.el6.x86_64 x86_64

- Prometheus version:

  [root@prometheus1 prometheus]# ./prometheus --version
  prometheus, version 2.0.0-beta.0 (branch: master, revision: 2b5d915)
    build user:       root@fc24486243df
    build date:       20170712-12:21:13
    go version:       go1.8.3
  [root@prometheus1 prometheus]#
- Prometheus configuration file:
# my global config
global:
  scrape_interval: 15s
  evaluation_interval: 30s
  # scrape_timeout is set to the global default (10s).
  external_labels:
    monitor: codelab
    foo: bar

rule_files:
  - "/opt/prometheus/prometheus/rules/*.yml"
  - "my/*.rules"

remote_write:
  - url: http://remote1/push
    write_relabel_configs:
      - source_labels: [__name__]
        regex: expensive.*
        action: drop
  - url: http://remote2/push
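# Illustrative note (not part of the original config): write_relabel_configs
# apply per remote-write URL, so a series with a hypothetical name such as
# expensive_http_request_duration_seconds (matching expensive.*) is dropped
# before it reaches http://remote1/push, while http://remote2/push still
# receives all samples.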
scrape_configs:
  - job_name: prometheus
    honor_labels: true
    # scrape_interval is defined by the configured global (15s).
    # scrape_timeout is defined by the global default (10s).
    # metrics_path defaults to '/metrics'.
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']
        labels:
          machine: prometheus-system
          env: prod
          type: monitor

  # Not working; issue is posted at kawamuray/prometheus-kafka-consumer-group-exporter#34
  #- job_name: "kafka"
  #  static_configs:
  #    - targets: ['192.168.239.38:7979']

  - job_name: "rabbitmq"
    static_configs:
      - targets: ['192.168.239.38:9090']
        labels:
          env: staging
          type: ppp

  - job_name: "rabbitmq-prod"
    static_configs:
      - targets: ['192.168.239.38:9091']
        labels:
          env: prod
          stack: ppp
          type: app
          service: rabbitmq

  - job_name: "smartalert"
    metrics_path: '/metrics.php'
    scrape_interval: 600s
    static_configs:
      - targets: ['192.168.249.9']
        labels:
          env: staging
          stack: ppp
          type: monitoring
          service: smartalert

  - job_name: "node"
    static_configs:
      - targets: ['localhost:9100']
        labels:
          machine: monitoring
          type: monitoring
          env: prod

  - job_name: "redis"
    static_configs:
      - targets: ['192.168.239.38:9121', '192.168.254.136:9121']
        labels:
          env: prod
          type: ppp

  - job_name: "elasticsearch"
    scrape_timeout: 240s
    scrape_interval: 245s
    static_configs:
      - targets: ['192.168.250.34:9108']
        labels:
          env: prod
          service: es
          stack: ppp
          job: elasticsearch

  - job_name: consul-sd-prod
    metrics_path: '/metrics'
    consul_sd_configs:
      - server: '192.168.250.34:8500'
        services: ['node_exporter']
        scheme: http
        # tls_config:
        #   ca_file: valid_ca_file
        #   cert_file: valid_cert_file
        #   key_file: valid_key_file
        #   insecure_skip_verify: false
    relabel_configs:
      - source_labels: ['__meta_consul_service']
        regex: '(.*)'
        target_label: 'job'
        replacement: '$1'
      - source_labels: ['__meta_consul_node']
        regex: '(.*)'
        target_label: 'instance'
        replacement: '$1'
      - source_labels: ['__meta_sd_consul_tags']
        separator: ','
        regex: 'label:([^=]+)=([^,]+)'
        target_label: '${1}'
        replacement: '${2}'
      - source_labels: ['__meta_sd_consul_tags']
        separator: ','
        regex: 'label:([^=]+)=([^,]+)'
        target_label: '${1}'
        replacement: '${2}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){0}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){1}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){2}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){3}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){4}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
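      # Illustrative note on the tag rules above: Prometheus joins all Consul
      # tags of a target with the separator and wraps the result in leading and
      # trailing separators, so a hypothetical service tagged
      # ["env=prod", "type=app"] yields
      #   __meta_consul_tags = ",env=prod,type=app,"
      # Each (?:[^,]+,){N} rule skips N tags and turns the next key=value pair
      # into a label, producing env="prod" and type="app" in this example.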
  - job_name: consul-sd-web
    metrics_path: '/prometheus'
    consul_sd_configs:
      - server: '192.168.250.34:8500'
        services: ["commonservice", "baas-server"]
        scheme: http
        # tls_config:
        #   ca_file: valid_ca_file
        #   cert_file: valid_cert_file
        #   key_file: valid_key_file
        #   insecure_skip_verify: false
    relabel_configs:
      - source_labels: ['__meta_consul_service']
        regex: '(.*)'
        target_label: 'job'
        replacement: '$1'
      - source_labels: ['__meta_consul_node']
        regex: '(.*)'
        target_label: 'instance'
        replacement: '$1'
      - source_labels: ['__meta_sd_consul_tags']
        separator: ','
        regex: 'label:([^=]+)=([^,]+)'
        target_label: '${1}'
        replacement: '${2}'
      - source_labels: ['__meta_sd_consul_tags']
        separator: ','
        regex: 'label:([^=]+)=([^,]+)'
        target_label: '${1}'
        replacement: '${2}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){0}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){1}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){2}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,).*(?:contextPath)=([^,]+),.*'
        replacement: '${1}/prometheus'
        target_label: '__metrics_path__'
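      # Illustrative note: with a hypothetical joined tag list
      # ",foo=bar,contextPath=/baas-server," the last rule rewrites
      # __metrics_path__ to "/baas-server/prometheus"; the leading (?:[^,]+,)
      # means at least one other tag must precede contextPath for it to match.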
  #- job_name: consul-sd-commonservice
  #  metrics_path: '/commonservice/prometheus'
  #  consul_sd_configs:
  #    - server: '192.168.250.34:8500'
  #      services: ['commonservice']
  #      scheme: http
  ##     tls_config:
  ##       ca_file: valid_ca_file
  ##       cert_file: valid_cert_file
  ##       key_file: valid_key_file
  ##       insecure_skip_verify: false
  #
  #  relabel_configs:
  #    - source_labels: ['__meta_consul_service']
  #      regex: '(.*)'
  #      target_label: 'job'
  #      replacement: '$1'
  #    - source_labels: ['__meta_consul_node']
  #      regex: '(.*)'
  #      target_label: 'instance'
  #      replacement: '$1'
  #    - source_labels: ['__meta_sd_consul_tags']
  #      separator: ','
  #      regex: 'label:([^=]+)=([^,]+)'
  #      target_label: '${1}'
  #      replacement: '${2}'
  #    - source_labels: [__meta_consul_tags]
  #      regex: ',(?:[^,]+,){0}([^=]+)=([^,]+),.*'
  #      replacement: '${2}'
  #      target_label: '${1}'
  #    - source_labels: [__meta_consul_tags]
  #      regex: ',(?:[^,]+,){1}([^=]+)=([^,]+),.*'
  #      replacement: '${2}'
  #      target_label: '${1}'
  #    - source_labels: [__meta_consul_tags]
  #      regex: ',(?:[^,]+,){2}([^=]+)=([^,]+),.*'
  #      replacement: '${2}'
  #      target_label: '${1}'

  - job_name: consul-sd-staging
    metrics_path: '/metrics'
    consul_sd_configs:
      - server: '192.168.250.2:8500'
        services: ['nginx', 'cache', 'mysql', 'web', 'node_exporter']
        scheme: http
        # tls_config:
        #   ca_file: valid_ca_file
        #   cert_file: valid_cert_file
        #   key_file: valid_key_file
        #   insecure_skip_verify: false
    relabel_configs:
      - source_labels: ['__meta_consul_service']
        regex: '(.*)'
        target_label: 'job'
        replacement: 'staging-$1'
      - source_labels: ['__meta_consul_node']
        regex: '(.*)'
        target_label: 'instance'
        replacement: '$1'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:contextPath)=([^,]+),'
        replacement: '${1}/prometheus'
        target_label: '__metrics_path__'
      - source_labels: ['__meta_sd_consul_tags']
        separator: ','
        regex: 'label:([^=]+)=([^,]+)'
        target_label: '${1}'
        replacement: '${2}'
      - source_labels: ['__meta_sd_consul_tags']
        separator: ','
        regex: 'label:([^=]+)=([^,]+)'
        target_label: '${1}'
        replacement: '${2}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){0}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){1}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){2}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'

  - job_name: consul-sd-staging-2
    metrics_path: '/vuconnect/prometheus'
    consul_sd_configs:
      - server: '192.168.250.2:8500'
        services: ['vuconnect']
        scheme: http
        # tls_config:
        #   ca_file: valid_ca_file
        #   cert_file: valid_cert_file
        #   key_file: valid_key_file
        #   insecure_skip_verify: false
    relabel_configs:
      - source_labels: ['__meta_consul_service']
        regex: '(.*)'
        target_label: 'job'
        replacement: 'staging-$1'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,)(?:contextPath)=([^,]+),.*'
        replacement: '${1}/prometheus'
        target_label: '__metrics_path__'
      - source_labels: ['__meta_consul_node']
        regex: '(.*)'
        target_label: 'instance'
        replacement: '$1'
      - source_labels: ['__meta_sd_consul_tags']
        separator: ','
        regex: 'label:([^=]+)=([^,]+)'
        target_label: '${1}'
        replacement: '${2}'
      - source_labels: ['__meta_sd_consul_tags']
        separator: ','
        regex: 'label:([^=]+)=([^,]+)'
        target_label: '${1}'
        replacement: '${2}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){0}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){1}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      - source_labels: [__meta_consul_tags]
        regex: ',(?:[^,]+,){2}([^=]+)=([^,]+),.*'
        replacement: '${2}'
        target_label: '${1}'
      # - source_labels: [__meta_consul_address, __meta_consul_service_port]
      #   separator: ';'
      #   regex: '(.*);(.*)'
      #   replacement: '${1}:${2}'
      #   target_label: '__address__'
  # A scrape configuration for running Prometheus on a Kubernetes cluster.
  # This uses separate scrape configs for cluster components (i.e. API server, node)
  # and services to allow each to use different authentication configs.
  #
  # Kubernetes labels will be added as Prometheus labels on metrics via the
  # `labelmap` relabeling action.
  #
  # If you are using Kubernetes 1.7.2 or earlier, please take note of the comments
  # for the kubernetes-cadvisor job; you will need to edit or remove this job.

  # Scrape config for API servers.
  #
  # Kubernetes exposes API servers as endpoints of the default/kubernetes
  # service, so this uses the `endpoints` role and relabelling to keep only
  # the endpoints associated with the default/kubernetes service on the
  # default named port `https`. This works for single API server deployments as
  # well as HA API server deployments.
  - job_name: 'kubernetes-apiservers'

    # Default to scraping over https. If required, just disable this or change to
    # `http`.
    scheme: https

    kubernetes_sd_configs:
      - role: endpoints
        api_server: 'https://x.x.x.x'
        #ca_file: ./ca.crt
        basic_auth:
          username: 'admin'
          password: 'xxxxxxxxxx'

    # This TLS & bearer token file config is used to connect to the actual scrape
    # endpoints for cluster components. This is separate from the discovery auth
    # configuration because discovery & scraping are two separate concerns in
    # Prometheus. The discovery auth config is automatic if Prometheus runs inside
    # the cluster. Otherwise, more config options have to be provided within the
    # <kubernetes_sd_config>.
    tls_config:
      ca_file: ./ca.crt
      # If your node certificates are self-signed or use a different CA to the
      # master CA, then disable certificate verification below. Note that
      # certificate verification is an integral part of a secure infrastructure,
      # so this should only be disabled in a controlled environment. You can
      # disable certificate verification by uncommenting the line below.
      #
      # insecure_skip_verify: true
    bearer_token_file: ./token

    # Keep only the default/kubernetes service endpoints for the https port. This
    # will add a target for each API server that Kubernetes adds an endpoint to
    # the default/kubernetes service.
    relabel_configs:
      - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: default;kubernetes;https
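      # Illustrative note: the keep rule above joins its source labels with ";"
      # (the default separator), so only targets whose joined value is exactly
      # "default;kubernetes;https" survive; every other discovered endpoint is
      # dropped.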
  # Scrape config for nodes (kubelet).
  #
  # Rather than connecting directly to the node, the scrape is proxied through the
  # Kubernetes apiserver. This means it will work if Prometheus is running out of
  # the cluster, or can't connect to nodes for some other reason (e.g. because of
  # firewalling).
  - job_name: 'kubernetes-nodes'

    # Default to scraping over https. If required, just disable this or change to
    # `http`.
    scheme: https

    # This TLS & bearer token file config is used to connect to the actual scrape
    # endpoints for cluster components. This is separate from the discovery auth
    # configuration because discovery & scraping are two separate concerns in
    # Prometheus. The discovery auth config is automatic if Prometheus runs inside
    # the cluster. Otherwise, more config options have to be provided within the
    # <kubernetes_sd_config>.
    tls_config:
      ca_file: ./ca.crt
    bearer_token_file: ./token

    kubernetes_sd_configs:
      - role: node
        api_server: 'https://x.x.x.x'
        #ca_file: ./ca.crt
        basic_auth:
          username: 'admin'
          password: 'xxxxxxxxxx'

    relabel_configs:
      - action: labelmap
        regex: __meta_kubernetes_node_label_(.+)
      - target_label: __address__
        replacement: kubernetes.default.svc:443
      - source_labels: [__meta_kubernetes_node_name]
        regex: (.+)
        target_label: __metrics_path__
        replacement: /api/v1/nodes/${1}/proxy/metrics
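      # Illustrative note: after these rules, a hypothetical node named
      # "node-1" is scraped through the API server proxy at
      # https://kubernetes.default.svc:443/api/v1/nodes/node-1/proxy/metrics
      # rather than being contacted directly.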
  ## Scrape config for Kubelet cAdvisor.
  ##
  ## This is required for Kubernetes 1.7.3 and later, where cAdvisor metrics
  ## (those whose names begin with 'container_') have been removed from the
  ## Kubelet metrics endpoint. This job scrapes the cAdvisor endpoint to
  ## retrieve those metrics.
  ##
  ## In Kubernetes 1.7.0-1.7.2, these metrics are only exposed on the cAdvisor
  ## HTTP endpoint; use "replacement: /api/v1/nodes/${1}:4194/proxy/metrics"
  ## in that case (and ensure cAdvisor's HTTP server hasn't been disabled with
  ## the --cadvisor-port=0 Kubelet flag).
  ##
  ## This job is not necessary in Kubernetes 1.6 and earlier versions and should
  ## be removed there, or it will cause the metrics to be scraped twice.
  #- job_name: 'kubernetes-cadvisor'
  #
  #  # Default to scraping over https. If required, just disable this or change to
  #  # `http`.
  #  scheme: https
  #
  #  # This TLS & bearer token file config is used to connect to the actual scrape
  #  # endpoints for cluster components. This is separate from the discovery auth
  #  # configuration because discovery & scraping are two separate concerns in
  #  # Prometheus. The discovery auth config is automatic if Prometheus runs inside
  #  # the cluster. Otherwise, more config options have to be provided within the
  #  # <kubernetes_sd_config>.
  #  tls_config:
  #    ca_file: ./ca.crt
  #  bearer_token_file: ./token
  #
  #  kubernetes_sd_configs:
  #    - role: node
  #      api_server: 'https://x.x.x.x'
  #      #ca_file: ./ca.crt
  #      basic_auth:
  #        username: 'admin'
  #        password: 'xxxxxxxxxx'
  #
  #  relabel_configs:
  #    - action: labelmap
  #      regex: __meta_kubernetes_node_label_(.+)
  #    - target_label: __address__
  #      replacement: kubernetes.default.svc:443
  #    - source_labels: [__meta_kubernetes_node_name]
  #      regex: (.+)
  #      target_label: __metrics_path__
  #      replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

  # Scrape config for service endpoints.
  #
  # The relabeling allows the actual service scrape endpoint to be configured
  # via the following annotations:
  #
  # * `prometheus.io/scrape`: Only scrape services that have a value of `true`.
  # * `prometheus.io/scheme`: If the metrics endpoint is secured, then you will need
  #   to set this to `https` and most likely set the `tls_config` of the scrape config.
  # * `prometheus.io/path`: If the metrics path is not `/metrics`, override this.
  # * `prometheus.io/port`: If the metrics are exposed on a different port than the
  #   service, then set this appropriately.
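  # Illustrative note: a hypothetical Service annotated with
  #   prometheus.io/scrape: "true"
  #   prometheus.io/port: "9102"
  #   prometheus.io/path: "/custom/metrics"
  # would be kept by the rules below and scraped at /custom/metrics on port 9102.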
  - job_name: 'kubernetes-service-endpoints'

    tls_config:
      ca_file: ./ca.crt
    bearer_token_file: ./token

    kubernetes_sd_configs:
      - role: endpoints
        api_server: 'https://x.x.x.x'
        #ca_file: ./ca.crt
        basic_auth:
          username: 'admin'
          password: 'xxxxxxxxxx'

    relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
        action: replace
        target_label: __scheme__
        regex: (https?)
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
        action: replace
        target_label: __address__
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        action: replace
        target_label: kubernetes_name
  # Example scrape config for probing services via the Blackbox Exporter.
  #
  # The relabeling allows the actual service scrape endpoint to be configured
  # via the following annotations:
  #
  # * `prometheus.io/probe`: Only probe services that have a value of `true`.
  - job_name: 'kubernetes-services'

    tls_config:
      ca_file: ./ca.crt
    bearer_token_file: ./token

    metrics_path: /probe
    params:
      module: [http_2xx]

    kubernetes_sd_configs:
      - role: service
        api_server: 'https://x.x.x.x'
        #ca_file: ./ca.crt
        basic_auth:
          username: 'admin'
          password: 'xxxxxxxxxx'

    relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
        action: keep
        regex: true
      - source_labels: [__address__]
        target_label: __param_target
      - target_label: __address__
        replacement: blackbox
      - source_labels: [__param_target]
        target_label: instance
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_service_name]
        target_label: kubernetes_name
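      # Illustrative note: for a hypothetical discovered service address
      # "myservice.default.svc", these rules turn the scrape into a request to
      # http://blackbox/probe?module=http_2xx&target=myservice.default.svc,
      # with instance="myservice.default.svc" on the resulting series.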
  # Example scrape config for pods.
  #
  # The relabeling allows the actual pod scrape endpoint to be configured via the
  # following annotations:
  #
  # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`.
  # * `prometheus.io/path`: If the metrics path is not `/metrics`, override this.
  # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the
  #   pod's declared ports (default is a port-free target if none are declared).
  - job_name: 'kubernetes-pods'

    tls_config:
      ca_file: ./ca.crt
    bearer_token_file: ./token

    kubernetes_sd_configs:
      - role: pod
        api_server: 'https://x.x.x.x'
        #ca_file: ./ca.crt
        basic_auth:
          username: 'admin'
          password: 'xxxxxxxxxx'

    relabel_configs:
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
        action: keep
        regex: true
      - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
        action: replace
        target_label: __metrics_path__
        regex: (.+)
      - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
        action: replace
        regex: ([^:]+)(?::\d+)?;(\d+)
        replacement: $1:$2
        target_label: __address__
      - action: labelmap
        regex: __meta_kubernetes_pod_label_(.+)
      - source_labels: [__meta_kubernetes_namespace]
        action: replace
        target_label: kubernetes_namespace
      - source_labels: [__meta_kubernetes_pod_name]
        action: replace
        target_label: kubernetes_pod_name
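      # Illustrative note: the __address__ rewrite above matches a joined value
      # like "10.0.0.5:8080;9102" (address;port-annotation, hypothetical values)
      # with ([^:]+)(?::\d+)?;(\d+) and produces "10.0.0.5:9102", so the
      # annotated port wins over the pod's declared port.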
  - job_name: probe_ssh
    scrape_interval: 60s
    metrics_path: /probe
    params:
      module: [ssh_banner]
      target: ['192.168.250.33:22', '192.168.250.34:22']
    scheme: http
    static_configs:
      - targets:
          - 192.168.250.34:9115
    relabel_configs:
      # Ensure port is 22, pass as URL parameter
      - source_labels: [__address__]
        regex: (.*)(:.*)?
        replacement: ${1}:22
        target_label: __param_target
      # Make instance label the target
      - source_labels: [__param_target]
        target_label: instance
      # Actually talk to the blackbox exporter though
      - target_label: __address__
        replacement: 192.168.250.34:9115
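      # Illustrative note: because (.*) in the first rule is greedy, the static
      # target "192.168.250.34:9115" becomes
      # __param_target = "192.168.250.34:9115:22" (the existing port is not
      # stripped before ":22" is appended), and the resulting request goes to
      # http://192.168.250.34:9115/probe?module=ssh_banner&target=192.168.250.34:9115:22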
alerting:
  alertmanagers:
    - scheme: http
      static_configs:
        - targets:
            - "localhost:9093"
- Alertmanager configuration file:
insert configuration here (if relevant to the issue)
- Logs:
unexpected fault address 0x7f43d6a55c88
fatal error: fault
[signal SIGSEGV: segmentation violation code=0x1 addr=0x7f43d6a55c88 pc=0x458fd4]
goroutine 24 [running]:
runtime.throw(0x1c06d41, 0x5)
    /usr/local/go/src/runtime/panic.go:596 +0x95 fp=0xc4263b72a0 sp=0xc4263b7280
runtime.sigpanic()
    /usr/local/go/src/runtime/signal_unix.go:297 +0x28c fp=0xc4263b72f0 sp=0xc4263b72a0
runtime.aeshashbody()
    /usr/local/go/src/runtime/asm_amd64.s:946 +0x514 fp=0xc4263b72f8 sp=0xc4263b72f0
runtime.mapassign(0x1979ea0, 0xc42378e5d0, 0xc4263b7408, 0xc42a38ed6c)
    /usr/local/go/src/runtime/hashmap.go:504 +0x84 fp=0xc4263b7398 sp=0xc4263b72f8
github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb.(*indexWriter).AddSeries(0xc42e38d440, 0xc400004042, 0xc420fc1560, 0x9, 0x9, 0xc431bf82c8, 0x1, 0x1, 0x0, 0x0)
    /go/src/github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb/index.go:262 +0xf7 fp=0xc4263b7438 sp=0xc4263b7398
github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb.populateBlock(0xc4277062a0, 0x1, 0x1, 0x2851980, 0xc42e38d440, 0x2841f40, 0xc4244e9220, 0x429ea6, 0x0, 0x0)
    /go/src/github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb/compact.go:373 +0x889 fp=0xc4263b7940 sp=0xc4263b7438
github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb.(*compactor).write(0xc420295740, 0xd47dee34949f5f01, 0xbb5d52489a573fbb, 0xc4277062a0, 0x1, 0x1, 0x0, 0x0)
    /go/src/github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb/compact.go:267 +0x5a2 fp=0xc4263b7b98 sp=0xc4263b7940
github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb.(*compactor).Write(0xc420295740, 0x2858460, 0xc422004000, 0x0, 0x1)
    /go/src/github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb/compact.go:229 +0x211 fp=0xc4263b7cb0 sp=0xc4263b7b98
github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb.(*DB).compact(0xc4204d6a90, 0xc42000f000, 0x0, 0x0)
    /go/src/github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb/db.go:355 +0x3fc fp=0xc4263b7df8 sp=0xc4263b7cb0
github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb.(*DB).run(0xc4204d6a90)
    /go/src/github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb/db.go:268 +0x3c6 fp=0xc4263b7fd8 sp=0xc4263b7df8
runtime.goexit()
    /usr/local/go/src/runtime/asm_amd64.s:2197 +0x1 fp=0xc4263b7fe0 sp=0xc4263b7fd8
created by github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb.Open
    /go/src/github.com/prometheus/prometheus/vendor/github.com/prometheus/tsdb/db.go:237 +0x397

goroutine 1 [select, 2653 minutes]:
main.main()
    /go/src/github.com/prometheus/prometheus/cmd/prometheus/main.go:332 +0x4c16

goroutine 3 [syscall, 2653 minutes]:
os/signal.signal_recv(0x0)
    /usr/local/go/src/runtime/sigqueue.go:116 +0x104
os/signal.loop()
    /usr/local/go/src/os/signal/signal_unix.go:22 +0x22
created by os/signal.init.1
    /usr/local/go/src/os/signal/signal_unix.go:28 +0x41
grobie closed this Nov 12, 2017
lock bot commented Mar 23, 2019:

This thread has been automatically locked since there has not been any recent activity after it was closed. Please open a new issue for related bugs.
lock bot locked and limited conversation to collaborators Mar 23, 2019