Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -67,4 +67,14 @@ To use the Prometheus Service with Grafana, configure your Grafana instance to h
- User: `user` (this is ignored, but must be non-empty)
- Password: <Service Token> (the same one your retrieval agent uses)

## Developing

To build & test, install minikube, and run:

eval $(minikube docker-env)
make
kubectl create -f ./k8s

Cortex will sit behind an nginx instance exposed on port 30080. A scrape job is deployed so that Cortex monitors itself. Try it:

http://192.168.99.100:30080/api/prom/api/v1/query?query=up
34 changes: 34 additions & 0 deletions k8s/consul-dep.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
---
# Single-node Consul server used as the ring/KV store for Cortex components.
# NOTE(review): extensions/v1beta1 Deployments are deprecated in later
# Kubernetes versions; apps/v1 additionally requires spec.selector — confirm
# target cluster version before upgrading.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: consul
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: consul
    spec:
      containers:
        - name: consul
          image: consul:0.7.1
          imagePullPolicy: IfNotPresent
          # One self-bootstrapping server with the UI enabled, listening on
          # all interfaces so other pods can reach it.
          args:
            - agent
            - -ui
            - -server
            - -client=0.0.0.0
            - -bootstrap
          env:
            # Disable Consul's phone-home version check.
            - name: CHECKPOINT_DISABLE
              value: "1"
          # NOTE(review): the "-noscrape" name suffix presumably opts these
          # ports out of Prometheus scraping — verify against the scrape config.
          ports:
            - name: server-noscrape
              containerPort: 8300
            - name: serf-noscrape
              containerPort: 8301
            - name: client-noscrape
              containerPort: 8400
            - name: http-noscrape
              containerPort: 8500
11 changes: 11 additions & 0 deletions k8s/consul-svc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
---
# Exposes Consul's HTTP API (port 8500) to the other Cortex components,
# which reach it at consul.default.svc.cluster.local:8500.
apiVersion: v1
kind: Service
metadata:
  name: consul
spec:
  ports:
    - name: http
      port: 8500
  selector:
    name: consul
24 changes: 24 additions & 0 deletions k8s/distributor-dep.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
---
# Cortex distributor: receives remote-write samples and forwards them to
# ingesters discovered via the Consul ring.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: distributor
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: distributor
    spec:
      containers:
        - name: distributor
          image: weaveworks/cortex-distributor
          imagePullPolicy: IfNotPresent
          args:
            - -log.level=debug
            - -server.log-success=true
            - -server.http-listen-port=80
            - -consul.hostname=consul.default.svc.cluster.local:8500
            # Replication factor 1 matches the single-ingester dev setup.
            - -distributor.replication-factor=1
          ports:
            - containerPort: 80
10 changes: 10 additions & 0 deletions k8s/distributor-svc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# Exposes the distributor's HTTP endpoint; nginx proxies /api/prom/push here.
apiVersion: v1
kind: Service
metadata:
  name: distributor
spec:
  ports:
    - port: 80
  selector:
    name: distributor
20 changes: 20 additions & 0 deletions k8s/dynamodb-dep.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
---
# Local DynamoDB emulator used as the chunk index store in this dev setup.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: dynamodb
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: dynamodb
      annotations:
        # NOTE(review): the conventional Prometheus annotation key is
        # "prometheus.io/scrape" (slash, not dots) — verify this dotted form
        # matches the scrape config in use.
        prometheus.io.scrape: "false"
    spec:
      containers:
        - name: dynamodb
          image: deangiberson/aws-dynamodb-local
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 8000
10 changes: 10 additions & 0 deletions k8s/dynamodb-svc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# Exposes the DynamoDB emulator; ingester/querier reach it via
# dynamodb.default.svc.cluster.local:8000.
apiVersion: v1
kind: Service
metadata:
  name: dynamodb
spec:
  ports:
    - port: 8000
  selector:
    name: dynamodb
60 changes: 60 additions & 0 deletions k8s/ingester-dep.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
---
# Cortex ingester: holds recent samples in memory and flushes chunks to the
# (emulated) S3/DynamoDB backends.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: ingester
spec:
  replicas: 1

  # Ingesters are not ready for at least 1 min
  # after creation. This has to be in sync with
  # the ring timeout value, as this will stop a
  # stampede of new ingesters if we should lose
  # some.
  minReadySeconds: 60

  # Having maxSurge 0 and maxUnavailable 1 means
  # the deployment will update one ingester at a time
  # as it will have to stop one (making one unavailable)
  # before it can start one (surge of zero)
  strategy:
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1

  template:
    metadata:
      labels:
        name: ingester
    spec:
      # Give ingesters 40 minutes grace to flush chunks and exit cleanly.
      # Service is available during this time, as long as we don't stop
      # too many ingesters at once.
      terminationGracePeriodSeconds: 2400

      containers:
        - name: ingester
          image: weaveworks/cortex-ingester
          imagePullPolicy: IfNotPresent
          args:
            - -server.log-success=true
            - -server.http-listen-port=80
            - -consul.hostname=consul.default.svc.cluster.local:8500
            - -s3.url=s3://abc:123@s3.default.svc.cluster.local:4569
            - -dynamodb.url=dynamodb://user:pass@dynamodb.default.svc.cluster.local:8000/cortex
            - -dynamodb.periodic-table.prefix=cortex_weekly_
            - -dynamodb.periodic-table.start=2017-01-06
            - -dynamodb.daily-buckets-from=2017-01-10
            - -dynamodb.base64-buckets-from=2017-01-17
            - -dynamodb.v4-schema-from=2017-02-05
            - -memcached.hostname=memcached.default.svc.cluster.local
            - -memcached.timeout=100ms
            - -memcached.service=memcached
          ports:
            - containerPort: 80
          # Only report ready once the ingester has joined the ring.
          readinessProbe:
            httpGet:
              path: /ready
              port: 80
            initialDelaySeconds: 15
            timeoutSeconds: 1
10 changes: 10 additions & 0 deletions k8s/ingester-svc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# Exposes the ingester's HTTP endpoint within the cluster.
apiVersion: v1
kind: Service
metadata:
  name: ingester
spec:
  ports:
    - port: 80
  selector:
    name: ingester
27 changes: 27 additions & 0 deletions k8s/memcached-dep.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
---
# Memcached used as the chunk cache by ingester and querier.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: memcached
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: memcached
      annotations:
        prometheus.io.port: "9150"
    spec:
      containers:
        - name: memcached
          image: memcached:1.4.25
          imagePullPolicy: IfNotPresent
          # Flag and value must be separate argv entries: a single list item
          # "-m 64" is passed to memcached as one token and is not parsed.
          args:
            - "-m"
            - "64"  # Maximum memory to use, in megabytes. 64MB is default.
            - "-p"
            - "11211"  # Default port, but being explicit is nice.
          # Merged into one list: the original had two "ports:" keys, and
          # duplicate mapping keys are last-wins in most YAML parsers, which
          # silently dropped the 11211 clients port.
          # NOTE(review): the memcached image itself does not listen on 9150;
          # presumably a memcached-exporter sidecar is expected — verify.
          ports:
            - name: clients
              containerPort: 11211
            - name: prom
              containerPort: 9150
16 changes: 16 additions & 0 deletions k8s/memcached-svc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
---
apiVersion: v1
kind: Service
metadata:
  name: memcached
spec:
  # The memcache client uses DNS to get a list of memcached servers and then
  # uses a consistent hash of the key to determine which server to pick.
  clusterIP: None
  ports:
    - name: memcached
      port: 11211
    - name: prom
      port: 9150
  selector:
    name: memcached
39 changes: 39 additions & 0 deletions k8s/nginx-config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
---
# nginx.conf for the reverse proxy fronting Cortex: writes go to the
# distributor, reads to the querier.
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx
data:
  nginx.conf: |-
    worker_processes  5;  ## Default: 1
    error_log  /dev/stderr;
    pid        nginx.pid;
    worker_rlimit_nofile 8192;

    events {
      worker_connections  4096;  ## Default: 1024
    }

    http {
      default_type application/octet-stream;
      log_format   main '$remote_addr - $remote_user [$time_local]  $status '
        '"$request" $body_bytes_sent "$http_referer" '
        '"$http_user_agent" "$http_x_forwarded_for"';
      access_log   /dev/stderr  main;
      sendfile     on;
      tcp_nopush   on;
      # kube-dns, so the *.svc.cluster.local names below resolve.
      resolver 10.0.0.10;

      server { # simple reverse-proxy
        listen       80;
        # Single-tenant dev setup: hard-code org ID 0 for all requests.
        proxy_set_header X-Scope-OrgID 0;

        # Writes (remote-write pushes) go to the distributor.
        location = /api/prom/push {
          proxy_pass      http://distributor.default.svc.cluster.local$request_uri;
        }

        # All other Prometheus API reads go to the querier.
        location ~ /api/prom/.* {
          proxy_pass      http://querier.default.svc.cluster.local$request_uri;
        }
      }
    }
26 changes: 26 additions & 0 deletions k8s/nginx-dep.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
---
# Reverse proxy in front of Cortex; its config comes from the "nginx"
# ConfigMap mounted over /etc/nginx.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: nginx
    spec:
      containers:
        - name: nginx
          image: nginx
          imagePullPolicy: IfNotPresent
          ports:
            - name: http
              containerPort: 80
          volumeMounts:
            - name: config-volume
              mountPath: /etc/nginx
      volumes:
        - name: config-volume
          configMap:
            name: nginx
13 changes: 13 additions & 0 deletions k8s/nginx-svc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
---
# NodePort service exposing the nginx proxy on port 30080 of the node
# (the README's http://<minikube-ip>:30080 entry point).
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  ports:
    - name: http
      port: 80
      nodePort: 30080
  selector:
    name: nginx
33 changes: 33 additions & 0 deletions k8s/querier-dep.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
---
# Cortex querier: answers PromQL reads from ingesters and the chunk store.
# The storage/schema flags mirror the ingester's so both agree on layout.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: querier
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: querier
    spec:
      containers:
        - name: querier
          image: weaveworks/cortex-querier
          imagePullPolicy: IfNotPresent
          args:
            - -server.log-success=true
            - -server.http-listen-port=80
            - -consul.hostname=consul.default.svc.cluster.local:8500
            - -s3.url=s3://abc:123@s3.default.svc.cluster.local:4569
            - -dynamodb.url=dynamodb://user:pass@dynamodb.default.svc.cluster.local:8000/cortex
            - -dynamodb.periodic-table.prefix=cortex_weekly_
            - -dynamodb.periodic-table.start=2017-01-06
            - -dynamodb.daily-buckets-from=2017-01-10
            - -dynamodb.base64-buckets-from=2017-01-17
            - -dynamodb.v4-schema-from=2017-02-05
            - -memcached.hostname=memcached.default.svc.cluster.local
            - -memcached.timeout=100ms
            - -memcached.service=memcached
            - -distributor.replication-factor=1
          ports:
            - containerPort: 80
10 changes: 10 additions & 0 deletions k8s/querier-svc.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
---
# Exposes the querier's HTTP endpoint; nginx proxies /api/prom/* reads here.
apiVersion: v1
kind: Service
metadata:
  name: querier
spec:
  ports:
    - port: 80
  selector:
    name: querier
Loading