Merge pull request #90 from kongfei605/scrape_k8s
add out_of_cluster_scrape config && disable prometheus
kongfei605 committed Jul 12, 2022
2 parents 9ff7ea1 + a9fa334 commit e1ae270
Showing 4 changed files with 63 additions and 17 deletions.
1 change: 0 additions & 1 deletion conf/in_cluster_scrape.yaml
@@ -15,7 +15,6 @@ scrape_configs:
       - role: endpoints
     scheme: https
     tls_config:
-      ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
       insecure_skip_verify: true
     authorization:
       credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token
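The removed ca_file line drops the in-cluster CA bundle: since insecure_skip_verify: true already disables certificate validation, the CA file was redundant. For reference, a minimal sketch of the stricter alternative that keeps verification enabled (not what this commit ships; it assumes the default in-cluster service-account paths):

    scheme: https
    tls_config:
      # validate the apiserver certificate against the in-cluster CA bundle
      ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      insecure_skip_verify: false
    authorization:
      credentials_file: /var/run/secrets/kubernetes.io/serviceaccount/token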
30 changes: 15 additions & 15 deletions conf/logs.toml
@@ -1,36 +1,36 @@
[logs]
## just a placeholder API key
api_key = "ef4ahfbwzwwtlwfpbertgq1i6mq0ab1q"
## whether to enable log collection
enable = false
## server address to receive logs: http/tcp/kafka; only kafka supports multiple brokers, as comma-separated ip:port pairs
send_to = "127.0.0.1:17878"
## protocol used to send logs: http/tcp/kafka
send_type = "http"
topic = "flashcatcloud"
## whether to compress logs before sending
use_compress = false
## whether to send logs over TLS
send_with_tls = false
## max seconds to wait before sending a batch of logs
batch_wait = 5
## directory where log offsets are saved
run_path = "/opt/categraf/run"
## max number of log files collected at the same time
open_files_limit = 100
## interval in seconds for scanning directories for new log files
scan_period = 10
## udp read buffer size
frame_size = 9000
## whether to collect logs from all containers
collect_container_all = true
## global processing rules
[[logs.Processing_rules]]
## configuration of a single log collection item
[[logs.items]]
## file/journald/tcp/udp
type = "file"
## when type=file, path is required; when type=journald/tcp/udp, port is required
path = "/opt/tomcat/logs/*.txt"
source = "tomcat"
service = "my_service"
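As the send_to comment notes, only the kafka transport accepts multiple brokers. A minimal sketch of that variant (broker addresses and topic below are illustrative, not part of this commit):

[logs]
enable = true
## kafka brokers may be listed comma-separated; http/tcp take a single address
send_to = "10.0.0.1:9092,10.0.0.2:9092,10.0.0.3:9092"
send_type = "kafka"
topic = "categraf_logs"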
47 changes: 47 additions & 0 deletions conf/out_of_cluster_scrape.yaml
@@ -0,0 +1,47 @@
global:
  scrape_interval: 15s
  #external_labels:
  #  cluster: test
  #  replica: 0

scrape_configs:
  - job_name: "categraf-out"
    static_configs:
      - targets: ["172.16.6.171:8080"]
  - job_name: "categraf-out-apiserver"
    metrics_path: "/metrics"
    kubernetes_sd_configs:
      - role: endpoints
        kubeconfig_file: /path/to/.kube/config
    scheme: https
    tls_config:
      insecure_skip_verify: true
    authorization:
      credentials_file: /path/to/token
    relabel_configs:
      - source_labels:
          [
            __meta_kubernetes_namespace,
            __meta_kubernetes_service_name,
            __meta_kubernetes_endpoint_port_name,
          ]
        action: keep
        regex: default;kubernetes;https
  - job_name: "categraf-out-coredns"
    metrics_path: "/metrics"
    kubernetes_sd_configs:
      - role: endpoints
        kubeconfig_file: /path/to/.kube/config
    scheme: http
    relabel_configs:
      - source_labels:
          [
            __meta_kubernetes_namespace,
            __meta_kubernetes_service_name,
            __meta_kubernetes_endpoint_port_name,
          ]
        action: keep
        regex: kube-system;kube-dns;metrics

remote_write:
  - url: 'http://172.31.62.213/prometheus/v1/write'
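Both kubernetes_sd jobs use the same relabel pattern: the three __meta_kubernetes_* source labels are joined with Prometheus's default ";" separator, and action: keep retains only targets whose joined value matches the regex. That is why the apiserver job scrapes only the default/kubernetes endpoints on the https port, and the coredns job only kube-system/kube-dns on the metrics port. A sketch of the same filter aimed at a hypothetical service (namespace, service, and port names are illustrative):

relabel_configs:
  - source_labels:
      [
        __meta_kubernetes_namespace,
        __meta_kubernetes_service_name,
        __meta_kubernetes_endpoint_port_name,
      ]
    action: keep
    # keep only the "http-metrics" port of service "my-app" in namespace "monitoring"
    regex: monitoring;my-app;http-metrics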
2 changes: 1 addition & 1 deletion conf/prometheus.toml
@@ -1,5 +1,5 @@
[prometheus]
-enable=true
+enable=false
scrape_config_file="/path/to/in_cluster_scrape.yaml"
## log level: debug, warn, info, error
log_level="info"
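With enable=false, categraf's embedded Prometheus agent is now off by default. To run the new out-of-cluster scrape instead, one would presumably re-enable it and point scrape_config_file at the file added above; a sketch, using the same placeholder path style as the commit:

[prometheus]
enable=true
scrape_config_file="/path/to/out_of_cluster_scrape.yaml"
## log level: debug, warn, info, error
log_level="info"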
