diff --git a/CHANGELOG.md b/CHANGELOG.md
index b1a6ae84f..9fa7ebed2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ NOTE: As semantic versioning states all 0.y.z releases can contain breaking chan
- [#113](https://github.com/kobsio/kobs/pull/113): Allow and improve customization of axis scaling.
- [#116](https://github.com/kobsio/kobs/pull/116): Open details when clicking on Jaeger chart nodes.
- [#119](https://github.com/kobsio/kobs/pull/119): Add Flux plugin to view and reconcile [Flux](https://fluxcd.io) resources.
+- [#122](https://github.com/kobsio/kobs/pull/122): Add ClickHouse plugin, to show logs ingested by the [kobsio/fluent-bit-clickhouse](https://github.com/kobsio/fluent-bit-clickhouse) Fluent Bit plugin.
### Fixed
diff --git a/app/package.json b/app/package.json
index c3c651fe8..62bc2561d 100644
--- a/app/package.json
+++ b/app/package.json
@@ -5,6 +5,7 @@
"proxy": "http://localhost:15220",
"dependencies": {
"@kobsio/plugin-applications": "*",
+ "@kobsio/plugin-clickhouse": "*",
"@kobsio/plugin-core": "*",
"@kobsio/plugin-dashboards": "*",
"@kobsio/plugin-elasticsearch": "*",
diff --git a/app/src/index.tsx b/app/src/index.tsx
index 34e5b5487..d40b4aaea 100644
--- a/app/src/index.tsx
+++ b/app/src/index.tsx
@@ -18,6 +18,7 @@ import fluxPlugin from '@kobsio/plugin-flux';
import opsgeniePlugin from '@kobsio/plugin-opsgenie';
import markdownPlugin from '@kobsio/plugin-markdown';
import rssPlugin from '@kobsio/plugin-rss';
+import clickhousePlugin from '@kobsio/plugin-clickhouse';
ReactDOM.render(
@@ -34,6 +35,7 @@ ReactDOM.render(
...opsgeniePlugin,
...markdownPlugin,
...rssPlugin,
+ ...clickhousePlugin,
}} />
,
document.getElementById('root')
diff --git a/cmd/kobs/plugins/plugins.go b/cmd/kobs/plugins/plugins.go
index 6c7c6687e..5e38653c0 100644
--- a/cmd/kobs/plugins/plugins.go
+++ b/cmd/kobs/plugins/plugins.go
@@ -12,6 +12,7 @@ import (
// Import all plugins, which should be used with the kobs instance. By default this are all first party plugins from
// the plugins folder.
"github.com/kobsio/kobs/plugins/applications"
+ "github.com/kobsio/kobs/plugins/clickhouse"
"github.com/kobsio/kobs/plugins/dashboards"
"github.com/kobsio/kobs/plugins/elasticsearch"
"github.com/kobsio/kobs/plugins/flux"
@@ -39,6 +40,7 @@ type Config struct {
Opsgenie opsgenie.Config `json:"opsgenie"`
Markdown markdown.Config `json:"markdown"`
RSS rss.Config `json:"rss"`
+ Clickhouse clickhouse.Config `json:"clickhouse"`
}
// Router implements the router for the plugins package. This only registeres one route which is used to return all the
@@ -63,8 +65,8 @@ func Register(clusters *clusters.Clusters, config Config) chi.Router {
router.Get("/", router.getPlugins)
// Register all plugins
- router.Mount(applications.Route, applications.Register(clusters, router.plugins, config.Applications))
router.Mount(resources.Route, resources.Register(clusters, router.plugins, config.Resources))
+ router.Mount(applications.Route, applications.Register(clusters, router.plugins, config.Applications))
router.Mount(teams.Route, teams.Register(clusters, router.plugins, config.Teams))
router.Mount(dashboards.Route, dashboards.Register(clusters, router.plugins, config.Dashboards))
router.Mount(prometheus.Route, prometheus.Register(clusters, router.plugins, config.Prometheus))
@@ -73,6 +75,7 @@ func Register(clusters *clusters.Clusters, config Config) chi.Router {
router.Mount(kiali.Route, kiali.Register(clusters, router.plugins, config.Kiali))
router.Mount(flux.Route, flux.Register(clusters, router.plugins, config.Flux))
router.Mount(opsgenie.Route, opsgenie.Register(clusters, router.plugins, config.Opsgenie))
+ router.Mount(clickhouse.Route, clickhouse.Register(clusters, router.plugins, config.Clickhouse))
router.Mount(markdown.Route, markdown.Register(clusters, router.plugins, config.Markdown))
router.Mount(rss.Route, rss.Register(clusters, router.plugins, config.RSS))
diff --git a/docs/configuration/plugins.md b/docs/configuration/plugins.md
index 2ae2b6954..5ad586065 100644
--- a/docs/configuration/plugins.md
+++ b/docs/configuration/plugins.md
@@ -5,6 +5,7 @@ Plugins can be used to extend the functions of kobs. They can be configured usin
| Field | Type | Description | Required |
| ----- | ---- | ----------- | -------- |
| applications | [Applications](#applications) | Configure the caching behaviour for the applications plugin. | No |
+| clickhouse | [[]ClickHouse](#clickhouse) | Configure multiple ClickHouse instances, which can be used within kobs. | No |
| elasticsearch | [[]Elasticsearch](#elasticsearch) | Configure multiple Elasticsearch instances, which can be used within kobs. | No |
| jaeger | [[]Jaeger](#jaeger) | Configure multiple Jaeger instances, which can be used within kobs. | No |
| kiali | [[]Kiali](#kiali) | Configure multiple Kiali instances, which can be used within kobs. | No |
@@ -28,6 +29,31 @@ plugins:
| topologyCacheDuration | [duration](https://pkg.go.dev/time#ParseDuration) | The duration for how long the topology graph should be cached. The default value is `1h`. | No |
| teamsCacheDuration | [duration](https://pkg.go.dev/time#ParseDuration) | The duration for how long the teams for an application should be cached. The default value is `1h`. | No |
+## ClickHouse
+
+The following config can be used to grant kobs access to a ClickHouse instance running at `clickhouse-clickhouse.logging.svc.cluster.local:9000`, where the logs are saved in a database named `logs`. To access ClickHouse the user `admin` with the password `admin` is used.
+
+```yaml
+plugins:
+ clickhouse:
+ - name: ClickHouse
+ description: ClickHouse is a fast open-source OLAP database management system.
+ address: clickhouse-clickhouse.logging.svc.cluster.local:9000
+ database: logs
+ username: admin
+ password: admin
+ type: logs
+```
+
+| Field | Type | Description | Required |
+| ----- | ---- | ----------- | -------- |
+| name | string | Name of the ClickHouse instance. | Yes |
+| displayName | string | Name of the ClickHouse as it is shown in the UI. | Yes |
+| description | string | Description of the ClickHouse instance. | No |
+| address | string | Address of the ClickHouse instance. | Yes |
+| username | string | Username to access a ClickHouse instance. | No |
+| password | string | Password to access a ClickHouse instance. | No |
+| type | string | The type which should be used for the ClickHouse instance. Currently the only supported value is `logs`. The `logs` mode should be used together with the [kobsio/fluent-bit-clickhouse](https://github.com/kobsio/fluent-bit-clickhouse) plugin to collect logs via Fluent Bit and save them in ClickHouse. | Yes |
## Elasticsearch
diff --git a/docs/plugins/assets/clickhouse-logs.png b/docs/plugins/assets/clickhouse-logs.png
new file mode 100644
index 000000000..0ea4b664c
Binary files /dev/null and b/docs/plugins/assets/clickhouse-logs.png differ
diff --git a/docs/plugins/clickhouse.md b/docs/plugins/clickhouse.md
new file mode 100644
index 000000000..97f444838
--- /dev/null
+++ b/docs/plugins/clickhouse.md
@@ -0,0 +1,97 @@
+# ClickHouse
+
+!!! warning
+ The ClickHouse plugin is in a very early stage and might be changed heavily in the future.
+
+The [ClickHouse](https://clickhouse.tech) plugin can be used to get the data from a configured ClickHouse instance.
+
+The ClickHouse plugin can be used together with the [kobsio/fluent-bit-clickhouse](https://github.com/kobsio/fluent-bit-clickhouse) output plugin for [Fluent Bit](https://fluentbit.io). For this the `type` in the plugin options must be set to `logs`. You can then use the specified [Query Syntax](#query-syntax) to get the logs from ClickHouse.
+
+![ClickHouse Logs](assets/clickhouse-logs.png)
+
+## Options
+
+The following options can be used for a panel with the ClickHouse plugin:
+
+| Field | Type | Description | Required |
+| ----- | ---- | ----------- | -------- |
+| type | string | Set the type for which you want to use the ClickHouse instance. Currently the only supported value is `logs`. | Yes |
+| queries | [[]Query](#query) | A list of queries, which can be selected by the user. | Yes |
+
+### Query
+
+| Field | Type | Description | Required |
+| ----- | ---- | ----------- | -------- |
+| name | string | A name for the ClickHouse query, which is displayed in the select box. | Yes |
+| query | string | The query which should be run against ClickHouse. See [Query Syntax](#query-syntax) for more information on the syntax, when ClickHouse is used in the `logs` mode. | Yes |
+| fields | []string | A list of fields to display in the results table. If this field is omitted, the whole document is displayed in the results table. | No |
+
+```yaml
+---
+apiVersion: kobs.io/v1beta1
+kind: Dashboard
+spec:
+ placeholders:
+ - name: namespace
+ description: The workload namespace
+ - name: app
+ description: The workloads app label
+ rows:
+ - size: -1
+ panels:
+ - title: Istio Logs
+ colSpan: 12
+ plugin:
+ name: clickhouse
+ options:
+ showChart: true
+ queries:
+ - name: Istio Logs
+ query: "namespace='bookinfo' _and_ app='bookinfo' _and_ container_name='istio-proxy' _and_ content.upstream_cluster~'inbound.*'"
+ fields:
+ - "pod_name"
+ - "content.authority"
+ - "content.route_name"
+ - "content.protocol"
+ - "content.method"
+ - "content.path"
+ - "content.response_code"
+ - "content.upstream_service_time"
+ - "content.bytes_received"
+ - "content.bytes_sent"
+```
+
+## Query Syntax
+
+### Operators
+
+kobs supports multiple operators which can be used in a query to retrieve logs from ClickHouse:
+
+| Operator | Description | Example |
+| -------- | ----------- | ------- |
+| `(` and `)` | Multiple terms or clauses can be grouped together with parentheses, to form sub-queries. | `cluster='kobs-demo' _and_ (namespace='bookinfo' _or_ namespace='istio-system')` |
+| `_not_` | Exclude the term from the query. | `cluster='kobs-demo' _and_ _not_ namespace='bookinfo'` |
+| `_and_` | Both terms must be included in the results. | `namespace='bookinfo' _and_ app='bookinfo'` |
+| `_or_` | The result can contain one of the given terms. | `namespace='bookinfo' _or_ namespace='istio-system'` |
+| `=` | The field must have this value. | `namespace='bookinfo'` |
+| `!=` | The field should not have this value. | `namespace!='bookinfo'` |
+| `>` | The value of the field must be greater than the specified value. | `content.response_code>499` |
+| `>=` | The value of the field must be greater than or equal to the specified value. | `content.response_code>=500` |
+| `<` | The value of the field must be lower than the specified value. | `content.response_code<500` |
+| `<=` | The value of the field must be lower than or equal to the specified value. | `content.response_code<=499` |
+| `~` | The value of the field must match the regular expression. | `content.upstream_cluster~'inbound.*'` |
+
+### Standard Fields
+
+- `timestamp`: The timestamp for when the log line was written.
+- `cluster`: The name of the cluster as it is set by Fluent Bit.
+- `namespace`: The namespace of the Pod.
+- `app`: The value of the `app` or `k8s-app` label of the Pod.
+- `pod_name`: The name of the Pod.
+- `container_name`: The name of the container from the Pod.
+- `host`: The name of the host where the Pod is running on.
+- `log`: The complete log line as it was written by the container.
+
+### Examples
+
+- `namespace='bookinfo' _and_ app='bookinfo' _and_ container_name='istio-proxy' _and_ content.upstream_cluster~'inbound.*'`: Select all inbound Istio logs from the bookinfo app in the bookinfo namespace.
diff --git a/go.mod b/go.mod
index 0a0028aa1..3b0366830 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module github.com/kobsio/kobs
go 1.16
require (
+ github.com/ClickHouse/clickhouse-go v1.4.5
github.com/fluxcd/helm-controller/api v0.11.2
github.com/fluxcd/kustomize-controller/api v0.13.3
github.com/fluxcd/pkg/apis/meta v0.10.1
@@ -17,6 +18,7 @@ require (
github.com/prometheus/common v0.30.0
github.com/sirupsen/logrus v1.8.1
github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.7.0
k8s.io/api v0.21.3
k8s.io/apiextensions-apiserver v0.21.3
k8s.io/apimachinery v0.21.3
diff --git a/go.sum b/go.sum
index ceeed0e0b..195bef945 100644
--- a/go.sum
+++ b/go.sum
@@ -45,6 +45,8 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/ClickHouse/clickhouse-go v1.4.5 h1:FfhyEnv6/BaWldyjgT2k4gDDmeNwJ9C4NbY/MXxJlXk=
+github.com/ClickHouse/clickhouse-go v1.4.5/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -80,6 +82,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
+github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
@@ -94,6 +98,8 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
@@ -330,6 +336,7 @@ github.com/jaegertracing/jaeger v1.15.1 h1:7QzNAXq+4ko9GtCjozDNAp2uonoABu+B2Rk94
github.com/jaegertracing/jaeger v1.15.1/go.mod h1:LUWPSnzNPGRubM8pk0inANGitpiMOOxihXx0+53llXI=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -361,6 +368,7 @@ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
@@ -372,6 +380,7 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -461,6 +470,7 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
+github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
diff --git a/mkdocs.yml b/mkdocs.yml
index 576669af3..7b4c560ad 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -35,6 +35,7 @@ nav:
- Plugins:
- Getting Started: plugins/getting-started.md
- Applications: plugins/applications.md
+ - ClickHouse: plugins/clickhouse.md
- Dashboards: plugins/dashboards.md
- Elasticsearch: plugins/elasticsearch.md
- Flux: plugins/flux.md
diff --git a/plugins/clickhouse/clickhouse.go b/plugins/clickhouse/clickhouse.go
new file mode 100644
index 000000000..fac623abb
--- /dev/null
+++ b/plugins/clickhouse/clickhouse.go
@@ -0,0 +1,145 @@
+package clickhouse
+
+import (
+ "net/http"
+ "strconv"
+
+ "github.com/kobsio/kobs/pkg/api/clusters"
+ "github.com/kobsio/kobs/pkg/api/middleware/errresponse"
+ "github.com/kobsio/kobs/pkg/api/plugins/plugin"
+ "github.com/kobsio/kobs/plugins/clickhouse/pkg/instance"
+
+ "github.com/go-chi/chi/v5"
+ "github.com/go-chi/render"
+ "github.com/sirupsen/logrus"
+)
+
+// Route is the route under which the plugin should be registered in our router for the rest api.
+const Route = "/clickhouse"
+
+var (
+ log = logrus.WithFields(logrus.Fields{"package": "clickhouse"})
+)
+
+// Config is the structure of the configuration for the clickhouse plugin.
+type Config []instance.Config
+
+type logsResponse struct {
+ Documents []map[string]interface{} `json:"documents"`
+ Fields []string `json:"fields"`
+ Offset int64 `json:"offset"`
+}
+
+// Router implements the router for the clickhouse plugin, which can be registered in the router for our rest api.
+type Router struct {
+ *chi.Mux
+ clusters *clusters.Clusters
+ instances []*instance.Instance
+}
+
+func (router *Router) getInstance(name string) *instance.Instance {
+ for _, i := range router.instances {
+ if i.Name == name {
+ return i
+ }
+ }
+
+ return nil
+}
+
+// getLogs implements the special handling when the user selected the "logs" option for the "view" configuration. This
+// option is intended to be used together with the kobsio/fluent-bit-clickhouse Fluent Bit plugin and provides a custom
+// query language to get the logs from ClickHouse.
+// Next to the query and time range, a user can also provide a limit and offset to page through all the logs. The limit
+// shouldn't be larger than 1000 and if the offset is empty we use 0, which indicates a new query in our React UI.
+func (router *Router) getLogs(w http.ResponseWriter, r *http.Request) {
+ name := chi.URLParam(r, "name")
+ query := r.URL.Query().Get("query")
+ limit := r.URL.Query().Get("limit")
+ offset := r.URL.Query().Get("offset")
+ timeStart := r.URL.Query().Get("timeStart")
+ timeEnd := r.URL.Query().Get("timeEnd")
+
+ log.WithFields(logrus.Fields{"name": name, "query": query, "limit": limit, "offset": offset, "timeStart": timeStart, "timeEnd": timeEnd}).Tracef("getLogs")
+
+ i := router.getInstance(name)
+ if i == nil {
+ errresponse.Render(w, r, nil, http.StatusBadRequest, "Could not find instance name")
+ return
+ }
+
+ parsedLimit, err := strconv.ParseInt(limit, 10, 64)
+ if err != nil || parsedLimit > 1000 {
+ errresponse.Render(w, r, err, http.StatusBadRequest, "Could not parse limit")
+ return
+ }
+
+ parsedOffset := int64(0)
+ if offset != "" {
+ parsedOffset, err = strconv.ParseInt(offset, 10, 64)
+ if err != nil {
+ errresponse.Render(w, r, err, http.StatusBadRequest, "Could not parse offset")
+ return
+ }
+ }
+
+ parsedTimeStart, err := strconv.ParseInt(timeStart, 10, 64)
+ if err != nil {
+ errresponse.Render(w, r, err, http.StatusBadRequest, "Could not parse start time")
+ return
+ }
+
+ parsedTimeEnd, err := strconv.ParseInt(timeEnd, 10, 64)
+ if err != nil {
+ errresponse.Render(w, r, err, http.StatusBadRequest, "Could not parse end time")
+ return
+ }
+
+ documents, fields, newOffset, err := i.GetLogs(r.Context(), query, parsedLimit, parsedOffset, parsedTimeStart, parsedTimeEnd)
+ if err != nil {
+ errresponse.Render(w, r, err, http.StatusBadRequest, "Could not get logs")
+ return
+ }
+
+ render.JSON(w, r, logsResponse{
+ Documents: documents,
+ Fields: fields,
+ Offset: newOffset,
+ })
+}
+
+// Register returns a new router which can be used in the router for the kobs rest api.
+func Register(clusters *clusters.Clusters, plugins *plugin.Plugins, config Config) chi.Router {
+ var instances []*instance.Instance
+
+ for _, cfg := range config {
+ instance, err := instance.New(cfg)
+ if err != nil {
+ log.WithError(err).WithFields(logrus.Fields{"name": cfg.Name}).Fatalf("Could not create ClickHouse instance")
+ }
+
+ instances = append(instances, instance)
+
+ var options map[string]interface{}
+ options = make(map[string]interface{})
+ options["type"] = cfg.Type
+
+ plugins.Append(plugin.Plugin{
+ Name: cfg.Name,
+ DisplayName: cfg.DisplayName,
+ Description: cfg.Description,
+ Type: "clickhouse",
+ Options: options,
+ })
+ }
+
+ router := Router{
+ chi.NewRouter(),
+ clusters,
+ instances,
+ }
+
+ router.Get("/logs/{name}", router.getLogs)
+
+ return router
+}
diff --git a/plugins/clickhouse/package.json b/plugins/clickhouse/package.json
new file mode 100644
index 000000000..39c787174
--- /dev/null
+++ b/plugins/clickhouse/package.json
@@ -0,0 +1,28 @@
+{
+ "name": "@kobsio/plugin-clickhouse",
+ "version": "0.0.0",
+ "license": "MIT",
+ "private": false,
+ "main": "./lib/index.js",
+ "module": "./lib-esm/index.js",
+ "types": "./lib/index.d.ts",
+ "scripts": {
+ "plugin": "tsc && tsc --build tsconfig.esm.json && cp -r src/assets lib && cp -r src/assets lib-esm"
+ },
+ "dependencies": {
+ "@kobsio/plugin-core": "*",
+ "@nivo/bar": "^0.73.1",
+ "@nivo/tooltip": "^0.73.0",
+ "@patternfly/react-core": "^4.128.2",
+ "@patternfly/react-icons": "^4.10.11",
+ "@patternfly/react-table": "^4.27.24",
+ "@types/react": "^17.0.0",
+ "@types/react-dom": "^17.0.0",
+ "@types/react-router-dom": "^5.1.7",
+ "react": "^17.0.2",
+ "react-dom": "^17.0.2",
+ "react-query": "^3.17.2",
+ "react-router-dom": "^5.2.0",
+ "typescript": "^4.3.4"
+ }
+}
diff --git a/plugins/clickhouse/pkg/instance/helpers.go b/plugins/clickhouse/pkg/instance/helpers.go
new file mode 100644
index 000000000..e0878a008
--- /dev/null
+++ b/plugins/clickhouse/pkg/instance/helpers.go
@@ -0,0 +1,24 @@
+package instance
+
+// appendIfMissing appends a value to a slice, when this value doesn't already exist in the slice.
+func appendIfMissing(items []string, item string) []string {
+ for _, ele := range items {
+ if ele == item {
+ return items
+ }
+ }
+
+ return append(items, item)
+}
+
+// contains checks if the given slice of string contains the given item. It returns true when the slice contains the
+// given item.
+func contains(items []string, item string) bool {
+ for _, ele := range items {
+ if ele == item {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/plugins/clickhouse/pkg/instance/helpers_test.go b/plugins/clickhouse/pkg/instance/helpers_test.go
new file mode 100644
index 000000000..0e4741372
--- /dev/null
+++ b/plugins/clickhouse/pkg/instance/helpers_test.go
@@ -0,0 +1,25 @@
+package instance
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestAppendIfMissing(t *testing.T) {
+ items := []string{"foo", "bar"}
+
+ items = appendIfMissing(items, "foo")
+ require.Equal(t, []string{"foo", "bar"}, items)
+
+ items = appendIfMissing(items, "hello")
+ items = appendIfMissing(items, "world")
+ require.Equal(t, []string{"foo", "bar", "hello", "world"}, items)
+}
+
+func TestContains(t *testing.T) {
+ items := []string{"foo", "bar"}
+
+ require.Equal(t, true, contains(items, "foo"))
+ require.Equal(t, false, contains(items, "hello world"))
+}
diff --git a/plugins/clickhouse/pkg/instance/instance.go b/plugins/clickhouse/pkg/instance/instance.go
new file mode 100644
index 000000000..209c929f4
--- /dev/null
+++ b/plugins/clickhouse/pkg/instance/instance.go
@@ -0,0 +1,149 @@
+package instance
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "time"
+
+ "github.com/ClickHouse/clickhouse-go"
+ "github.com/sirupsen/logrus"
+)
+
+var (
+ log = logrus.WithFields(logrus.Fields{"package": "clickhouse"})
+)
+
+// Config is the structure of the configuration for a single ClickHouse instance.
+type Config struct {
+ Name string `json:"name"`
+ DisplayName string `json:"displayName"`
+ Description string `json:"description"`
+ Address string `json:"address"`
+ Database string `json:"database"`
+ Username string `json:"username"`
+ Password string `json:"password"`
+ WriteTimeout string `json:"writeTimeout"`
+ ReadTimeout string `json:"readTimeout"`
+ Type string `json:"type"`
+}
+
+// Instance represents a single ClickHouse instance, which can be added via the configuration file.
+type Instance struct {
+ Name string
+ database string
+ client *sql.DB
+}
+
+// GetLogs parses the given query into the sql syntax, which is then run against the ClickHouse instance. The returned
+// rows are converted into a document schema which can be used by our UI.
+func (i *Instance) GetLogs(ctx context.Context, query string, limit, offset, timeStart, timeEnd int64) ([]map[string]interface{}, []string, int64, error) {
+ var documents []map[string]interface{}
+ fields := defaultFields
+
+ // When the user provides a query, we have to build the additional conditions for the sql query. This is done via
+ // the parseLogsQuery which is responsible for parsing our simple query language and returning the corresponding
+// where statement. These conditions are then added as an additional AND to our sql query.
+ conditions := ""
+ if query != "" {
+ parsedQuery, err := parseLogsQuery(query)
+ if err != nil {
+ return nil, nil, offset, err
+ }
+
+ conditions = fmt.Sprintf("AND %s", parsedQuery)
+ }
+
+ // Now we are building and executing our sql query. We always return all fields from the logs table, where the
+ // timestamp of a row is within the selected query range and the parsed query. We also order all the results by the
+// timestamp field and limiting the results / using an offset for pagination.
+ sqlQuery := fmt.Sprintf("SELECT %s FROM %s.logs WHERE timestamp >= ? AND timestamp <= ? %s ORDER BY timestamp DESC LIMIT %d OFFSET %d", defaultColumns, i.database, conditions, limit, offset)
+ rows, err := i.client.QueryContext(ctx, sqlQuery, time.Unix(timeStart, 0), time.Unix(timeEnd, 0))
+ if err != nil {
+ return nil, nil, offset, err
+ }
+ defer rows.Close()
+
+// Now we are going through all the returned rows and passing them to the Row struct. After that we are converting
+ // each row to a JSON document for the React UI, which contains all the default fields and all the items from the
+ // fields_string / fields_number array.
+ // When the offset is 0 (user starts a new query) we are also checking all the fields from the nested fields_string
+ // and fields_number array and adding them to the fields slice. This slice can then be used by the user in our React
+ // UI to show only a list of selected fields in the table.
+ for rows.Next() {
+ var r Row
+ if err := rows.Scan(&r.Timestamp, &r.Cluster, &r.Namespace, &r.App, &r.Pod, &r.Container, &r.Host, &r.FieldsString.Key, &r.FieldsString.Value, &r.FieldsNumber.Key, &r.FieldsNumber.Value, &r.Log); err != nil {
+ return nil, nil, offset, err
+ }
+
+ var document map[string]interface{}
+ document = make(map[string]interface{})
+ document["timestamp"] = r.Timestamp
+ document["cluster"] = r.Cluster
+ document["namespace"] = r.Namespace
+ document["app"] = r.App
+ document["pod_name"] = r.Pod
+ document["container_name"] = r.Container
+ document["host"] = r.Host
+ document["log"] = r.Log
+
+ for index, field := range r.FieldsNumber.Key {
+ document[field] = r.FieldsNumber.Value[index]
+
+ if offset == 0 {
+ fields = appendIfMissing(fields, field)
+ }
+ }
+
+ for index, field := range r.FieldsString.Key {
+ document[field] = r.FieldsString.Value[index]
+
+ if offset == 0 {
+ fields = appendIfMissing(fields, field)
+ }
+ }
+
+ documents = append(documents, document)
+ }
+
+ if err := rows.Err(); err != nil {
+ return nil, nil, offset, err
+ }
+
+ return documents, fields, offset + limit, nil
+}
+
+// New returns a new ClickHouse instance for the given configuration.
+func New(config Config) (*Instance, error) {
+ if config.WriteTimeout == "" {
+ config.WriteTimeout = "30"
+ }
+
+ if config.ReadTimeout == "" {
+ config.ReadTimeout = "30"
+ }
+
+ dns := "tcp://" + config.Address + "?username=" + config.Username + "&password=" + config.Password + "&database=" + config.Database + "&write_timeout=" + config.WriteTimeout + "&read_timeout=" + config.ReadTimeout
+
+ client, err := sql.Open("clickhouse", dns)
+ if err != nil {
+ log.WithError(err).Errorf("could not initialize database connection")
+ return nil, err
+ }
+
+ if err := client.Ping(); err != nil {
+ if exception, ok := err.(*clickhouse.Exception); ok {
+ log.WithError(err).WithFields(logrus.Fields{"code": exception.Code, "message": exception.Message, "stacktrace": exception.StackTrace}).Errorf("could not ping database")
+ } else {
+ log.WithError(err).Errorf("could not ping database")
+ }
+
+ return nil, err
+ }
+
+ return &Instance{
+ Name: config.Name,
+ database: config.Database,
+ client: client,
+ }, nil
+}
diff --git a/plugins/clickhouse/pkg/instance/logs.go b/plugins/clickhouse/pkg/instance/logs.go
new file mode 100644
index 000000000..cb0c8b8cd
--- /dev/null
+++ b/plugins/clickhouse/pkg/instance/logs.go
@@ -0,0 +1,129 @@
+package instance
+
+import (
+ "fmt"
+ "strings"
+)
+
+var (
+	defaultFields  = []string{"timestamp", "cluster", "namespace", "app", "pod_name", "container_name", "host", "log"} // fields stored as dedicated table columns; everything else lives in the nested fields_string/fields_number columns
+	defaultColumns = "timestamp, cluster, namespace, app, pod_name, container_name, host, fields_string.key, fields_string.value, fields_number.key, fields_number.value, log" // presumably the select column list for log queries - verify against the callers
+)
+
+// parseLogsQuery parses the given query string and returns the conditions for the where statement in the sql query. We
+// are providing a very simple query language where the user can use "(", ")", "_not_", "_and_" and "_or_" operators.
+// Then we are splitting the string again for the other operators "=", "!=", ">", ">=", "<", "<=" and "~" which are used
+// to check the value of a field.
+// Once we have built all the conditions we concatenate all the strings to the final sql statement for the where clause.
+func parseLogsQuery(query string) (string, error) {
+	var newOpenBrackets []string
+	openBrackets := strings.Split(query, "(")
+	for _, openBracket := range openBrackets {
+		var newCloseBrackets []string
+		// Split by ")" (not "(") so that a closing bracket followed by more query text is detached from the
+		// preceding condition instead of being glued to its value.
+		closeBrackets := strings.Split(openBracket, ")")
+		for _, closeBracket := range closeBrackets {
+			var newNots []string
+			nots := strings.Split(closeBracket, "_not_")
+			for _, not := range nots {
+				var newAnds []string
+				ands := strings.Split(not, "_and_")
+				for _, and := range ands {
+					var newOrs []string
+					ors := strings.Split(and, "_or_")
+					for _, or := range ors {
+						condition, err := splitOperator(or)
+						if err != nil {
+							return "", err
+						}
+
+						newOrs = append(newOrs, condition)
+					}
+					newAnds = append(newAnds, strings.Join(newOrs, " OR "))
+				}
+				newNots = append(newNots, strings.Join(newAnds, " AND "))
+			}
+			newCloseBrackets = append(newCloseBrackets, strings.Join(newNots, " NOT "))
+		}
+		newOpenBrackets = append(newOpenBrackets, strings.Join(newCloseBrackets, ")"))
+	}
+	return strings.Join(newOpenBrackets, "("), nil
+}
+
+// splitOperator splits the given string by the following operators "!=", ">=", "<=", "=", ">", "<" and "~". The
+// two-character operators must be checked before their one-character prefixes: otherwise "a != b" splits on "=" into
+// two items and is mis-handled as an equality check, and "a >= b" is mis-handled as ">". When a split yields exactly
+// two items we pass the key, value and operator to handleConditionParts to build the where condition.
+func splitOperator(condition string) (string, error) {
+	notEqual := strings.Split(condition, "!=")
+	if len(notEqual) == 2 {
+		return handleConditionParts(notEqual[0], notEqual[1], "!=")
+	}
+
+	greaterThanOrEqual := strings.Split(condition, ">=")
+	if len(greaterThanOrEqual) == 2 {
+		return handleConditionParts(greaterThanOrEqual[0], greaterThanOrEqual[1], ">=")
+	}
+
+	lessThanOrEqual := strings.Split(condition, "<=")
+	if len(lessThanOrEqual) == 2 {
+		return handleConditionParts(lessThanOrEqual[0], lessThanOrEqual[1], "<=")
+	}
+
+	equal := strings.Split(condition, "=")
+	if len(equal) == 2 {
+		return handleConditionParts(equal[0], equal[1], "=")
+	}
+
+	greaterThan := strings.Split(condition, ">")
+	if len(greaterThan) == 2 {
+		return handleConditionParts(greaterThan[0], greaterThan[1], ">")
+	}
+
+	lessThan := strings.Split(condition, "<")
+	if len(lessThan) == 2 {
+		return handleConditionParts(lessThan[0], lessThan[1], "<")
+	}
+
+	regex := strings.Split(condition, "~")
+	if len(regex) == 2 {
+		return handleConditionParts(regex[0], regex[1], "~")
+	}
+
+	if strings.TrimSpace(condition) == "" {
+		return "", nil
+	}
+
+	return "", fmt.Errorf("invalid operator: %s", condition)
+}
+
+// handleConditionParts converts the given key, value and operator to its sql representation. This is required because
+// some fields like "timestamp", "cluster", "namespace", etc. are a separate column in the sql table, where others like
+// "content.level" or "content.response_code" are only available via the fields_string / fields_number columns. For
+// these nested columns we have to use a special query syntax. We also have to use the match function when the operator
+// is "~" which says that the user checks the field value against a regular expression.
+// NOTE(review): key and value are interpolated into the sql string unescaped - confirm inputs are sanitized upstream.
+// See: https://gist.github.com/alexey-milovidov/d6ffc9e0bc0bc72dd7bca90e76e3b83b
+// See: https://clickhouse.tech/docs/en/sql-reference/functions/string-search-functions/#matchhaystack-pattern
+func handleConditionParts(key, value, operator string) (string, error) {
+ key = strings.TrimSpace(key)
+ value = strings.TrimSpace(value)
+
+ if contains(defaultFields, key) {
+ if operator == "~" {
+ return fmt.Sprintf("match(%s, %s)", key, value), nil
+ }
+
+ return fmt.Sprintf("%s%s%s", key, operator, value), nil
+ }
+
+ if value != "" && string(value[0]) == "'" && string(value[len(value)-1]) == "'" {
+ if operator == "~" {
+ return fmt.Sprintf("match(fields_string.value[indexOf(fields_string.key, '%s')], %s)", key, value), nil
+ }
+
+ return fmt.Sprintf("fields_string.value[indexOf(fields_string.key, '%s')] %s %s", key, operator, value), nil
+ }
+
+ return fmt.Sprintf("fields_number.value[indexOf(fields_number.key, '%s')] %s '%s'", key, operator, value), nil
+}
diff --git a/plugins/clickhouse/pkg/instance/logs_test.go b/plugins/clickhouse/pkg/instance/logs_test.go
new file mode 100644
index 000000000..9f32faf24
--- /dev/null
+++ b/plugins/clickhouse/pkg/instance/logs_test.go
@@ -0,0 +1,29 @@
+package instance
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseLogsQuery(t *testing.T) { // table driven tests for parseLogsQuery
+ for _, tc := range []struct {
+ query string
+ where string
+ isInvalid bool
+ }{
+ {query: "cluster = 'foo' _and_ namespace = 'bar'", where: "cluster='foo' AND namespace='bar'", isInvalid: false}, // simple conjunction of two default fields
+ {query: "cluster = 'foo' _and_ (namespace='hello' _or_ namespace='world')", where: "cluster='foo' AND (namespace='hello' OR namespace='world')", isInvalid: false}, // brackets and _or_ must be preserved
+ {query: "kubernetes.label_foo = 'bar'", where: "fields_string.value[indexOf(fields_string.key, 'kubernetes.label_foo')] = 'bar'", isInvalid: false}, // non-default field is looked up via the nested fields_string column
+ } {
+ t.Run(tc.query, func(t *testing.T) {
+ parsedWhere, err := parseLogsQuery(tc.query)
+ if tc.isInvalid {
+ require.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ require.Equal(t, tc.where, parsedWhere)
+ }
+ })
+ }
+}
diff --git a/plugins/clickhouse/pkg/instance/structs.go b/plugins/clickhouse/pkg/instance/structs.go
new file mode 100644
index 000000000..64724557c
--- /dev/null
+++ b/plugins/clickhouse/pkg/instance/structs.go
@@ -0,0 +1,31 @@
+package instance
+
+import (
+ "time"
+)
+
+// FieldString is the struct for the nested fields for all JSON fields of a log line, which are containing a string.
+type FieldString struct {
+ Key []string // field names; Key[i] pairs with Value[i]
+ Value []string // string values, parallel to Key
+}
+
+// FieldNumber is the struct for the nested fields for all JSON fields of a log line, which are containing a number.
+type FieldNumber struct {
+ Key []string // field names; Key[i] pairs with Value[i]
+ Value []float64 // numeric values, parallel to Key
+}
+
+// Row is the struct which represents a single row in the logs table of ClickHouse.
+type Row struct {
+ Timestamp time.Time
+ Cluster string
+ Namespace string
+ App string
+ Pod string // exposed to clients as "pod_name"
+ Container string // exposed to clients as "container_name"
+ Host string
+ FieldsString FieldString // nested string fields of the log line
+ FieldsNumber FieldNumber // nested numeric fields of the log line
+ Log string
+}
diff --git a/plugins/clickhouse/src/assets/icon.png b/plugins/clickhouse/src/assets/icon.png
new file mode 100644
index 000000000..f352e6d9a
Binary files /dev/null and b/plugins/clickhouse/src/assets/icon.png differ
diff --git a/plugins/clickhouse/src/components/page/Logs.tsx b/plugins/clickhouse/src/components/page/Logs.tsx
new file mode 100644
index 000000000..292b78bee
--- /dev/null
+++ b/plugins/clickhouse/src/components/page/Logs.tsx
@@ -0,0 +1,140 @@
+import {
+ Alert,
+ AlertActionLink,
+ AlertVariant,
+ Button,
+ ButtonVariant,
+ Card,
+ CardBody,
+ Grid,
+ GridItem,
+ Spinner,
+} from '@patternfly/react-core';
+import { InfiniteData, InfiniteQueryObserverResult, QueryObserverResult, useInfiniteQuery } from 'react-query';
+import React from 'react';
+import { useHistory } from 'react-router-dom';
+
+import { ILogsData } from '../../utils/interfaces';
+import { IPluginTimes } from '@kobsio/plugin-core';
+import LogsDocuments from '../panel/LogsDocuments';
+import LogsFields from './LogsFields';
+
+interface IPageLogsProps {
+ name: string; // name of the ClickHouse instance, used in the API request path
+ fields?: string[];
+ query: string; // user query, passed through to the logs endpoint
+ selectField: (field: string) => void;
+ times: IPluginTimes; // provides timeStart/timeEnd for the request
+ showDetails: (details: React.ReactNode) => void;
+}
+
+const PageLogs: React.FunctionComponent = ({
+ name,
+ fields,
+ query,
+ selectField,
+ times,
+ showDetails,
+}: IPageLogsProps) => {
+ const history = useHistory();
+
+ const { isError, isFetching, isLoading, data, error, fetchNextPage, refetch } = useInfiniteQuery(
+ ['clickhouse/logs', query, times],
+ async ({ pageParam }) => {
+ try {
+ const response = await fetch(
+ `/api/plugins/clickhouse/logs/${name}?query=${query}&timeStart=${times.timeStart}&timeEnd=${
+ times.timeEnd
+ }&limit=100&offset=${pageParam || ''}`,
+ {
+ method: 'get',
+ },
+ );
+ const json = await response.json();
+
+ if (response.status >= 200 && response.status < 300) {
+ return json;
+ } else {
+ if (json.error) {
+ throw new Error(json.error);
+ } else {
+ throw new Error('An unknown error occured');
+ }
+ }
+ } catch (err) {
+ throw err;
+ }
+ },
+ {
+ getNextPageParam: (lastPage, pages) => lastPage.offset,
+ keepPreviousData: true,
+ },
+ );
+
+ if (isLoading) {
+ return (
+