1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -53,6 +53,7 @@ NOTE: As semantic versioning states all 0.y.z releases can contain breaking changes
- [#133](https://github.com/kobsio/kobs/pull/133): Improve query performance to get logs from ClickHouse.
- [#137](https://github.com/kobsio/kobs/pull/137): Change log view for the ClickHouse and Elasticsearch plugin.
- [#139](https://github.com/kobsio/kobs/pull/139): Update Go and JavaScript dependencies.
- [#140](https://github.com/kobsio/kobs/pull/140): Fill the chart for the distribution of the log lines with zero value.

## [v0.5.0](https://github.com/kobsio/kobs/releases/tag/v0.5.0) (2021-08-03)

3 changes: 2 additions & 1 deletion plugins/clickhouse/pkg/instance/instance.go
@@ -123,7 +123,8 @@ func (i *Instance) GetLogs(ctx context.Context, query, order, orderBy string, li
// used to render the distribution chart, which shows how many documents/rows are available within a bucket.
if timeEnd-timeStart > 30 {
interval := (timeEnd - timeStart) / 30
sqlQueryBuckets := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d second) AS interval_data , count(*) AS count_data FROM %s.logs WHERE timestamp >= ? AND timestamp <= ? %s GROUP BY interval_data SETTINGS skip_unavailable_shards = 1", interval, i.database, conditions)
// sqlQueryBuckets := fmt.Sprintf("SELECT toStartOfInterval(timestamp, INTERVAL %d second) AS interval_data , count(*) AS count_data FROM %s.logs WHERE timestamp >= ? AND timestamp <= ? %s GROUP BY interval_data SETTINGS skip_unavailable_shards = 1", interval, i.database, conditions)
sqlQueryBuckets := fmt.Sprintf(`SELECT toStartOfInterval(timestamp, INTERVAL %d second) AS interval_data , count(*) AS count_data FROM %s.logs WHERE timestamp >= ? AND timestamp <= ? %s GROUP BY interval_data ORDER BY interval_data WITH FILL FROM toStartOfInterval(FROM_UNIXTIME(%d), INTERVAL %d second) TO toStartOfInterval(FROM_UNIXTIME(%d), INTERVAL %d second) STEP %d SETTINGS skip_unavailable_shards = 1`, interval, i.database, conditions, timeStart, interval, timeEnd, interval, interval)
log.WithFields(logrus.Fields{"query": sqlQueryBuckets, "timeStart": timeStart, "timeEnd": timeEnd}).Tracef("sql buckets query")
rowsBuckets, err := i.client.QueryContext(ctx, sqlQueryBuckets, time.Unix(timeStart, 0), time.Unix(timeEnd, 0))
if err != nil {
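The new bucket query relies on ClickHouse's `ORDER BY ... WITH FILL FROM ... TO ... STEP` clause so that intervals without any matching log lines are still returned with a `count_data` of zero, which is what lets the distribution chart render gapless buckets. Below is a minimal, self-contained sketch of how such a query string is assembled; the database name, filter conditions, and time range are hypothetical placeholders, and only the format string itself is taken from the change above:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical stand-ins for i.database, the user-supplied conditions and
	// the requested time range inside GetLogs.
	database := "kobs"
	conditions := "AND cluster = 'dev'"
	timeEnd := time.Now().Unix()
	timeStart := timeEnd - 3600

	// Same bucketing as in instance.go: split the selected range into 30 intervals.
	interval := (timeEnd - timeStart) / 30

	// WITH FILL ... STEP tells ClickHouse to also return intervals that contain
	// no rows, with count_data = 0, so the chart no longer skips empty buckets.
	sqlQueryBuckets := fmt.Sprintf(`SELECT toStartOfInterval(timestamp, INTERVAL %d second) AS interval_data , count(*) AS count_data FROM %s.logs WHERE timestamp >= ? AND timestamp <= ? %s GROUP BY interval_data ORDER BY interval_data WITH FILL FROM toStartOfInterval(FROM_UNIXTIME(%d), INTERVAL %d second) TO toStartOfInterval(FROM_UNIXTIME(%d), INTERVAL %d second) STEP %d SETTINGS skip_unavailable_shards = 1`, interval, database, conditions, timeStart, interval, timeEnd, interval, interval)

	fmt.Println(sqlQueryBuckets)
}
```

With the values above the query spans one hour in 120 second steps; the actual request in GetLogs still binds time.Unix(timeStart, 0) and time.Unix(timeEnd, 0) to the two ? placeholders, exactly as before.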
2 changes: 1 addition & 1 deletion plugins/clickhouse/src/components/panel/LogsChart.tsx
@@ -83,7 +83,7 @@ const LogsChart: React.FunctionComponent<ILogsChartProps> = ({ buckets }: ILogsC
<b>{tooltip.data.intervalFormatted}</b>
</div>
<div>
<SquareIcon color="#0066cc" /> Documents: {tooltip.data.count}
<SquareIcon color="#0066cc" /> Documents: {tooltip.data.count || 0}
</div>
</div>
</TooltipWrapper>