From dad4b169e8e80fba94d16242e22a3b8ed5aa0092 Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 15 May 2019 21:01:33 -0700 Subject: [PATCH 01/10] expose port to fluentd service for fluentbit --- deploy/kubernetes/fluentd-sumologic.yaml | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/deploy/kubernetes/fluentd-sumologic.yaml b/deploy/kubernetes/fluentd-sumologic.yaml index 55dc7b80d1..35609eb733 100644 --- a/deploy/kubernetes/fluentd-sumologic.yaml +++ b/deploy/kubernetes/fluentd-sumologic.yaml @@ -136,12 +136,12 @@ data: logs.conf: |- - @type dummy - tag "dummy.logs" - dummy {"hello":"world"} + @type forward + port 24321 + bind 0.0.0.0 - - @type null + + @type stdout --- apiVersion: apps/v1 @@ -185,6 +185,9 @@ spec: - name: prom-write containerPort: 9888 protocol: TCP + - name: fluent-bit + containerPort: 24321 + protocol: TCP livenessProbe: exec: command: @@ -258,4 +261,8 @@ spec: port: 9888 targetPort: 9888 protocol: TCP + - name: fluent-bit + port: 24321 + targetPort: 24321 + protocol: TCP --- From 0ca3f0a7e32a4a45217c276afd69b3024de94729 Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 15 May 2019 21:02:23 -0700 Subject: [PATCH 02/10] add overrides.yaml for fluentbit --- deploy/fluent-bit/overrides.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 deploy/fluent-bit/overrides.yaml diff --git a/deploy/fluent-bit/overrides.yaml b/deploy/fluent-bit/overrides.yaml new file mode 100644 index 0000000000..4b8847f01f --- /dev/null +++ b/deploy/fluent-bit/overrides.yaml @@ -0,0 +1,14 @@ +backend: + type: forward + forward: + host: fluentd + port: 24321 + tls: "off" + tls_verify: "on" + tls_debug: 1 + shared_key: + +rawConfig: |- + @INCLUDE fluent-bit-service.conf + @INCLUDE fluent-bit-input.conf + @INCLUDE fluent-bit-output.conf From f684395527be661163c308dbee523dc44a4224ee Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 15 May 2019 21:03:06 -0700 Subject: [PATCH 03/10] move kubernetes_sumologic filter plugin to the repo --- fluent-plugin-kubernetes_sumologic/Gemfile | 9 + fluent-plugin-kubernetes_sumologic/README.md | 547 ++++++ fluent-plugin-kubernetes_sumologic/Rakefile | 11 + ...fluent-plugin-kubernetes_sumologic.gemspec | 28 + .../plugin/filter_kubernetes_sumologic.rb | 201 +++ .../test/helper.rb | 16 + .../test_filter_kubernetes_sumologic.rb | 1473 +++++++++++++++++ 7 files changed, 2285 insertions(+) create mode 100644 fluent-plugin-kubernetes_sumologic/Gemfile create mode 100644 fluent-plugin-kubernetes_sumologic/README.md create mode 100644 fluent-plugin-kubernetes_sumologic/Rakefile create mode 100644 fluent-plugin-kubernetes_sumologic/fluent-plugin-kubernetes_sumologic.gemspec create mode 100644 fluent-plugin-kubernetes_sumologic/lib/fluent/plugin/filter_kubernetes_sumologic.rb create mode 100644 fluent-plugin-kubernetes_sumologic/test/helper.rb create mode 100644 fluent-plugin-kubernetes_sumologic/test/plugin/test_filter_kubernetes_sumologic.rb diff --git a/fluent-plugin-kubernetes_sumologic/Gemfile b/fluent-plugin-kubernetes_sumologic/Gemfile new file mode 100644 index 0000000000..9342b36475 --- /dev/null +++ b/fluent-plugin-kubernetes_sumologic/Gemfile @@ -0,0 +1,9 @@ +source 'https://rubygems.org' + +group :test do + gem 'codecov' + gem 'simplecov' + gem 'webmock' +end + +gemspec \ No newline at end of file diff --git a/fluent-plugin-kubernetes_sumologic/README.md b/fluent-plugin-kubernetes_sumologic/README.md new file mode 100644 index 0000000000..dca730d65c --- /dev/null +++ 
b/fluent-plugin-kubernetes_sumologic/README.md @@ -0,0 +1,547 @@ +[![Build Status](https://travis-ci.org/SumoLogic/fluentd-kubernetes-sumologic.svg?branch=master)](https://travis-ci.org/SumoLogic/fluentd-kubernetes-sumologic) [![Gem Version](https://badge.fury.io/rb/fluent-plugin-kubernetes_sumologic.svg)](https://badge.fury.io/rb/fluent-plugin-kubernetes_sumologic) [![Docker Pulls](https://img.shields.io/docker/pulls/sumologic/fluentd-kubernetes-sumologic.svg)](https://hub.docker.com/r/sumologic/fluentd-kubernetes-sumologic) [![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/SumoLogic/fluentd-output-sumologic/issues) + +This page describes the Sumo Kubernetes [Fluentd](http://www.fluentd.org/) plugin. + +## Support +The code in this repository has been developed in collaboration with the Sumo Logic community and is not supported via standard Sumo Logic Support channels. For any issues or questions please submit an issue within the GitHub repository. The maintainers of this project will work directly with the community to answer any questions, address bugs, or review any requests for new features. + +## Installation + +The plugin runs as a Kubernetes [DaemonSet](http://kubernetes.io/docs/admin/daemons/); it runs an instance of the plugin on each host in a cluster. Each plugin instance pulls system, kubelet, docker daemon, and container logs from the host and sends them, in JSON or text format, to an HTTP endpoint on a hosted collector in the [Sumo](http://www.sumologic.com) service. Note the plugin with default configuration requires Kubernetes >=1.8. See [the section below on running this on Kubernetes <1.8](#running-on-kubernetes-versions-<1.8) + +- [Step 1 Create hosted collector and HTTP source in Sumo](#step-1--create-hosted-collector-and-http-source-in-sumo) +- [Step 2 Create a Kubernetes secret](#step-2--create-a-kubernetes-secret) +- [Step 3 Install the Sumo Kubernetes FluentD plugin](#step-3--install-the-sumo-kubernetes-fluentd-plugin) + * [Option A Install plugin using kubectl](#option-a--install-plugin-using-kubectl) + * [Option B Helm chart](#option-b--helm-chart) +- [Environment variables](#environment-variables) + + [Override environment variables using annotations](#override-environment-variables-using-annotations) + + [Exclude data using annotations](#exclude-data-using-annotations) + + [Include excluded using annotations](#include-excluded-using-annotations) +- [Step 4 Set up Heapster for metric collection](#step-4-set-up-heapster-for-metric-collection) + * [Kubernetes ConfigMap](#kubernetes-configmap) + * [Kubernetes Service](#kubernetes-service) + * [Kubernetes Deployment](#kubernetes-deployment) +- [Log data](#log-data) + * [Docker](#docker) + * [Kubelet](#kubelet) + * [Containers](#containers) +- [Taints and Tolerations](#taints-and-tolerations) +- [Running On OpenShift](#running-on-openshift) + + + +![deployment](https://github.com/SumoLogic/fluentd-kubernetes-sumologic/blob/master/screenshots/kubernetes.png) + +# Step 1 Create hosted collector and HTTP source in Sumo + +In this step you create, on the Sumo service, an HTTP endpoint to receive your logs. This process involves creating an HTTP source on a hosted collector in Sumo. In Sumo, collectors use sources to receive data. + +1. If you don’t already have a Sumo account, you can create one by clicking the **Free Trial** button on https://www.sumologic.com/. +2. 
Create a hosted collector, following the instructions on [Configure a Hosted Collector](https://help.sumologic.com/Send-Data/Hosted-Collectors/Configure-a-Hosted-Collector) in Sumo help. (If you already have a Sumo hosted collector that you want to use, skip this step.) +3. Create an HTTP source on the collector you created in the previous step. For instructions, see [HTTP Logs and Metrics Source](https://help.sumologic.com/Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source) in Sumo help. +4. When you have configured the HTTP source, Sumo will display the URL of the HTTP endpoint. Make a note of the URL. You will use it when you configure the Kubernetes service to send data to Sumo. + +# Step 2 Create a Kubernetes secret + +Create a secret in Kubernetes with the HTTP source URL. If you want to change the secret name, you must modify the Kubernetes manifest accordingly. + +`kubectl create secret generic sumologic --from-literal=collector-url=INSERT_HTTP_URL` + +You should see the confirmation message + +`secret "sumologic" created.` + +# Step 3 Install the Sumo Kubernetes FluentD plugin + +Follow the instructions in Option A below to install the plugin using `kubectl`. If you prefer to use a Helm chart, see Option B. + +Before you start, see [Environment variables](#environment-variables) for information about settings you can customize, and how to use annotations to override selected environment variables and exclude data from being sent to Sumo. + +## Option A Install plugin using kubectl + +See the sample Kubernetes DaemonSet and Role in [fluentd.yaml](/daemonset/rbac/fluentd.yaml). + +1. Clone the [GitHub repo](https://github.com/SumoLogic/fluentd-kubernetes-sumologic). + +2. In `fluentd-kubernetes-sumologic`, install the chart using `kubectl`. + +Which `.yaml` file you should use depends on whether or not you are running RBAC for authorization. RBAC is enabled by default as of Kubernetes 1.6. Note the plugin with default configuration requires Kubernetes >=1.8. See the section below on [running this on Kubernetes <1.8](#running-on-kubernetes-versions-<1.8) + +**Non-RBAC (Kubernetes 1.5 and below)** + +`kubectl create -f /daemonset/nonrbac/fluentd.yaml` + +**RBAC (Kubernetes 1.6 and above)**

`kubectl create -f /daemonset/rbac/fluentd.yaml` + + +**Note:** If you modified the command in Step 2 to use a different name, update the `.yaml` file to use the correct secret. + +Logs should begin flowing into Sumo within a few minutes of plugin installation. + +## Option B Helm chart +If you use Helm to manage your Kubernetes resources, there is a Helm chart for the plugin at https://github.com/kubernetes/charts/tree/master/stable/sumologic-fluentd. + +# Environment variables + +Environment Variable | Description +-------------------- | ----------- +`AUDIT_LOG_PATH`|Define the path to the [Kubernetes Audit Log](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)

Default: `/mnt/log/kube-apiserver-audit.log` +`CONCAT_SEPARATOR` |The character to use to delimit lines within the final concatenated message. Most multi-line messages contain a newline at the end of each line.

Default: "" +`EXCLUDE_CONTAINER_REGEX` |A regular expression for containers. Matching containers will be excluded from Sumo. The logs will still be sent to FluentD. +`EXCLUDE_FACILITY_REGEX`|A regular expression for syslog [facilities](https://en.wikipedia.org/wiki/Syslog#Facility). Matching facilities will be excluded from Sumo. The logs will still be sent to FluentD. +`EXCLUDE_HOST_REGEX`|A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. +`EXCLUDE_NAMESPACE_REGEX`|A regular expression for `namespaces`. Matching `namespaces` will be excluded from Sumo. The logs will still be sent to FluentD. +`EXCLUDE_PATH`|Files matching this pattern will be ignored by the `in_tail` plugin, and will not be sent to Kubernetes or Sumo. This can be a comma-separated list as well. See [in_tail](http://docs.fluentd.org/v0.12/articles/in_tail#excludepath) documentation for more information.

For example, defining `EXCLUDE_PATH` as shown below excludes all files matching `/var/log/containers/*.log`:

`...`

`env:`
  - `name: EXCLUDE_PATH`
`value: "[\"/var/log/containers/*.log\"]"` +`EXCLUDE_POD_REGEX`|A regular expression for pods. Matching pods will be excluded from Sumo. The logs will still be sent to FluentD. +`EXCLUDE_PRIORITY_REGEX`|A regular expression for syslog [priorities](https://en.wikipedia.org/wiki/Syslog#Severity_level). Matching priorities will be excluded from Sumo. The logs will still be sent to FluentD. +`EXCLUDE_UNIT_REGEX` |A regular expression for `systemd` units. Matching units will be excluded from Sumo. The logs will still be sent to FluentD. +`FLUENTD_SOURCE`|Fluentd can use log tail, systemd query, or forward as the source. Allowable values: `file`, `systemd`, `forward`.

Default: `file` +`FLUENTD_USER_CONFIG_DIR`|A directory of user-defined fluentd configuration files, which must be in the `*.conf` directory in the container. +`FLUSH_INTERVAL` |How frequently to push logs to Sumo.

Default: `5s` +`KUBERNETES_META`|Include or exclude Kubernetes metadata such as `namespace` and `pod_name` if using JSON log format.

Default: `true` +`KUBERNETES_META_REDUCE`| Reduces redundant Kubernetes metadata, see [_Reducing Kubernetes Metadata_](#reducing-kubernetes-metadata).

Default: `false` +`LOG_FORMAT`|Format in which to post logs to Sumo. Allowable values:

`text`—Logs will appear in SumoLogic in text format.
`json`—Logs will appear in SumoLogic in json format.
`json_merge`—Same as json but if the container logs in json format to stdout it will merge in the container json log at the root level and remove the log field.

Default: `json` +`MULTILINE_START_REGEXP`|The regular expression for the `concat` plugin to use when merging multi-line messages. Defaults to Julian dates, for example, Jul 29, 2017. +`NUM_THREADS`|Set the number of HTTP threads to Sumo. It might be necessary to do so in heavy-logging clusters.

Default: `1` +`READ_FROM_HEAD`|Read logs from the head of the file rather than the bottom. Only applies to container log files. See the in_tail documentation for more information.

Default: `true` +`SOURCE_CATEGORY` |Set the `_sourceCategory` metadata field in Sumo.

Default: `"%{namespace}/%{pod_name}"` +`SOURCE_CATEGORY_PREFIX`|Prepends a string that identifies the cluster to the `_sourceCategory` metadata field in Sumo.

Default: `kubernetes/` +`SOURCE_CATEGORY_REPLACE_DASH` |Used to replace a dash (-) character with another character.

Default: `/`

For example, a Pod called `travel-nginx-3629474229-dirmo` within namespace `app` will appear in Sumo with `_sourceCategory=app/travel/nginx`. +`SOURCE_HOST`|Set the `_sourceHost` metadata field in Sumo.

Default: `""` +`SOURCE_NAME`|Set the `_sourceName` metadata field in Sumo.

Default: `"%{namespace}.%{pod}.%{container}"` +`TIME_KEY`|The field name for json formatted sources that should be used as the time. See [time_key](https://docs.fluentd.org/v0.12/articles/formatter_json#time_key-(string,-optional,-defaults-to-%E2%80%9Ctime%E2%80%9D)). Default: `time` +`ADD_TIMESTAMP`|Option to control adding timestamp to logs. Default: `true` +`TIMESTAMP_KEY`|Field name when add_timestamp is on. Default: `timestamp` +`ADD_STREAM`|Option to control adding stream to logs. Default: `true` +`ADD_TIME`|Option to control adding time to logs. Default: `true` +`CONTAINER_LOGS_PATH`|Specify the path in_tail should watch for container logs. Default: `/mnt/log/containers/*.log` +`PROXY_URI`|Add the uri of the proxy environment if present. +`ENABLE_STAT_WATCHER`|Option to control the enabling of [stat_watcher](https://docs.fluentd.org/v1.0/articles/in_tail#enable_stat_watcher). Default: `true` +`K8S_METADATA_FILTER_WATCH`|Option to control the enabling of [metadata filter plugin watch](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). Default: `true` +`K8S_METADATA_FILTER_CA_FILE`|Option to control the enabling of [metadata filter plugin ca_file](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). +`K8S_METADATA_FILTER_VERIFY_SSL`|Option to control the enabling of [metadata filter plugin verify_ssl](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). Default: `true` +`K8S_METADATA_FILTER_CLIENT_CERT`|Option to control the enabling of [metadata filter plugin client_cert](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). +`K8S_METADATA_FILTER_CLIENT_KEY`|Option to control the enabling of [metadata filter plugin client_key](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). +`K8S_METADATA_FILTER_BEARER_TOKEN_FILE`|Option to control the enabling of [metadata filter plugin bearer_token_file](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). +`K8S_METADATA_FILTER_BEARER_CACHE_SIZE`|Option to control the enabling of [metadata filter plugin cache_size](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). Default: `1000` +`K8S_METADATA_FILTER_BEARER_CACHE_TTL`|Option to control the enabling of [metadata filter plugin cache_ttl](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). Default: `3600` +`K8S_NODE_NAME`|If set, improves [caching of pod metadata](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes) and reduces API calls. +`VERIFY_SSL`|Verify ssl certificate of sumologic endpoint. Default: `true` +`FORWARD_INPUT_BIND`|The bind address to listen to if using forward as `FLUENTD_SOURCE`. Default: `0.0.0.0` (all addresses) +`FORWARD_INPUT_PORT`|The port to listen to if using forward as `FLUENTD_SOURCE`. Default: `24224` + + +The following table show which environment variables affect which Fluentd sources. 
+ +| Environment Variable | Containers | Docker | Kubernetes | Systemd | +|----------------------|------------|--------|------------|---------| +| `EXCLUDE_CONTAINER_REGEX` | ✔ | ✘ | ✘ | ✘ | +| `EXCLUDE_FACILITY_REGEX` | ✘ | ✘ | ✘ | ✔ | +| `EXCLUDE_HOST_REGEX `| ✔ | ✘ | ✘ | ✔ | +| `EXCLUDE_NAMESPACE_REGEX` | ✔ | ✘ | ✔ | ✘ | +| `EXCLUDE_PATH` | ✔ | ✔ | ✔ | ✘ | +| `EXCLUDE_PRIORITY_REGEX` | ✘ | ✘ | ✘ | ✔ | +| `EXCLUDE_POD_REGEX` | ✔ | ✘ | ✘ | ✘ | +| `EXCLUDE_UNIT_REGEX` | ✘ | ✘ | ✘ | ✔ | +| `TIME_KEY` | ✔ | ✘ | ✘ | ✘ | + +### FluentD stops processing logs +When dealing with large volumes of data (TB's from what we have seen), FluentD may stop processing logs, but continue to run. This issue seems to be caused by the [scalability of the inotify process](https://github.com/fluent/fluentd/issues/1630) that is packaged with the FluentD in_tail plugin. If you encounter this situation, setting the `ENABLE_STAT_WATCHER` to `false` should resolve this issue. + +### Reducing Kubernetes metadata + +You can use the `KUBERNETES_META_REDUCE` environment variable (global) or the `sumologic.com/kubernetes_meta_reduce` annotation (per pod) to reduce the amount of Kubernetes metadata included with each log line under the `kubernetes` field. + +When set, FluentD will remove the following properties: + +* `pod_id` +* `container_id` +* `namespace_id` +* `master_url` +* `labels` +* `annotations` + +Logs will still include: + +* `pod_name` +* `container_name` +* `namespace_name` +* `host` + +These fields still allow you to uniquely identify a pod and look up additional details with the Kubernetes API. + +```yaml +apiVersion: v1 +kind: ReplicationController +metadata: + name: nginx +spec: + replicas: 1 + selector: + app: mywebsite + template: + metadata: + name: nginx + labels: + app: mywebsite + annotations: + sumologic.com/kubernetes_meta_reduce: "true" + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 +``` + + +### Override environment variables using annotations +You can override the `LOG_FORMAT`, `KUBERNETES_META_REDUCE`, `SOURCE_CATEGORY` and `SOURCE_NAME` environment variables, per pod, using [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/). For example: + +``` +apiVersion: v1 +kind: ReplicationController +metadata: + name: nginx +spec: + replicas: 1 + selector: + app: mywebsite + template: + metadata: + name: nginx + labels: + app: mywebsite + annotations: + sumologic.com/format: "text" + sumologic.com/kubernetes_meta_reduce: "true" + sumologic.com/sourceCategory: "mywebsite/nginx" + sumologic.com/sourceName: "mywebsite_nginx" + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 +``` + +### Exclude data using annotations + +You can also use the `sumologic.com/exclude` annotation to exclude data from Sumo. This data is sent to FluentD, but not to Sumo. 
+ +``` +apiVersion: v1 +kind: ReplicationController +metadata: + name: nginx +spec: + replicas: 1 + selector: + app: mywebsite + template: + metadata: + name: nginx + labels: + app: mywebsite + annotations: + sumologic.com/format: "text" + sumologic.com/sourceCategory: "mywebsite/nginx" + sumologic.com/sourceName: "mywebsite_nginx" + sumologic.com/exclude: "true" + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 +``` + +### Include excluded using annotations + +If you excluded a whole namespace, but still need one or few pods to be still included for shipping to Sumologic, you can use the `sumologic.com/include` annotation to include data to Sumo. It takes precedence over the exclusion described above. + +``` +apiVersion: v1 +kind: ReplicationController +metadata: + name: nginx +spec: + replicas: 1 + selector: + app: mywebsite + template: + metadata: + name: nginx + labels: + app: mywebsite + annotations: + sumologic.com/format: "text" + sumologic.com/sourceCategory: "mywebsite/nginx" + sumologic.com/sourceName: "mywebsite_nginx" + sumologic.com/include: "true" + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 +``` + +# Step 4 Set up Heapster for metric collection + +The recommended way to collect metrics from Kubernetes clusters is to use Heapster and a Sumo collector with a Graphite source. + +Heapster aggregates metrics across a Kubenetes cluster. Heapster runs as a pod in the cluster, and discovers all nodes in the cluster and queries usage information from each node's `kubelet`—the on-machine Kubernetes agent. + +Heapster provides metrics at the cluster, node and pod level. + +1. Install Heapster in your Kubernetes cluster and configure a Graphite Sink to send the data in Graphite format to Sumo. For instructions, see +https://github.com/kubernetes/heapster/blob/master/docs/sink-configuration.md#graphitecarbon. Assuming you have used the below YAML files to configure your system, then the sink option in graphite would be `--sink=graphite:tcp://sumo-graphite.kube-system.svc:2003`. You may need to change this depending on the namespace you run the deployment in, the name of the service or the port number for your Graphite source. + +2. Use the Sumo Docker container. For instructions, see https://hub.docker.com/r/sumologic/collector/. + +3. The following sections contain an example configmap, which contains the `sources.json` configuration, an example service, and an example deployment. Create these manifests in Kubernetes using `kubectl`. 
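The Heapster side of step 1 simply points an existing Heapster deployment at the Graphite service defined below. A minimal sketch follows; the image tag, `--source` flag, and service account name are illustrative assumptions, and only the `--sink` value comes from step 1.

```
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        k8s-app: heapster
    spec:
      serviceAccountName: heapster   # assumes a service account with read access to cluster metrics
      containers:
      - name: heapster
        image: k8s.gcr.io/heapster-amd64:v1.5.4   # illustrative version tag
        command:
        - /heapster
        # assumed in-cluster API source; adjust for your cluster
        - --source=kubernetes:https://kubernetes.default
        # Graphite sink pointing at the sumo-graphite Service defined below
        - --sink=graphite:tcp://sumo-graphite.kube-system.svc:2003
```

If you deploy the Graphite service in a different namespace or with a different name, adjust the `--sink` address accordingly.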
+ + +## Kubernetes ConfigMap +``` +kind: ConfigMap +apiVersion: v1 +metadata: + name: "sumo-sources" +data: + sources.json: |- + { + "api.version": "v1", + "sources": [ + { + "name": "SOURCE_NAME", + "category": "SOURCE_CATEGORY", + "automaticDateParsing": true, + "contentType": "Graphite", + "timeZone": "UTC", + "encoding": "UTF-8", + "protocol": "TCP", + "port": 2003, + "sourceType": "Graphite" + } + ] + } + +``` +## Kubernetes Service +``` +apiVersion: v1 +kind: Service +metadata: + name: sumo-graphite +spec: + ports: + - port: 2003 + selector: + app: sumo-graphite +``` +## Kubernetes Deployment +``` +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + labels: + app: sumo-graphite + name: sumo-graphite +spec: + replicas: 2 + template: + metadata: + labels: + app: sumo-graphite + spec: + volumes: + - name: sumo-sources + configMap: + name: sumo-sources + items: + - key: sources.json + path: sources.json + containers: + - name: sumo-graphite + image: sumologic/collector:latest + ports: + - containerPort: 2003 + volumeMounts: + - mountPath: /sumo + name: sumo-sources + env: + - name: SUMO_ACCESS_ID + value: + - name: SUMO_ACCESS_KEY + value: + - name: SUMO_SOURCES_JSON + value: /sumo/sources.json + +``` + +# Templating Kubernetes metadata +The following Kubernetes metadata is available for string templating: + +| String template | Description | +| --------------- | ------------------------------------------------------ | +| `%{namespace}` | Namespace name | +| `%{pod}` | Full pod name (e.g. `travel-products-4136654265-zpovl`) | +| `%{pod_name}` | Friendly pod name (e.g. `travel-products`) | +| `%{pod_id}` | The pod's uid (a UUID) | +| `%{container}` | Container name | +| `%{source_host}` | Host | +| `%{label:foo}` | The value of label `foo` | + +## Missing labels +Unlike the other templates, labels are not guaranteed to exist, so missing labels interpolate as `"undefined"`. + +For example, if you have only the label `app: travel` but you define `SOURCE_NAME="%{label:app}@%{label:version}"`, the source name will appear as `travel@undefined`. + +# Log data +After performing the configuration described above, your logs should start streaming to SumoLogic in `json` or text format with the appropriate metadata. If you are using `json` format you can auto extract fields, for example `_sourceCategory=some/app | json auto`. + +## Docker +![Docker Logs](/screenshots/docker.png) + +## Kubelet +Note that Kubelet logs are only collected if you are using systemd. Kubernetes no longer outputs the kubelet logs to a file. +![Docker Logs](/screenshots/kubelet.png) + +## Containers +![Docker Logs](/screenshots/container.png) + +# Taints and Tolerations +By default, the fluentd pods will schedule on, and therefore collect logs from, any worker nodes that do not have a taint and any master node that does not have a taint beyond the default master taint. If you would like to schedule pods on all nodes, regardless of taints, uncomment the following line from fluentd.yaml before applying it. + +``` +tolerations: + #- operator: "Exists" +``` + +# Running On OpenShift + +This daemonset setting mounts /var/log as service account FluentD so you need to run containers as privileged container. 
Here is command example: + +``` +oc adm policy add-scc-to-user privileged system:serviceaccount:logging:fluentd +oc adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:fluentd +oc label node —all logging-sumologic-fluentd=true +oc patch ds fluentd-sumologic -p "spec: + template: + spec: + containers: + - image: sumologic/fluentd-kubernetes-sumologic:latest + name: fluentd + securityContext: + privileged: true" +oc delete pod -l name = fluentd-sumologic +``` + +## Running on Kubernetes versions <1.8 + +In order to run this plugin on Kubernetes <1.8 you will need to make some changes the yaml file prior to deploying it. + +Replace: + +``` + - name: pos-files + hostPath: + path: /var/run/fluentd-pos + type: "" +``` +With: + +``` + - name: pos-files + emptyDir: {} +``` + +## Output to S3 + +If you need to also send data to S3 (i.e. as a secondary backup/audit trail) the image includes the `fluent-plugin-s3` plugin. In order to send the logs from FluentD to multiple outputs, you must use the `copy` plugin. This image comes with an [OOB configuration](conf.d/out.sumo.conf) to output the logs to Sumo Logic. In order to output to multiple destinations, you need to modify that existing configuration. + +**Example:** Send all logs to S3 and Sumo: + +``` + + @type copy + + @type sumologic + log_key log + endpoint "#{ENV['COLLECTOR_URL']}" + verify_ssl "#{ENV['VERIFY_SSL']}" + log_format "#{ENV['LOG_FORMAT']}" + flush_interval "#{ENV['FLUSH_INTERVAL']}" + num_threads "#{ENV['NUM_THREADS']}" + open_timeout 60 + add_timestamp "#{ENV['ADD_TIMESTAMP']}" + proxy_uri "#{ENV['PROXY_URI']}" + + + @type s3 + + aws_key_id YOUR_AWS_KEY_ID + aws_sec_key YOUR_AWS_SECRET_KEY + s3_bucket YOUR_S3_BUCKET_NAME + s3_region us-west-1 + path logs/ + buffer_path /var/log/fluent/s3 + + time_slice_format %Y%m%d%H + time_slice_wait 10m + utc + + buffer_chunk_limit 256m + + +``` + +You can replace the OOB configuration by creating a new Docker image from our image or by using a configmap to inject the new configuration to the pod. + +More details about the S3 plugin can be found [in the docs](https://docs.fluentd.org/v0.12/articles/out_s3). + +## Upgrading to v2.0.0 + +In version 2.0.0, some legacy FluentD configuration has been removed that could lead to [duplicate logs being ingested into Sumo Logic](https://github.com/SumoLogic/fluentd-kubernetes-sumologic/issues/79). These logs were control plane components. This version was done as a major release as it breaks the current version of the [Kubernetes App](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Kubernetes/Install_the_Kubernetes_App_and_View_the_Dashboards) you may have installed in Sumo Logic. + +After upgrading to this version, you will need to reinstall the [Kubernetes App](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Kubernetes/Install_the_Kubernetes_App_and_View_the_Dashboards) in Sumo Logic. If you do not some of the panels in the dashboards will not render properly. + +If you have other content outside the app (Partitions, Scheduled Views, Field Extraction Rules or Scheduled Searches and Alerts), these may need to be updated after upgrading to v2.0.0. The logs, while the same content, have a different format and the same parsing logic and metadata may not apply. 
+ +The previous log format that is removed in v2.0.0: +```json +{ + "timestamp": 1538776281387, + "severity": "I", + "pid": "1", + "source": "wrap.go:42", + "message": "GET /api/v1/namespaces/kube-system/endpoints/kube-scheduler: (3.514372ms) 200 [[kube-scheduler/v1.10.5 (linux/amd64) kubernetes/32ac1c9/leader-election] 127.0.0.1:46290]" +} +``` +Is replaced by the following version. It is the same log line in a different format enriched with the same metadata the plugin applies to all pod logs. +```json +{ + "timestamp": 1538776282152, + "log": "I1005 21:51:21.387204 1 wrap.go:42] GET /api/v1/namespaces/kube-system/endpoints/kube-scheduler: (3.514372ms) 200 [[kube-scheduler/v1.10.5 (linux/amd64) kubernetes/32ac1c9/leader-election] 127.0.0.1:46290]", + "stream": "stdout", + "time": "2018-10-05T21:51:21.387477546Z", + "docker": { + "container_id": "a442fd2982dfdc09ab6235941f8d661a0a5c8df5e1d21f23ff48a9923ac14739" + }, + "kubernetes": { + "container_name": "kube-apiserver", + "namespace_name": "kube-system", + "pod_name": "kube-apiserver-ip-172-20-122-71.us-west-2.compute.internal", + "pod_id": "80fa5e13-c8b9-11e8-a456-0a8c1424d0d4", + "labels": { + "k8s-app": "kube-apiserver" + }, + "host": "ip-172-20-122-71.us-west-2.compute.internal", + "master_url": "https://100.64.0.1:443/api", + "namespace_id": "9b9b75b7-aa16-11e8-9d62-06df85b5d3bc" + } +} +``` diff --git a/fluent-plugin-kubernetes_sumologic/Rakefile b/fluent-plugin-kubernetes_sumologic/Rakefile new file mode 100644 index 0000000000..93a45e97eb --- /dev/null +++ b/fluent-plugin-kubernetes_sumologic/Rakefile @@ -0,0 +1,11 @@ +require 'bundler/gem_tasks' +require 'rake/testtask' + +Rake::TestTask.new(:test) do |test| + test.libs << 'test' + test.pattern = 'test/**/test_*.rb' + test.verbose = true + test.warning = false +end + +task :default => :test diff --git a/fluent-plugin-kubernetes_sumologic/fluent-plugin-kubernetes_sumologic.gemspec b/fluent-plugin-kubernetes_sumologic/fluent-plugin-kubernetes_sumologic.gemspec new file mode 100644 index 0000000000..40cbf1fe2c --- /dev/null +++ b/fluent-plugin-kubernetes_sumologic/fluent-plugin-kubernetes_sumologic.gemspec @@ -0,0 +1,28 @@ +# coding: utf-8 +lib = File.expand_path('../lib', __FILE__) +$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) + +Gem::Specification.new do |gem| + gem.name = "fluent-plugin-kubernetes_sumologic" + gem.version = "0.0.0" + gem.authors = ["Sumo Logic"] + gem.email = ["collection@sumologic.com"] + gem.description = %q{FluentD plugin to extract logs from Kubernetes clusters, enrich and ship to Sumo logic.} + gem.summary = %q{FluentD plugin to extract logs from Kubernetes clusters, enrich and ship to Sumo logic.} + gem.homepage = "https://github.com/SumoLogic/fluentd-kubernetes-sumologic" + gem.license = "Apache-2.0" + + gem.files = `git ls-files`.split($/) + gem.executables = gem.files.grep(%r{^bin/}) { |f| File.basename(f) } + gem.test_files = gem.files.grep(%r{^(test|spec|features)/}) + gem.require_paths = ["lib"] + + gem.required_ruby_version = '>= 2.0.0' + + gem.add_development_dependency "bundler", "~> 2" + gem.add_development_dependency "rake" + gem.add_development_dependency 'test-unit', '~> 3.1.0' + gem.add_development_dependency "codecov", ">= 0.1.10" + gem.add_runtime_dependency "fluentd", ">= 0.14.12" + gem.add_runtime_dependency 'httpclient', '~> 2.8.0' +end diff --git a/fluent-plugin-kubernetes_sumologic/lib/fluent/plugin/filter_kubernetes_sumologic.rb b/fluent-plugin-kubernetes_sumologic/lib/fluent/plugin/filter_kubernetes_sumologic.rb new file 
mode 100644 index 0000000000..24a2253a87 --- /dev/null +++ b/fluent-plugin-kubernetes_sumologic/lib/fluent/plugin/filter_kubernetes_sumologic.rb @@ -0,0 +1,201 @@ +require "fluent/filter" + +module Fluent::Plugin + class SumoContainerOutput < Fluent::Plugin::Filter + # Register type + Fluent::Plugin.register_filter("kubernetes_sumologic", self) + + config_param :kubernetes_meta, :bool, :default => true + config_param :kubernetes_meta_reduce, :bool, :default => false + config_param :source_category, :string, :default => "%{namespace}/%{pod_name}" + config_param :source_category_replace_dash, :string, :default => "/" + config_param :source_category_prefix, :string, :default => "kubernetes/" + config_param :source_name, :string, :default => "%{namespace}.%{pod}.%{container}" + config_param :log_format, :string, :default => "json" + config_param :source_host, :string, :default => "" + config_param :exclude_container_regex, :string, :default => "" + config_param :exclude_facility_regex, :string, :default => "" + config_param :exclude_host_regex, :string, :default => "" + config_param :exclude_namespace_regex, :string, :default => "" + config_param :exclude_pod_regex, :string, :default => "" + config_param :exclude_priority_regex, :string, :default => "" + config_param :exclude_unit_regex, :string, :default => "" + config_param :add_stream, :bool, :default => true + config_param :add_time, :bool, :default => true + + def configure(conf) + super + end + + def is_number?(string) + true if Float(string) rescue false + end + + def sanitize_pod_name(k8s_metadata) + # Strip out dynamic bits from pod name. + # NOTE: Kubernetes deployments append a template hash. + # At the moment this can be in 3 different forms: + # 1) pre-1.8: numeric in pod_template_hash and pod_parts[-2] + # 2) 1.8-1.11: numeric in pod_template_hash, hash in pod_parts[-2] + # 3) post-1.11: hash in pod_template_hash and pod_parts[-2] + + pod_parts = k8s_metadata[:pod].split("-") + pod_template_hash = k8s_metadata[:"label:pod-template-hash"] + if (pod_template_hash == pod_parts[-2] || + to_hash(pod_template_hash) == pod_parts[-2]) + k8s_metadata[:pod_name] = pod_parts[0..-3].join("-") + else + k8s_metadata[:pod_name] = pod_parts[0..-2].join("-") + end + end + + def to_hash(pod_template_hash) + # Convert the pod_template_hash to an alphanumeric string using the same logic Kubernetes + # uses at https://github.com/kubernetes/apimachinery/blob/18a5ff3097b4b189511742e39151a153ee16988b/pkg/util/rand/rand.go#L119 + alphanums = "bcdfghjklmnpqrstvwxz2456789" + pod_template_hash.each_byte.map { |i| alphanums[i.to_i % alphanums.length] }.join("") + end + + def filter(tag, time, record) + # Set the sumo metadata fields + sumo_metadata = record["_sumo_metadata"] || {} + record["_sumo_metadata"] = sumo_metadata + sumo_metadata[:log_format] = @log_format + sumo_metadata[:host] = @source_host if @source_host + sumo_metadata[:source] = @source_name if @source_name + + unless @source_category.nil? + sumo_metadata[:category] = @source_category.dup + unless @source_category_prefix.nil? + sumo_metadata[:category].prepend(@source_category_prefix) + end + end + + if record.key?("_SYSTEMD_UNIT") and not record.fetch("_SYSTEMD_UNIT").nil? + unless @exclude_unit_regex.empty? + if Regexp.compile(@exclude_unit_regex).match(record["_SYSTEMD_UNIT"]) + return nil + end + end + + unless @exclude_facility_regex.empty? + if Regexp.compile(@exclude_facility_regex).match(record["SYSLOG_FACILITY"]) + return nil + end + end + + unless @exclude_priority_regex.empty? 
+ if Regexp.compile(@exclude_priority_regex).match(record["PRIORITY"]) + return nil + end + end + + unless @exclude_host_regex.empty? + if Regexp.compile(@exclude_host_regex).match(record["_HOSTNAME"]) + return nil + end + end + end + + # Allow fields to be overridden by annotations + if record.key?("kubernetes") and not record.fetch("kubernetes").nil? + # Clone kubernetes hash so we don't override the cache + kubernetes = record["kubernetes"].clone + k8s_metadata = { + :namespace => kubernetes["namespace_name"], + :pod => kubernetes["pod_name"], + :pod_id => kubernetes['pod_id'], + :container => kubernetes["container_name"], + :source_host => kubernetes["host"], + } + + + if kubernetes.has_key? "labels" + kubernetes["labels"].each { |k, v| k8s_metadata["label:#{k}".to_sym] = v } + end + k8s_metadata.default = "undefined" + + annotations = kubernetes.fetch("annotations", {}) + if annotations["sumologic.com/include"] == "true" + include = true + else + include = false + end + + unless @exclude_namespace_regex.empty? + if Regexp.compile(@exclude_namespace_regex).match(k8s_metadata[:namespace]) and not include + return nil + end + end + + unless @exclude_pod_regex.empty? + if Regexp.compile(@exclude_pod_regex).match(k8s_metadata[:pod]) and not include + return nil + end + end + + unless @exclude_container_regex.empty? + if Regexp.compile(@exclude_container_regex).match(k8s_metadata[:container]) and not include + return nil + end + end + + unless @exclude_host_regex.empty? + if Regexp.compile(@exclude_host_regex).match(k8s_metadata[:source_host]) and not include + return nil + end + end + + sanitize_pod_name(k8s_metadata) + + if annotations["sumologic.com/exclude"] == "true" + return nil + end + + sumo_metadata[:log_format] = annotations["sumologic.com/format"] if annotations["sumologic.com/format"] + + if annotations["sumologic.com/sourceHost"].nil? + sumo_metadata[:host] = sumo_metadata[:host] % k8s_metadata + else + sumo_metadata[:host] = annotations["sumologic.com/sourceHost"] % k8s_metadata + end + + if annotations["sumologic.com/sourceName"].nil? + sumo_metadata[:source] = sumo_metadata[:source] % k8s_metadata + else + sumo_metadata[:source] = annotations["sumologic.com/sourceName"] % k8s_metadata + end + + if annotations["sumologic.com/sourceCategory"].nil? + sumo_metadata[:category] = sumo_metadata[:category] % k8s_metadata + else + sumo_metadata[:category] = (annotations["sumologic.com/sourceCategory"] % k8s_metadata).prepend(@source_category_prefix) + end + sumo_metadata[:category].gsub!("-", @source_category_replace_dash) + + # Strip kubernetes metadata from json if disabled + if annotations["sumologic.com/kubernetes_meta"] == "false" || !@kubernetes_meta + record.delete("docker") + record.delete("kubernetes") + end + if annotations["sumologic.com/kubernetes_meta_reduce"] == "true" || annotations["sumologic.com/kubernetes_meta_reduce"].nil? 
&& @kubernetes_meta_reduce == true + record.delete("docker") + record["kubernetes"].delete("pod_id") + record["kubernetes"].delete("namespace_id") + record["kubernetes"].delete("labels") + record["kubernetes"].delete("master_url") + record["kubernetes"].delete("annotations") + end + if @add_stream == false + record.delete("stream") + end + if @add_time == false + record.delete("time") + end + # Strip sumologic.com annotations + kubernetes.delete("annotations") if annotations + end + record + end + end +end diff --git a/fluent-plugin-kubernetes_sumologic/test/helper.rb b/fluent-plugin-kubernetes_sumologic/test/helper.rb new file mode 100644 index 0000000000..5a12ee9c08 --- /dev/null +++ b/fluent-plugin-kubernetes_sumologic/test/helper.rb @@ -0,0 +1,16 @@ +require "simplecov" +SimpleCov.start + +if ENV["CI"] == "true" + require "codecov" + SimpleCov.formatter = SimpleCov::Formatter::Codecov +end + +$LOAD_PATH.unshift(File.expand_path("../../", __FILE__)) +require "test-unit" +require "fluent/test" +require "fluent/test/driver/filter" +require "fluent/test/helpers" + +Test::Unit::TestCase.include(Fluent::Test::Helpers) +Test::Unit::TestCase.extend(Fluent::Test::Helpers) diff --git a/fluent-plugin-kubernetes_sumologic/test/plugin/test_filter_kubernetes_sumologic.rb b/fluent-plugin-kubernetes_sumologic/test/plugin/test_filter_kubernetes_sumologic.rb new file mode 100644 index 0000000000..0e706f5fd4 --- /dev/null +++ b/fluent-plugin-kubernetes_sumologic/test/plugin/test_filter_kubernetes_sumologic.rb @@ -0,0 +1,1473 @@ +require "fluent/test" +require "fluent/test/helpers" +require "fluent/test/driver/filter" +require "fluent/plugin/filter_kubernetes_sumologic" +require "test-unit" +require "webmock/test_unit" + +class SumoContainerOutputTest < Test::Unit::TestCase + include Fluent::Test::Helpers + + setup do + Fluent::Test.setup + @time = Fluent::Engine.now + end + + def create_driver(conf = CONFIG) + Fluent::Test::Driver::Filter.new(Fluent::Plugin::SumoContainerOutput).configure(conf) + end + + test "test_empty_config" do + conf = %{} + assert_nothing_raised do + create_driver(conf) + end + end + + test "test_default_config" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => 
"e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_no_k8s_labels" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs/54575ccdb9", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_sourcecategory_prefix" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + 
assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_add_stream" do + conf = %{ + add_stream false + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_add_time" do + conf = %{ + add_time false + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "time" => time, + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + 
assert_equal(d.filtered_records[0], expected) + end + + test "test_sourcecategory_replace_dash" do + conf = %{ + source_category_replace_dash - + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log-format-labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_kubernetes_meta" do + conf = %{ + kubernetes_meta false + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_kubernetes_meta_reduce_via_annotation" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => 
"170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs" + }, + "annotations" => { + "sumologic.com/kubernetes_meta_reduce" => "true", + }, + "host" => "docker-for-desktop", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "kubernetes" => { + "container_name" => "log-format-labs", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "host" => "docker-for-desktop", + "namespace_name" => "default", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_kubernetes_meta_reduce_via_conf" do + conf = %{ + kubernetes_meta_reduce true + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs" + }, + "host" => "docker-for-desktop", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "kubernetes" => { + "container_name" => "log-format-labs", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "host" => "docker-for-desktop", + "namespace_name" => "default", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_kubernetes_meta_reduce_via_annotation_and_conf" do + conf = %{ + kubernetes_meta_reduce false + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs" + }, + "annotations" => { + "sumologic.com/kubernetes_meta_reduce" => "true", + }, + "host" => "docker-for-desktop", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "kubernetes" => { + "container_name" => "log-format-labs", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "host" => "docker-for-desktop", + "namespace_name" => "default", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + 
assert_equal(d.filtered_records[0], expected) + end + + test "test_log_format_json_merge" do + conf = %{ + log_format json_merge + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json_merge", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_log_format_text" do + conf = %{ + log_format text + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "text", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + 
assert_equal(d.filtered_records[0], expected) + end + + test "test_exclude_pod_regex" do + conf = %{ + exclude_pod_regex foo + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) + end + assert_equal(1, d.filtered_records.size) + end + + test "test_exclude_pod_regex_whitelist" do + conf = %{ + exclude_pod_regex .* + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) + end + assert_equal(1, d.filtered_records.size) + end + + test "test_exclude_container_regex" do + conf = %{ + exclude_container_regex foo + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) + end + assert_equal(1, d.filtered_records.size) + end + + test "test_exclude_container_regex_whitelist" do + conf = %{ + exclude_container_regex .* + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) + end + assert_equal(1, d.filtered_records.size) + end + + test "test_exclude_namespace_regex" do + conf = %{ + exclude_namespace_regex foo + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "foo", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => 
"false"}}, "message" => "foo"}) + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "bar", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) + end + assert_equal(1, d.filtered_records.size) + end + + test "test_exclude_namespace_regex_whitelist" do + conf = %{ + exclude_namespace_regex .* + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) + end + assert_equal(1, d.filtered_records.size) + end + + test "test_exclude_host_regex" do + conf = %{ + exclude_host_regex foo + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "foo", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "bar", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) + end + assert_equal(1, d.filtered_records.size) + end + + test "test_exclude_host_regex_whitelist" do + conf = %{ + exclude_host_regex .* + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) + end + assert_equal(1, d.filtered_records.size) + end + + test "test_exclude_annotation" do + conf = %{ + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/exclude" => "true"}}, "message" => "foo"}) + end + assert_equal(0, d.filtered_records.size) + end + + test "test_sourcehost_annotation" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => 
"log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "annotations" => { + "sumologic.com/sourceHost" => "foo", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "annotations" => { + "sumologic.com/sourceHost" => "foo", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "foo", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_sourcename_annotation" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "annotations" => { + "sumologic.com/sourceName" => "foo", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "annotations" => { + "sumologic.com/sourceName" => "foo", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "foo", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_sourcecategory_annotation" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => 
"5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "annotations" => { + "sumologic.com/sourceCategory" => "foo", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "annotations" => { + "sumologic.com/sourceCategory" => "foo", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/foo", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_sourcecategory_using_labels" do + conf = %{ + source_category %{namespace}/%{pod_name}/%{label:run} + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_sourcehost_using_pod_id" do + conf = %{ + source_host %{pod_id} + } + d = 
create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "170af806-c801-11e8-9009-025000000001", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_undefined_labels" do + conf = %{ + source_category %{namespace}/%{pod_name}/%{label:foo} + } + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs/undefined", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test 
"test_exclude_systemd_unit_regex" do + conf = %{ + exclude_unit_regex .* + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"_SYSTEMD_UNIT" => "test", "kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost"}, "message" => "foo"}) + end + assert_equal(0, d.filtered_records.size) + end + + test "test_exclude_systemd_facility_regex" do + conf = %{ + exclude_facility_regex .* + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"_SYSTEMD_UNIT" => "test", "SYSLOG_FACILITY" => "test", "kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost"}, "message" => "foo"}) + end + assert_equal(0, d.filtered_records.size) + end + + test "test_exclude_systemd_priority_regex" do + conf = %{ + exclude_priority_regex .* + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"_SYSTEMD_UNIT" => "test", "PRIORITY" => "test", "kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost"}, "message" => "foo"}) + end + assert_equal(0, d.filtered_records.size) + end + + test "test_exclude_systemd_hostname_regex" do + conf = %{ + exclude_host_regex .* + } + d = create_driver(conf) + time = @time + d.run do + d.feed("filter.test", time, {"_SYSTEMD_UNIT" => "test", "_HOSTNAME" => "test", "kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost"}, "message" => "foo"}) + end + assert_equal(0, d.filtered_records.size) + end + + test "test_pre_1.8_dynamic_bit_removal" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-1013177865-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-1013177865-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-1013177865-9d677.log-format-labs", + }, 
+ } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_1.8-1.11_dynamic_bit_removal" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "1013177865", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end + + test "test_post_1.11_dynamic_bit_removal" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "54575ccdb9", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-54575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "54575ccdb9", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + 
assert_equal(d.filtered_records[0], expected) + end + + test "test_mismatch_dynamic_bit_is_left" do + conf = %{} + d = create_driver(conf) + time = @time + input = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-53575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "54575ccdb9", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + } + d.run do + d.feed("filter.test", time, input) + end + expected = { + "timestamp" => 1538677347823, + "log" => "some message", + "stream" => "stdout", + "docker" => { + "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", + }, + "kubernetes" => { + "container_name" => "log-format-labs", + "namespace_name" => "default", + "pod_name" => "log-format-labs-53575ccdb9-9d677", + "pod_id" => "170af806-c801-11e8-9009-025000000001", + "labels" => { + "pod-template-hash" => "54575ccdb9", + "run" => "log-format-labs", + }, + "host" => "docker-for-desktop", + "master_url" => "https =>//10.96.0.1 =>443/api", + "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", + }, + "_sumo_metadata" => { + :category => "kubernetes/default/log/format/labs/53575ccdb9", + :host => "", + :log_format => "json", + :source => "default.log-format-labs-53575ccdb9-9d677.log-format-labs", + }, + } + assert_equal(1, d.filtered_records.size) + assert_equal(d.filtered_records[0], expected) + end +end From c5e1fdad78b4be718d555713169a13ade5b519c1 Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 15 May 2019 21:04:28 -0700 Subject: [PATCH 04/10] include kubernetes_sumologic filter plugin and dependencies in Dockerfile --- deploy/docker/Dockerfile | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile index 70dbf9a608..7a36645bb4 100644 --- a/deploy/docker/Dockerfile +++ b/deploy/docker/Dockerfile @@ -1,3 +1,26 @@ +FROM fluent/fluentd:v1.3.2-debian AS builder + +ENV PATH /home/fluent/.gem/ruby/2.3.0/bin:$PATH + +COPY gems/fluent-plugin*.gem ./ + +# New fluent image dynamically creates user in entrypoint +RUN apt-get update && \ + apt-get install -y build-essential ruby-dev libffi-dev libsystemd-dev && \ + gem install fluent-plugin-s3 -v 1.1.4 && \ + gem install fluent-plugin-systemd -v 0.3.1 && \ + gem install fluent-plugin-record-reformer -v 0.9.1 && \ + gem install fluent-plugin-kubernetes_metadata_filter -v 1.0.2 && \ + gem install fluent-plugin-sumologic_output -v 1.4.0 && \ + gem install fluent-plugin-concat -v 2.3.0 && \ + gem install fluent-plugin-rewrite-tag-filter -v 2.1.0 && \ + gem install fluent-plugin-prometheus -v 1.1.0 && \ + gem install fluent-plugin-kubernetes_sumologic && \ + rm -rf /home/fluent/.gem/ruby/2.3.0/cache/*.gem && \ + gem sources -c && \ + apt-get remove --purge -y build-essential ruby-dev libffi-dev libsystemd-dev && \ + rm -rf /var/lib/apt/lists/* + FROM fluent/fluentd:v1.4.2-onbuild-1.0 # Use root account to use apk @@ -29,4 +52,6 @@ RUN mkdir -p /fluentd/conf.d COPY ./fluent.conf /fluentd/conf.d/fluent.conf -USER fluent +COPY --from=builder /var/lib/gems /var/lib/gems + +USER fluent \ 
No newline at end of file From df40dd78793fc550ab737bf013636bf828972d75 Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 29 May 2019 15:11:06 -0700 Subject: [PATCH 05/10] collect desired container+systemd logs --- deploy/docker/Dockerfile | 54 ++++++----- deploy/docker/fluent.conf | 6 ++ deploy/fluent-bit/overrides.yaml | 66 +++++++++++++ deploy/kubernetes/fluentd-sumologic.yaml | 114 ++++++++++++++++++++++- 4 files changed, 214 insertions(+), 26 deletions(-) diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile index 7a36645bb4..c88d12f372 100644 --- a/deploy/docker/Dockerfile +++ b/deploy/docker/Dockerfile @@ -1,26 +1,3 @@ -FROM fluent/fluentd:v1.3.2-debian AS builder - -ENV PATH /home/fluent/.gem/ruby/2.3.0/bin:$PATH - -COPY gems/fluent-plugin*.gem ./ - -# New fluent image dynamically creates user in entrypoint -RUN apt-get update && \ - apt-get install -y build-essential ruby-dev libffi-dev libsystemd-dev && \ - gem install fluent-plugin-s3 -v 1.1.4 && \ - gem install fluent-plugin-systemd -v 0.3.1 && \ - gem install fluent-plugin-record-reformer -v 0.9.1 && \ - gem install fluent-plugin-kubernetes_metadata_filter -v 1.0.2 && \ - gem install fluent-plugin-sumologic_output -v 1.4.0 && \ - gem install fluent-plugin-concat -v 2.3.0 && \ - gem install fluent-plugin-rewrite-tag-filter -v 2.1.0 && \ - gem install fluent-plugin-prometheus -v 1.1.0 && \ - gem install fluent-plugin-kubernetes_sumologic && \ - rm -rf /home/fluent/.gem/ruby/2.3.0/cache/*.gem && \ - gem sources -c && \ - apt-get remove --purge -y build-essential ruby-dev libffi-dev libsystemd-dev && \ - rm -rf /var/lib/apt/lists/* - FROM fluent/fluentd:v1.4.2-onbuild-1.0 # Use root account to use apk @@ -36,6 +13,15 @@ RUN apk add --no-cache --update --virtual .build-deps sudo build-base ruby-dev \ && gem install google-protobuf \ && gem install snappy +RUN gem install fluent-plugin-s3 -v 1.1.4 \ + && gem install fluent-plugin-systemd -v 0.3.1 \ + && gem install fluent-plugin-record-modifier \ + && gem install fluent-plugin-kubernetes_metadata_filter -v 1.0.2 \ + && gem install fluent-plugin-sumologic_output -v 1.4.0 \ + && gem install fluent-plugin-rewrite-tag-filter -v 2.1.0 \ + && gem install fluent-plugin-prometheus -v 1.1.0 \ + && gem install fluent-plugin-kubernetes_sumologic + RUN gem install fluent-plugin-sumologic_output \ && gem install fluent-plugin-carbon-v2 \ && gem install fluent-plugin-prometheus-format \ @@ -48,10 +34,28 @@ RUN gem sources --clear-all \ && rm -rf /home/fluent/.gem/ruby/2.5.0/cache/*.gem \ && rm -f ./*.gem +# Default settings +ENV LOG_FORMAT "json" +ENV FLUSH_INTERVAL "5s" +ENV NUM_THREADS "1" +ENV SOURCE_CATEGORY "%{namespace}/%{pod_name}" +ENV SOURCE_CATEGORY_PREFIX "kubernetes/" +ENV SOURCE_CATEGORY_REPLACE_DASH "/" +ENV SOURCE_NAME "%{namespace}.%{pod}.%{container}" +ENV KUBERNETES_META "true" +ENV KUBERNETES_META_REDUCE "false" +ENV ADD_TIMESTAMP "true" +ENV TIMESTAMP_KEY "timestamp" +ENV ADD_STREAM "true" +ENV ADD_TIME "true" +ENV K8S_METADATA_FILTER_WATCH "true" +ENV K8S_METADATA_FILTER_VERIFY_SSL "true" +ENV K8S_METADATA_FILTER_BEARER_CACHE_SIZE "1000" +ENV K8S_METADATA_FILTER_BEARER_CACHE_TTL "3600" +ENV VERIFY_SSL "true" + RUN mkdir -p /fluentd/conf.d COPY ./fluent.conf /fluentd/conf.d/fluent.conf -COPY --from=builder /var/lib/gems /var/lib/gems - USER fluent \ No newline at end of file diff --git a/deploy/docker/fluent.conf b/deploy/docker/fluent.conf index 54f759f1ff..52c6ed2a27 100644 --- a/deploy/docker/fluent.conf +++ b/deploy/docker/fluent.conf @@ -20,6 +20,12 @@ @type 
prometheus_format + + @type kubernetes_metadata + + + @type kubernetes_sumologic + @type stdout \ No newline at end of file diff --git a/deploy/fluent-bit/overrides.yaml b/deploy/fluent-bit/overrides.yaml index 4b8847f01f..d400ce2753 100644 --- a/deploy/fluent-bit/overrides.yaml +++ b/deploy/fluent-bit/overrides.yaml @@ -8,6 +8,72 @@ backend: tls_debug: 1 shared_key: +trackOffsets: true + +input: + tail: + memBufLimit: 5MB + parser: docker + path: /var/log/containers/*.log + systemd: + enabled: true + filters: + systemdUnit: + - addon-config.service + - addon-run.service + - cfn-etcd-environment.service + - cfn-signal.service + - clean-ca-certificates.service + - containerd.service + - coreos-metadata.service + - coreos-setup-environment.service + - coreos-tmpfiles.service + - dbus.service + - docker.service + - efs.service + - etcd-member.service + - etcd.service + - etcd2.service + - etcd3.service + - etcdadm-check.service + - etcdadm-reconfigure.service + - etcdadm-save.service + - etcdadm-update-status.service + - flanneld.service + - format-etcd2-volume.service + - kube-node-taint-and-uncordon.service + - kubelet.service + - ldconfig.service + - locksmithd.service + - logrotate.service + - lvm2-monitor.service + - mdmon.service + - nfs-idmapd.service + - nfs-mountd.service + - nfs-server.service + - nfs-utils.service + - node-problem-detector.service + - ntp.service + - oem-cloudinit.service + - rkt-gc.service + - rkt-metadata.service + - rpc-idmapd.service + - rpc-mountd.service + - rpc-statd.service + - rpcbind.service + - set-aws-environment.service + - system-cloudinit.service + - systemd-timesyncd.service + - update-ca-certificates.service + - user-cloudinit.service + - var-lib-etcd2.service + maxEntries: 1000 + readFromTail: true + tag: host.* + +filter: + kubeTag: containers + rawConfig: |- @INCLUDE fluent-bit-service.conf @INCLUDE fluent-bit-input.conf diff --git a/deploy/kubernetes/fluentd-sumologic.yaml b/deploy/kubernetes/fluentd-sumologic.yaml index 35609eb733..8a3497c362 100644 --- a/deploy/kubernetes/fluentd-sumologic.yaml +++ b/deploy/kubernetes/fluentd-sumologic.yaml @@ -140,8 +140,115 @@ data: port 24321 bind 0.0.0.0 + + @include logs.source.containers.conf + @include logs.source.systemd.conf + + logs.source.containers.conf: |- + + @type relabel + @label @NORMAL + + + + + logs.source.systemd.conf: |- + + @type relabel + @label @KUBELET + + + + + + @type relabel + @label @SYSTEMD + + + + + logs.output.conf: |- - @type stdout + @type sumologic + log_key log + endpoint "#{ENV['SUMO_ENDPOINT_LOGS']}" + verify_ssl "#{ENV['VERIFY_SSL']}" + log_format "#{ENV['LOG_FORMAT']}" + flush_interval "#{ENV['FLUSH_INTERVAL']}" + num_threads "#{ENV['NUM_THREADS']}" + open_timeout 60 + add_timestamp "#{ENV['ADD_TIMESTAMP']}" + timestamp_key "#{ENV['TIMESTAMP_KEY']}" + proxy_uri "#{ENV['PROXY_URI']}" --- apiVersion: apps/v1 @@ -245,6 +352,11 @@ spec: secretKeyRef: name: sumologic key: endpoint-metrics-node-exporter + - name: SUMO_ENDPOINT_LOGS + valueFrom: + secretKeyRef: + name: sumologic + key: endpoint-logs --- apiVersion: v1 kind: Service From 976dadbc3d8af162ddda6b754826bf3c9bd962fd Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 29 May 2019 16:15:02 -0700 Subject: [PATCH 06/10] Remove README for kubernetes_sumologic --- deploy/docker/Dockerfile | 2 +- fluent-plugin-kubernetes_sumologic/README.md | 548 +------------------ 2 files changed, 4 insertions(+), 546 deletions(-) diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile index c88d12f372..691e79aad7 100644 --- 
a/deploy/docker/Dockerfile +++ b/deploy/docker/Dockerfile @@ -58,4 +58,4 @@ RUN mkdir -p /fluentd/conf.d COPY ./fluent.conf /fluentd/conf.d/fluent.conf -USER fluent \ No newline at end of file +USER fluent diff --git a/fluent-plugin-kubernetes_sumologic/README.md b/fluent-plugin-kubernetes_sumologic/README.md index dca730d65c..83832f85b8 100644 --- a/fluent-plugin-kubernetes_sumologic/README.md +++ b/fluent-plugin-kubernetes_sumologic/README.md @@ -1,547 +1,5 @@ -[![Build Status](https://travis-ci.org/SumoLogic/fluentd-kubernetes-sumologic.svg?branch=master)](https://travis-ci.org/SumoLogic/fluentd-kubernetes-sumologic) [![Gem Version](https://badge.fury.io/rb/fluent-plugin-kubernetes_sumologic.svg)](https://badge.fury.io/rb/fluent-plugin-kubernetes_sumologic) [![Docker Pulls](https://img.shields.io/docker/pulls/sumologic/fluentd-kubernetes-sumologic.svg)](https://hub.docker.com/r/sumologic/fluentd-kubernetes-sumologic) [![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/SumoLogic/fluentd-output-sumologic/issues) +# fluent-plugin-kubernetes_sumologic -This page describes the Sumo Kubernetes [Fluentd](http://www.fluentd.org/) plugin. +[Fluentd](https://fluentd.org/) filter plugin to attach Sumo Logic metadata to the logs. -## Support -The code in this repository has been developed in collaboration with the Sumo Logic community and is not supported via standard Sumo Logic Support channels. For any issues or questions please submit an issue within the GitHub repository. The maintainers of this project will work directly with the community to answer any questions, address bugs, or review any requests for new features. - -## Installation - -The plugin runs as a Kubernetes [DaemonSet](http://kubernetes.io/docs/admin/daemons/); it runs an instance of the plugin on each host in a cluster. Each plugin instance pulls system, kubelet, docker daemon, and container logs from the host and sends them, in JSON or text format, to an HTTP endpoint on a hosted collector in the [Sumo](http://www.sumologic.com) service. Note the plugin with default configuration requires Kubernetes >=1.8. 
See [the section below on running this on Kubernetes <1.8](#running-on-kubernetes-versions-<1.8) - -- [Step 1 Create hosted collector and HTTP source in Sumo](#step-1--create-hosted-collector-and-http-source-in-sumo) -- [Step 2 Create a Kubernetes secret](#step-2--create-a-kubernetes-secret) -- [Step 3 Install the Sumo Kubernetes FluentD plugin](#step-3--install-the-sumo-kubernetes-fluentd-plugin) - * [Option A Install plugin using kubectl](#option-a--install-plugin-using-kubectl) - * [Option B Helm chart](#option-b--helm-chart) -- [Environment variables](#environment-variables) - + [Override environment variables using annotations](#override-environment-variables-using-annotations) - + [Exclude data using annotations](#exclude-data-using-annotations) - + [Include excluded using annotations](#include-excluded-using-annotations) -- [Step 4 Set up Heapster for metric collection](#step-4-set-up-heapster-for-metric-collection) - * [Kubernetes ConfigMap](#kubernetes-configmap) - * [Kubernetes Service](#kubernetes-service) - * [Kubernetes Deployment](#kubernetes-deployment) -- [Log data](#log-data) - * [Docker](#docker) - * [Kubelet](#kubelet) - * [Containers](#containers) -- [Taints and Tolerations](#taints-and-tolerations) -- [Running On OpenShift](#running-on-openshift) - - - -![deployment](https://github.com/SumoLogic/fluentd-kubernetes-sumologic/blob/master/screenshots/kubernetes.png) - -# Step 1 Create hosted collector and HTTP source in Sumo - -In this step you create, on the Sumo service, an HTTP endpoint to receive your logs. This process involves creating an HTTP source on a hosted collector in Sumo. In Sumo, collectors use sources to receive data. - -1. If you don’t already have a Sumo account, you can create one by clicking the **Free Trial** button on https://www.sumologic.com/. -2. Create a hosted collector, following the instructions on [Configure a Hosted Collector](https://help.sumologic.com/Send-Data/Hosted-Collectors/Configure-a-Hosted-Collector) in Sumo help. (If you already have a Sumo hosted collector that you want to use, skip this step.) -3. Create an HTTP source on the collector you created in the previous step. For instructions, see [HTTP Logs and Metrics Source](https://help.sumologic.com/Send-Data/Sources/02Sources-for-Hosted-Collectors/HTTP-Source) in Sumo help. -4. When you have configured the HTTP source, Sumo will display the URL of the HTTP endpoint. Make a note of the URL. You will use it when you configure the Kubernetes service to send data to Sumo. - -# Step 2 Create a Kubernetes secret - -Create a secret in Kubernetes with the HTTP source URL. If you want to change the secret name, you must modify the Kubernetes manifest accordingly. - -`kubectl create secret generic sumologic --from-literal=collector-url=INSERT_HTTP_URL` - -You should see the confirmation message - -`secret "sumologic" created.` - -# Step 3 Install the Sumo Kubernetes FluentD plugin - -Follow the instructions in Option A below to install the plugin using `kubectl`. If you prefer to use a Helm chart, see Option B. - -Before you start, see [Environment variables](#environment-variables) for information about settings you can customize, and how to use annotations to override selected environment variables and exclude data from being sent to Sumo. - -## Option A Install plugin using kubectl - -See the sample Kubernetes DaemonSet and Role in [fluentd.yaml](/daemonset/rbac/fluentd.yaml). - -1. Clone the [GitHub repo](https://github.com/SumoLogic/fluentd-kubernetes-sumologic). - -2. 
In `fluentd-kubernetes-sumologic`, install the chart using `kubectl`. - -Which `.yaml` file you should use depends on whether or not you are running RBAC for authorization. RBAC is enabled by default as of Kubernetes 1.6. Note the plugin with default configuration requires Kubernetes >=1.8. See the section below on [running this on Kubernetes <1.8](#running-on-kubernetes-versions-<1.8) - -**Non-RBAC (Kubernetes 1.5 and below)** - -`kubectl create -f /daemonset/nonrbac/fluentd.yaml` - -**RBAC (Kubernetes 1.6 and above)**

`kubectl create -f /daemonset/rbac/fluentd.yaml` - - -**Note** if you modified the command in Step 2 to use a different name, update the `.yaml` file to use the correct secret. - -Logs should begin flowing into Sumo within a few minutes of plugin installation. - -## Option B Helm chart -If you use Helm to manage your Kubernetes resources, there is a Helm chart for the plugin at https://github.com/kubernetes/charts/tree/master/stable/sumologic-fluentd. - -# Environment variables - -Environment | Variable Description ------------ | -------------------- -`AUDIT_LOG_PATH`|Define the path to the [Kubernetes Audit Log](https://kubernetes.io/docs/tasks/debug-application-cluster/audit/)

Default: `/mnt/log/kube-apiserver-audit.log` -`CONCAT_SEPARATOR` |The character to use to delimit lines within the final concatenated message. Most multi-line messages contain a newline at the end of each line.

Default: "" -`EXCLUDE_CONTAINER_REGEX` |A regular expression for containers. Matching containers will be excluded from Sumo. The logs will still be sent to FluentD. -`EXCLUDE_FACILITY_REGEX`|A regular expression for syslog [facilities](https://en.wikipedia.org/wiki/Syslog#Facility). Matching facilities will be excluded from Sumo. The logs will still be sent to FluentD. -`EXCLUDE_HOST_REGEX`|A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. -`EXCLUDE_NAMESPACE_REGEX`|A regular expression for `namespaces`. Matching `namespaces` will be excluded from Sumo. The logs will still be sent to FluentD. -`EXCLUDE_PATH`|Files matching this pattern will be ignored by the `in_tail` plugin, and will not be sent to Kubernetes or Sumo. This can be a comma-separated list as well. See [in_tail](http://docs.fluentd.org/v0.12/articles/in_tail#excludepath) documentation for more information.

For example, defining `EXCLUDE_PATH` as shown below excludes all files matching `/var/log/containers/*.log`,

`...`

`env:`
  - `name: EXCLUDE_PATH`
  `value: "[\"/var/log/containers/*.log\"]"` -`EXCLUDE_POD_REGEX`|A regular expression for pods. Matching pods will be excluded from Sumo. The logs will still be sent to FluentD. -`EXCLUDE_PRIORITY_REGEX`|A regular expression for syslog [priorities](https://en.wikipedia.org/wiki/Syslog#Severity_level). Matching priorities will be excluded from Sumo. The logs will still be sent to FluentD. -`EXCLUDE_UNIT_REGEX` |A regular expression for `systemd` units. Matching units will be excluded from Sumo. The logs will still be sent to FluentD. -`FLUENTD_SOURCE`|Fluentd can use log tail, systemd query or forward as the source, Allowable values: `file`, `systemd`, `forward`.

Default: `file` -`FLUENTD_USER_CONFIG_DIR`|A directory of user-defined fluentd configuration files, which must be in the `*.conf` directory in the container. -`FLUSH_INTERVAL` |How frequently to push logs to Sumo.

Default: `5s` -`KUBERNETES_META`|Include or exclude Kubernetes metadata such as `namespace` and `pod_name` if using JSON log format.

Default: `true` -`KUBERNETES_META_REDUCE`| Reduces redundant Kubernetes metadata, see [_Reducing Kubernetes Metadata_](#reducing-kubernetes-metadata).

Default: `false` -`LOG_FORMAT`|Format in which to post logs to Sumo. Allowable values:

`text`—Logs will appear in SumoLogic in text format.
`json`—Logs will appear in SumoLogic in json format.
`json_merge`—Same as json but if the container logs in json format to stdout it will merge in the container json log at the root level and remove the log field.

Default: `json` -`MULTILINE_START_REGEXP`|The regular expression for the `concat` plugin to use when merging multi-line messages. Defaults to Julian dates, for example, Jul 29, 2017. -`NUM_THREADS`|Set the number of HTTP threads to Sumo. It might be necessary to do so in heavy-logging clusters.

Default: `1` -`READ_FROM_HEAD`|Start to read the logs from the head of file, not bottom. Only applies to containers log files. See in_tail doc for more information.

Default: `true` -`SOURCE_CATEGORY` |Set the `_sourceCategory` metadata field in Sumo.

Default: `"%{namespace}/%{pod_name}"` -`SOURCE_CATEGORY_PREFIX`|Prepends a string that identifies the cluster to the `_sourceCategory` metadata field in Sumo.

Default: `kubernetes/` -`SOURCE_CATEGORY_REPLACE_DASH` |Used to replace a dash (-) character with another character.

Default: `/`

For example, a Pod called `travel-nginx-3629474229-dirmo` within namespace `app` will appear in Sumo with `_sourceCategory=app/travel/nginx`. -`SOURCE_HOST`|Set the `_sourceHost` metadata field in Sumo.

Default: `""` -`SOURCE_NAME`|Set the `_sourceName` metadata field in Sumo.

Default: `"%{namespace}.%{pod}.%{container}"` -`TIME_KEY`|The field name for json formatted sources that should be used as the time. See [time_key](https://docs.fluentd.org/v0.12/articles/formatter_json#time_key-(string,-optional,-defaults-to-%E2%80%9Ctime%E2%80%9D)). Default: `time` -`ADD_TIMESTAMP`|Option to control adding timestamp to logs. Default: `true` -`TIMESTAMP_KEY`|Field name when add_timestamp is on. Default: `timestamp` -`ADD_STREAM`|Option to control adding stream to logs. Default: `true` -`ADD_TIME`|Option to control adding time to logs. Default: `true` -`CONTAINER_LOGS_PATH`|Specify the path in_tail should watch for container logs. Default: `/mnt/log/containers/*.log` -`PROXY_URI`|Add the uri of the proxy environment if present. -`ENABLE_STAT_WATCHER`|Option to control the enabling of [stat_watcher](https://docs.fluentd.org/v1.0/articles/in_tail#enable_stat_watcher). Default: `true` -`K8S_METADATA_FILTER_WATCH`|Option to control the enabling of [metadata filter plugin watch](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). Default: `true` -`K8S_METADATA_FILTER_CA_FILE`|Option to control the enabling of [metadata filter plugin ca_file](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). -`K8S_METADATA_FILTER_VERIFY_SSL`|Option to control the enabling of [metadata filter plugin verify_ssl](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). Default: `true` -`K8S_METADATA_FILTER_CLIENT_CERT`|Option to control the enabling of [metadata filter plugin client_cert](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). -`K8S_METADATA_FILTER_CLIENT_KEY`|Option to control the enabling of [metadata filter plugin client_key](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). -`K8S_METADATA_FILTER_BEARER_TOKEN_FILE`|Option to control the enabling of [metadata filter plugin bearer_token_file](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). -`K8S_METADATA_FILTER_BEARER_CACHE_SIZE`|Option to control the enabling of [metadata filter plugin cache_size](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). Default: `1000` -`K8S_METADATA_FILTER_BEARER_CACHE_TTL`|Option to control the enabling of [metadata filter plugin cache_ttl](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#configuration). Default: `3600` -`K8S_NODE_NAME`|If set, improves [caching of pod metadata](https://github.com/fabric8io/fluent-plugin-kubernetes_metadata_filter#environment-variables-for-kubernetes) and reduces API calls. -`VERIFY_SSL`|Verify ssl certificate of sumologic endpoint. Default: `true` -`FORWARD_INPUT_BIND`|The bind address to listen to if using forward as `FLUENTD_SOURCE`. Default: `0.0.0.0` (all addresses) -`FORWARD_INPUT_PORT`|The port to listen to if using forward as `FLUENTD_SOURCE`. Default: `24224` - - -The following table show which environment variables affect which Fluentd sources. 
- -| Environment Variable | Containers | Docker | Kubernetes | Systemd | -|----------------------|------------|--------|------------|---------| -| `EXCLUDE_CONTAINER_REGEX` | ✔ | ✘ | ✘ | ✘ | -| `EXCLUDE_FACILITY_REGEX` | ✘ | ✘ | ✘ | ✔ | -| `EXCLUDE_HOST_REGEX `| ✔ | ✘ | ✘ | ✔ | -| `EXCLUDE_NAMESPACE_REGEX` | ✔ | ✘ | ✔ | ✘ | -| `EXCLUDE_PATH` | ✔ | ✔ | ✔ | ✘ | -| `EXCLUDE_PRIORITY_REGEX` | ✘ | ✘ | ✘ | ✔ | -| `EXCLUDE_POD_REGEX` | ✔ | ✘ | ✘ | ✘ | -| `EXCLUDE_UNIT_REGEX` | ✘ | ✘ | ✘ | ✔ | -| `TIME_KEY` | ✔ | ✘ | ✘ | ✘ | - -### FluentD stops processing logs -When dealing with large volumes of data (TB's from what we have seen), FluentD may stop processing logs, but continue to run. This issue seems to be caused by the [scalability of the inotify process](https://github.com/fluent/fluentd/issues/1630) that is packaged with the FluentD in_tail plugin. If you encounter this situation, setting the `ENABLE_STAT_WATCHER` to `false` should resolve this issue. - -### Reducing Kubernetes metadata - -You can use the `KUBERNETES_META_REDUCE` environment variable (global) or the `sumologic.com/kubernetes_meta_reduce` annotation (per pod) to reduce the amount of Kubernetes metadata included with each log line under the `kubernetes` field. - -When set, FluentD will remove the following properties: - -* `pod_id` -* `container_id` -* `namespace_id` -* `master_url` -* `labels` -* `annotations` - -Logs will still include: - -* `pod_name` -* `container_name` -* `namespace_name` -* `host` - -These fields still allow you to uniquely identify a pod and look up additional details with the Kubernetes API. - -```yaml -apiVersion: v1 -kind: ReplicationController -metadata: - name: nginx -spec: - replicas: 1 - selector: - app: mywebsite - template: - metadata: - name: nginx - labels: - app: mywebsite - annotations: - sumologic.com/kubernetes_meta_reduce: "true" - spec: - containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 -``` - - -### Override environment variables using annotations -You can override the `LOG_FORMAT`, `KUBERNETES_META_REDUCE`, `SOURCE_CATEGORY` and `SOURCE_NAME` environment variables, per pod, using [Kubernetes annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/). For example: - -``` -apiVersion: v1 -kind: ReplicationController -metadata: - name: nginx -spec: - replicas: 1 - selector: - app: mywebsite - template: - metadata: - name: nginx - labels: - app: mywebsite - annotations: - sumologic.com/format: "text" - sumologic.com/kubernetes_meta_reduce: "true" - sumologic.com/sourceCategory: "mywebsite/nginx" - sumologic.com/sourceName: "mywebsite_nginx" - spec: - containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 -``` - -### Exclude data using annotations - -You can also use the `sumologic.com/exclude` annotation to exclude data from Sumo. This data is sent to FluentD, but not to Sumo. 
- -``` -apiVersion: v1 -kind: ReplicationController -metadata: - name: nginx -spec: - replicas: 1 - selector: - app: mywebsite - template: - metadata: - name: nginx - labels: - app: mywebsite - annotations: - sumologic.com/format: "text" - sumologic.com/sourceCategory: "mywebsite/nginx" - sumologic.com/sourceName: "mywebsite_nginx" - sumologic.com/exclude: "true" - spec: - containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 -``` - -### Include excluded using annotations - -If you excluded a whole namespace, but still need one or few pods to be still included for shipping to Sumologic, you can use the `sumologic.com/include` annotation to include data to Sumo. It takes precedence over the exclusion described above. - -``` -apiVersion: v1 -kind: ReplicationController -metadata: - name: nginx -spec: - replicas: 1 - selector: - app: mywebsite - template: - metadata: - name: nginx - labels: - app: mywebsite - annotations: - sumologic.com/format: "text" - sumologic.com/sourceCategory: "mywebsite/nginx" - sumologic.com/sourceName: "mywebsite_nginx" - sumologic.com/include: "true" - spec: - containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 -``` - -# Step 4 Set up Heapster for metric collection - -The recommended way to collect metrics from Kubernetes clusters is to use Heapster and a Sumo collector with a Graphite source. - -Heapster aggregates metrics across a Kubenetes cluster. Heapster runs as a pod in the cluster, and discovers all nodes in the cluster and queries usage information from each node's `kubelet`—the on-machine Kubernetes agent. - -Heapster provides metrics at the cluster, node and pod level. - -1. Install Heapster in your Kubernetes cluster and configure a Graphite Sink to send the data in Graphite format to Sumo. For instructions, see -https://github.com/kubernetes/heapster/blob/master/docs/sink-configuration.md#graphitecarbon. Assuming you have used the below YAML files to configure your system, then the sink option in graphite would be `--sink=graphite:tcp://sumo-graphite.kube-system.svc:2003`. You may need to change this depending on the namespace you run the deployment in, the name of the service or the port number for your Graphite source. - -2. Use the Sumo Docker container. For instructions, see https://hub.docker.com/r/sumologic/collector/. - -3. The following sections contain an example configmap, which contains the `sources.json` configuration, an example service, and an example deployment. Create these manifests in Kubernetes using `kubectl`. 
- - -## Kubernetes ConfigMap -``` -kind: ConfigMap -apiVersion: v1 -metadata: - name: "sumo-sources" -data: - sources.json: |- - { - "api.version": "v1", - "sources": [ - { - "name": "SOURCE_NAME", - "category": "SOURCE_CATEGORY", - "automaticDateParsing": true, - "contentType": "Graphite", - "timeZone": "UTC", - "encoding": "UTF-8", - "protocol": "TCP", - "port": 2003, - "sourceType": "Graphite" - } - ] - } - -``` -## Kubernetes Service -``` -apiVersion: v1 -kind: Service -metadata: - name: sumo-graphite -spec: - ports: - - port: 2003 - selector: - app: sumo-graphite -``` -## Kubernetes Deployment -``` -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - labels: - app: sumo-graphite - name: sumo-graphite -spec: - replicas: 2 - template: - metadata: - labels: - app: sumo-graphite - spec: - volumes: - - name: sumo-sources - configMap: - name: sumo-sources - items: - - key: sources.json - path: sources.json - containers: - - name: sumo-graphite - image: sumologic/collector:latest - ports: - - containerPort: 2003 - volumeMounts: - - mountPath: /sumo - name: sumo-sources - env: - - name: SUMO_ACCESS_ID - value: - - name: SUMO_ACCESS_KEY - value: - - name: SUMO_SOURCES_JSON - value: /sumo/sources.json - -``` - -# Templating Kubernetes metadata -The following Kubernetes metadata is available for string templating: - -| String template | Description | -| --------------- | ------------------------------------------------------ | -| `%{namespace}` | Namespace name | -| `%{pod}` | Full pod name (e.g. `travel-products-4136654265-zpovl`) | -| `%{pod_name}` | Friendly pod name (e.g. `travel-products`) | -| `%{pod_id}` | The pod's uid (a UUID) | -| `%{container}` | Container name | -| `%{source_host}` | Host | -| `%{label:foo}` | The value of label `foo` | - -## Missing labels -Unlike the other templates, labels are not guaranteed to exist, so missing labels interpolate as `"undefined"`. - -For example, if you have only the label `app: travel` but you define `SOURCE_NAME="%{label:app}@%{label:version}"`, the source name will appear as `travel@undefined`. - -# Log data -After performing the configuration described above, your logs should start streaming to SumoLogic in `json` or text format with the appropriate metadata. If you are using `json` format you can auto extract fields, for example `_sourceCategory=some/app | json auto`. - -## Docker -![Docker Logs](/screenshots/docker.png) - -## Kubelet -Note that Kubelet logs are only collected if you are using systemd. Kubernetes no longer outputs the kubelet logs to a file. -![Docker Logs](/screenshots/kubelet.png) - -## Containers -![Docker Logs](/screenshots/container.png) - -# Taints and Tolerations -By default, the fluentd pods will schedule on, and therefore collect logs from, any worker nodes that do not have a taint and any master node that does not have a taint beyond the default master taint. If you would like to schedule pods on all nodes, regardless of taints, uncomment the following line from fluentd.yaml before applying it. - -``` -tolerations: - #- operator: "Exists" -``` - -# Running On OpenShift - -This daemonset setting mounts /var/log as service account FluentD so you need to run containers as privileged container. 
-
-```
-oc adm policy add-scc-to-user privileged system:serviceaccount:logging:fluentd
-oc adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:fluentd
-oc label node --all logging-sumologic-fluentd=true
-oc patch ds fluentd-sumologic -p "spec:
-  template:
-    spec:
-      containers:
-      - image: sumologic/fluentd-kubernetes-sumologic:latest
-        name: fluentd
-        securityContext:
-          privileged: true"
-oc delete pod -l name=fluentd-sumologic
-```
-
-## Running on Kubernetes versions <1.8
-
-In order to run this plugin on Kubernetes <1.8, you will need to make some changes to the YAML file prior to deploying it.
-
-Replace:
-
-```
-      - name: pos-files
-        hostPath:
-          path: /var/run/fluentd-pos
-          type: ""
-```
-
-With:
-
-```
-      - name: pos-files
-        emptyDir: {}
-```
-
-## Output to S3
-
-If you also need to send data to S3 (e.g. as a secondary backup or audit trail), the image includes the `fluent-plugin-s3` plugin. In order to send the logs from FluentD to multiple outputs, you must use the `copy` plugin. This image comes with an [OOB configuration](conf.d/out.sumo.conf) that outputs the logs to Sumo Logic. In order to output to multiple destinations, you need to modify that existing configuration.
-
-**Example:** Send all logs to S3 and Sumo:
-
-```
-<match **>
-  @type copy
-  <store>
-    @type sumologic
-    log_key log
-    endpoint "#{ENV['COLLECTOR_URL']}"
-    verify_ssl "#{ENV['VERIFY_SSL']}"
-    log_format "#{ENV['LOG_FORMAT']}"
-    flush_interval "#{ENV['FLUSH_INTERVAL']}"
-    num_threads "#{ENV['NUM_THREADS']}"
-    open_timeout 60
-    add_timestamp "#{ENV['ADD_TIMESTAMP']}"
-    proxy_uri "#{ENV['PROXY_URI']}"
-  </store>
-  <store>
-    @type s3
-
-    aws_key_id YOUR_AWS_KEY_ID
-    aws_sec_key YOUR_AWS_SECRET_KEY
-    s3_bucket YOUR_S3_BUCKET_NAME
-    s3_region us-west-1
-    path logs/
-    buffer_path /var/log/fluent/s3
-
-    time_slice_format %Y%m%d%H
-    time_slice_wait 10m
-    utc
-
-    buffer_chunk_limit 256m
-  </store>
-</match>
-```
-
-You can replace the OOB configuration by creating a new Docker image from our image, or by using a ConfigMap to inject the new configuration into the pod.
-
-More details about the S3 plugin can be found [in the docs](https://docs.fluentd.org/v0.12/articles/out_s3).
-
-## Upgrading to v2.0.0
-
-In version 2.0.0, some legacy FluentD configuration that could lead to [duplicate logs being ingested into Sumo Logic](https://github.com/SumoLogic/fluentd-kubernetes-sumologic/issues/79) has been removed. These logs came from control plane components. This was released as a major version because it breaks the current version of the [Kubernetes App](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Kubernetes/Install_the_Kubernetes_App_and_View_the_Dashboards) you may have installed in Sumo Logic.
-
-After upgrading to this version, you will need to reinstall the [Kubernetes App](https://help.sumologic.com/Send-Data/Applications-and-Other-Data-Sources/Kubernetes/Install_the_Kubernetes_App_and_View_the_Dashboards) in Sumo Logic. If you do not, some of the panels in the dashboards will not render properly.
-
-If you have other content outside the app (Partitions, Scheduled Views, Field Extraction Rules, or Scheduled Searches and Alerts), it may need to be updated after upgrading to v2.0.0. The logs carry the same content in a different format, so existing parsing logic and metadata may no longer apply.
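Before the old/new comparison below, here is a hedged sketch, not taken from the app content, of how a saved search written for the new enriched format might re-extract the fields the old format exposed directly. The `<your_kube_apiserver_category>` placeholder, the JSON paths, and the parse expression mirror the kube-apiserver example that follows and will likely need adjusting for your environment.

```
_sourceCategory = <your_kube_apiserver_category>
| json field=_raw "log", "kubernetes.pod_name" as log, pod_name nodrop
| parse regex field=log "^(?<severity>[IWEF])(?<klog_time>\d{4} \d{2}:\d{2}:\d{2}\.\d+)\s+(?<pid>\d+)\s+(?<source>\S+)\]\s+(?<message>.*)" nodrop
```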
- -The previous log format that is removed in v2.0.0: -```json -{ - "timestamp": 1538776281387, - "severity": "I", - "pid": "1", - "source": "wrap.go:42", - "message": "GET /api/v1/namespaces/kube-system/endpoints/kube-scheduler: (3.514372ms) 200 [[kube-scheduler/v1.10.5 (linux/amd64) kubernetes/32ac1c9/leader-election] 127.0.0.1:46290]" -} -``` -Is replaced by the following version. It is the same log line in a different format enriched with the same metadata the plugin applies to all pod logs. -```json -{ - "timestamp": 1538776282152, - "log": "I1005 21:51:21.387204 1 wrap.go:42] GET /api/v1/namespaces/kube-system/endpoints/kube-scheduler: (3.514372ms) 200 [[kube-scheduler/v1.10.5 (linux/amd64) kubernetes/32ac1c9/leader-election] 127.0.0.1:46290]", - "stream": "stdout", - "time": "2018-10-05T21:51:21.387477546Z", - "docker": { - "container_id": "a442fd2982dfdc09ab6235941f8d661a0a5c8df5e1d21f23ff48a9923ac14739" - }, - "kubernetes": { - "container_name": "kube-apiserver", - "namespace_name": "kube-system", - "pod_name": "kube-apiserver-ip-172-20-122-71.us-west-2.compute.internal", - "pod_id": "80fa5e13-c8b9-11e8-a456-0a8c1424d0d4", - "labels": { - "k8s-app": "kube-apiserver" - }, - "host": "ip-172-20-122-71.us-west-2.compute.internal", - "master_url": "https://100.64.0.1:443/api", - "namespace_id": "9b9b75b7-aa16-11e8-9d62-06df85b5d3bc" - } -} -``` +For details, see https://github.com/SumoLogic/fluentd-kubernetes-sumologic From 40ee8d13261b00cdda33cbcc18f693b447cf1566 Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 29 May 2019 16:19:57 -0700 Subject: [PATCH 07/10] make Dockerfile comment more specific --- deploy/docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile index 691e79aad7..8adca3be42 100644 --- a/deploy/docker/Dockerfile +++ b/deploy/docker/Dockerfile @@ -34,7 +34,7 @@ RUN gem sources --clear-all \ && rm -rf /home/fluent/.gem/ruby/2.5.0/cache/*.gem \ && rm -f ./*.gem -# Default settings +# Default settings for log collection ENV LOG_FORMAT "json" ENV FLUSH_INTERVAL "5s" ENV NUM_THREADS "1" From 2ecdb7054d87fbdf77ad6a1dd457eb8f4bbbfb72 Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 29 May 2019 16:41:07 -0700 Subject: [PATCH 08/10] add concat plugin for multiline --- deploy/docker/Dockerfile | 3 +++ deploy/kubernetes/fluentd-sumologic.yaml | 8 ++++++++ 2 files changed, 11 insertions(+) diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile index 8adca3be42..4876072caf 100644 --- a/deploy/docker/Dockerfile +++ b/deploy/docker/Dockerfile @@ -18,6 +18,7 @@ RUN gem install fluent-plugin-s3 -v 1.1.4 \ && gem install fluent-plugin-record-modifier \ && gem install fluent-plugin-kubernetes_metadata_filter -v 1.0.2 \ && gem install fluent-plugin-sumologic_output -v 1.4.0 \ + && gem install fluent-plugin-concat -v 2.3.0 \ && gem install fluent-plugin-rewrite-tag-filter -v 2.1.0 \ && gem install fluent-plugin-prometheus -v 1.1.0 \ && gem install fluent-plugin-kubernetes_sumologic @@ -44,6 +45,8 @@ ENV SOURCE_CATEGORY_REPLACE_DASH "/" ENV SOURCE_NAME "%{namespace}.%{pod}.%{container}" ENV KUBERNETES_META "true" ENV KUBERNETES_META_REDUCE "false" +ENV MULTILINE_START_REGEXP "/^\w{3} \d{1,2}, \d{4}/" +ENV CONCAT_SEPARATOR "" ENV ADD_TIMESTAMP "true" ENV TIMESTAMP_KEY "timestamp" ENV ADD_STREAM "true" diff --git a/deploy/kubernetes/fluentd-sumologic.yaml b/deploy/kubernetes/fluentd-sumologic.yaml index 8a3497c362..a91726a449 100644 --- a/deploy/kubernetes/fluentd-sumologic.yaml +++ 
b/deploy/kubernetes/fluentd-sumologic.yaml @@ -145,6 +145,14 @@ data: @include logs.source.systemd.conf logs.source.containers.conf: |- + + @type concat + key log + multiline_start_regexp "#{ENV['MULTILINE_START_REGEXP']}" + separator "#{ENV['CONCAT_SEPARATOR']}" + timeout_label @NORMAL + + @type relabel @label @NORMAL From ec11d88191f79d438fa3d7acdbb1f821e2c0840f Mon Sep 17 00:00:00 2001 From: Sam Song Date: Wed, 29 May 2019 17:08:19 -0700 Subject: [PATCH 09/10] remove kubernetes_sumologic plugin code from repo --- fluent-plugin-kubernetes_sumologic/Gemfile | 9 - fluent-plugin-kubernetes_sumologic/README.md | 5 - fluent-plugin-kubernetes_sumologic/Rakefile | 11 - ...fluent-plugin-kubernetes_sumologic.gemspec | 28 - .../plugin/filter_kubernetes_sumologic.rb | 201 --- .../test/helper.rb | 16 - .../test_filter_kubernetes_sumologic.rb | 1473 ----------------- 7 files changed, 1743 deletions(-) delete mode 100644 fluent-plugin-kubernetes_sumologic/Gemfile delete mode 100644 fluent-plugin-kubernetes_sumologic/README.md delete mode 100644 fluent-plugin-kubernetes_sumologic/Rakefile delete mode 100644 fluent-plugin-kubernetes_sumologic/fluent-plugin-kubernetes_sumologic.gemspec delete mode 100644 fluent-plugin-kubernetes_sumologic/lib/fluent/plugin/filter_kubernetes_sumologic.rb delete mode 100644 fluent-plugin-kubernetes_sumologic/test/helper.rb delete mode 100644 fluent-plugin-kubernetes_sumologic/test/plugin/test_filter_kubernetes_sumologic.rb diff --git a/fluent-plugin-kubernetes_sumologic/Gemfile b/fluent-plugin-kubernetes_sumologic/Gemfile deleted file mode 100644 index 9342b36475..0000000000 --- a/fluent-plugin-kubernetes_sumologic/Gemfile +++ /dev/null @@ -1,9 +0,0 @@ -source 'https://rubygems.org' - -group :test do - gem 'codecov' - gem 'simplecov' - gem 'webmock' -end - -gemspec \ No newline at end of file diff --git a/fluent-plugin-kubernetes_sumologic/README.md b/fluent-plugin-kubernetes_sumologic/README.md deleted file mode 100644 index 83832f85b8..0000000000 --- a/fluent-plugin-kubernetes_sumologic/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# fluent-plugin-kubernetes_sumologic - -[Fluentd](https://fluentd.org/) filter plugin to attach Sumo Logic metadata to the logs. 
- -For details, see https://github.com/SumoLogic/fluentd-kubernetes-sumologic diff --git a/fluent-plugin-kubernetes_sumologic/Rakefile b/fluent-plugin-kubernetes_sumologic/Rakefile deleted file mode 100644 index 93a45e97eb..0000000000 --- a/fluent-plugin-kubernetes_sumologic/Rakefile +++ /dev/null @@ -1,11 +0,0 @@ -require 'bundler/gem_tasks' -require 'rake/testtask' - -Rake::TestTask.new(:test) do |test| - test.libs << 'test' - test.pattern = 'test/**/test_*.rb' - test.verbose = true - test.warning = false -end - -task :default => :test diff --git a/fluent-plugin-kubernetes_sumologic/fluent-plugin-kubernetes_sumologic.gemspec b/fluent-plugin-kubernetes_sumologic/fluent-plugin-kubernetes_sumologic.gemspec deleted file mode 100644 index 40cbf1fe2c..0000000000 --- a/fluent-plugin-kubernetes_sumologic/fluent-plugin-kubernetes_sumologic.gemspec +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 -lib = File.expand_path('../lib', __FILE__) -$LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib) - -Gem::Specification.new do |gem| - gem.name = "fluent-plugin-kubernetes_sumologic" - gem.version = "0.0.0" - gem.authors = ["Sumo Logic"] - gem.email = ["collection@sumologic.com"] - gem.description = %q{FluentD plugin to extract logs from Kubernetes clusters, enrich and ship to Sumo logic.} - gem.summary = %q{FluentD plugin to extract logs from Kubernetes clusters, enrich and ship to Sumo logic.} - gem.homepage = "https://github.com/SumoLogic/fluentd-kubernetes-sumologic" - gem.license = "Apache-2.0" - - gem.files = `git ls-files`.split($/) - gem.executables = gem.files.grep(%r{^bin/}) { |f| File.basename(f) } - gem.test_files = gem.files.grep(%r{^(test|spec|features)/}) - gem.require_paths = ["lib"] - - gem.required_ruby_version = '>= 2.0.0' - - gem.add_development_dependency "bundler", "~> 2" - gem.add_development_dependency "rake" - gem.add_development_dependency 'test-unit', '~> 3.1.0' - gem.add_development_dependency "codecov", ">= 0.1.10" - gem.add_runtime_dependency "fluentd", ">= 0.14.12" - gem.add_runtime_dependency 'httpclient', '~> 2.8.0' -end diff --git a/fluent-plugin-kubernetes_sumologic/lib/fluent/plugin/filter_kubernetes_sumologic.rb b/fluent-plugin-kubernetes_sumologic/lib/fluent/plugin/filter_kubernetes_sumologic.rb deleted file mode 100644 index 24a2253a87..0000000000 --- a/fluent-plugin-kubernetes_sumologic/lib/fluent/plugin/filter_kubernetes_sumologic.rb +++ /dev/null @@ -1,201 +0,0 @@ -require "fluent/filter" - -module Fluent::Plugin - class SumoContainerOutput < Fluent::Plugin::Filter - # Register type - Fluent::Plugin.register_filter("kubernetes_sumologic", self) - - config_param :kubernetes_meta, :bool, :default => true - config_param :kubernetes_meta_reduce, :bool, :default => false - config_param :source_category, :string, :default => "%{namespace}/%{pod_name}" - config_param :source_category_replace_dash, :string, :default => "/" - config_param :source_category_prefix, :string, :default => "kubernetes/" - config_param :source_name, :string, :default => "%{namespace}.%{pod}.%{container}" - config_param :log_format, :string, :default => "json" - config_param :source_host, :string, :default => "" - config_param :exclude_container_regex, :string, :default => "" - config_param :exclude_facility_regex, :string, :default => "" - config_param :exclude_host_regex, :string, :default => "" - config_param :exclude_namespace_regex, :string, :default => "" - config_param :exclude_pod_regex, :string, :default => "" - config_param :exclude_priority_regex, :string, :default => "" - 
config_param :exclude_unit_regex, :string, :default => "" - config_param :add_stream, :bool, :default => true - config_param :add_time, :bool, :default => true - - def configure(conf) - super - end - - def is_number?(string) - true if Float(string) rescue false - end - - def sanitize_pod_name(k8s_metadata) - # Strip out dynamic bits from pod name. - # NOTE: Kubernetes deployments append a template hash. - # At the moment this can be in 3 different forms: - # 1) pre-1.8: numeric in pod_template_hash and pod_parts[-2] - # 2) 1.8-1.11: numeric in pod_template_hash, hash in pod_parts[-2] - # 3) post-1.11: hash in pod_template_hash and pod_parts[-2] - - pod_parts = k8s_metadata[:pod].split("-") - pod_template_hash = k8s_metadata[:"label:pod-template-hash"] - if (pod_template_hash == pod_parts[-2] || - to_hash(pod_template_hash) == pod_parts[-2]) - k8s_metadata[:pod_name] = pod_parts[0..-3].join("-") - else - k8s_metadata[:pod_name] = pod_parts[0..-2].join("-") - end - end - - def to_hash(pod_template_hash) - # Convert the pod_template_hash to an alphanumeric string using the same logic Kubernetes - # uses at https://github.com/kubernetes/apimachinery/blob/18a5ff3097b4b189511742e39151a153ee16988b/pkg/util/rand/rand.go#L119 - alphanums = "bcdfghjklmnpqrstvwxz2456789" - pod_template_hash.each_byte.map { |i| alphanums[i.to_i % alphanums.length] }.join("") - end - - def filter(tag, time, record) - # Set the sumo metadata fields - sumo_metadata = record["_sumo_metadata"] || {} - record["_sumo_metadata"] = sumo_metadata - sumo_metadata[:log_format] = @log_format - sumo_metadata[:host] = @source_host if @source_host - sumo_metadata[:source] = @source_name if @source_name - - unless @source_category.nil? - sumo_metadata[:category] = @source_category.dup - unless @source_category_prefix.nil? - sumo_metadata[:category].prepend(@source_category_prefix) - end - end - - if record.key?("_SYSTEMD_UNIT") and not record.fetch("_SYSTEMD_UNIT").nil? - unless @exclude_unit_regex.empty? - if Regexp.compile(@exclude_unit_regex).match(record["_SYSTEMD_UNIT"]) - return nil - end - end - - unless @exclude_facility_regex.empty? - if Regexp.compile(@exclude_facility_regex).match(record["SYSLOG_FACILITY"]) - return nil - end - end - - unless @exclude_priority_regex.empty? - if Regexp.compile(@exclude_priority_regex).match(record["PRIORITY"]) - return nil - end - end - - unless @exclude_host_regex.empty? - if Regexp.compile(@exclude_host_regex).match(record["_HOSTNAME"]) - return nil - end - end - end - - # Allow fields to be overridden by annotations - if record.key?("kubernetes") and not record.fetch("kubernetes").nil? - # Clone kubernetes hash so we don't override the cache - kubernetes = record["kubernetes"].clone - k8s_metadata = { - :namespace => kubernetes["namespace_name"], - :pod => kubernetes["pod_name"], - :pod_id => kubernetes['pod_id'], - :container => kubernetes["container_name"], - :source_host => kubernetes["host"], - } - - - if kubernetes.has_key? "labels" - kubernetes["labels"].each { |k, v| k8s_metadata["label:#{k}".to_sym] = v } - end - k8s_metadata.default = "undefined" - - annotations = kubernetes.fetch("annotations", {}) - if annotations["sumologic.com/include"] == "true" - include = true - else - include = false - end - - unless @exclude_namespace_regex.empty? - if Regexp.compile(@exclude_namespace_regex).match(k8s_metadata[:namespace]) and not include - return nil - end - end - - unless @exclude_pod_regex.empty? 
- if Regexp.compile(@exclude_pod_regex).match(k8s_metadata[:pod]) and not include - return nil - end - end - - unless @exclude_container_regex.empty? - if Regexp.compile(@exclude_container_regex).match(k8s_metadata[:container]) and not include - return nil - end - end - - unless @exclude_host_regex.empty? - if Regexp.compile(@exclude_host_regex).match(k8s_metadata[:source_host]) and not include - return nil - end - end - - sanitize_pod_name(k8s_metadata) - - if annotations["sumologic.com/exclude"] == "true" - return nil - end - - sumo_metadata[:log_format] = annotations["sumologic.com/format"] if annotations["sumologic.com/format"] - - if annotations["sumologic.com/sourceHost"].nil? - sumo_metadata[:host] = sumo_metadata[:host] % k8s_metadata - else - sumo_metadata[:host] = annotations["sumologic.com/sourceHost"] % k8s_metadata - end - - if annotations["sumologic.com/sourceName"].nil? - sumo_metadata[:source] = sumo_metadata[:source] % k8s_metadata - else - sumo_metadata[:source] = annotations["sumologic.com/sourceName"] % k8s_metadata - end - - if annotations["sumologic.com/sourceCategory"].nil? - sumo_metadata[:category] = sumo_metadata[:category] % k8s_metadata - else - sumo_metadata[:category] = (annotations["sumologic.com/sourceCategory"] % k8s_metadata).prepend(@source_category_prefix) - end - sumo_metadata[:category].gsub!("-", @source_category_replace_dash) - - # Strip kubernetes metadata from json if disabled - if annotations["sumologic.com/kubernetes_meta"] == "false" || !@kubernetes_meta - record.delete("docker") - record.delete("kubernetes") - end - if annotations["sumologic.com/kubernetes_meta_reduce"] == "true" || annotations["sumologic.com/kubernetes_meta_reduce"].nil? && @kubernetes_meta_reduce == true - record.delete("docker") - record["kubernetes"].delete("pod_id") - record["kubernetes"].delete("namespace_id") - record["kubernetes"].delete("labels") - record["kubernetes"].delete("master_url") - record["kubernetes"].delete("annotations") - end - if @add_stream == false - record.delete("stream") - end - if @add_time == false - record.delete("time") - end - # Strip sumologic.com annotations - kubernetes.delete("annotations") if annotations - end - record - end - end -end diff --git a/fluent-plugin-kubernetes_sumologic/test/helper.rb b/fluent-plugin-kubernetes_sumologic/test/helper.rb deleted file mode 100644 index 5a12ee9c08..0000000000 --- a/fluent-plugin-kubernetes_sumologic/test/helper.rb +++ /dev/null @@ -1,16 +0,0 @@ -require "simplecov" -SimpleCov.start - -if ENV["CI"] == "true" - require "codecov" - SimpleCov.formatter = SimpleCov::Formatter::Codecov -end - -$LOAD_PATH.unshift(File.expand_path("../../", __FILE__)) -require "test-unit" -require "fluent/test" -require "fluent/test/driver/filter" -require "fluent/test/helpers" - -Test::Unit::TestCase.include(Fluent::Test::Helpers) -Test::Unit::TestCase.extend(Fluent::Test::Helpers) diff --git a/fluent-plugin-kubernetes_sumologic/test/plugin/test_filter_kubernetes_sumologic.rb b/fluent-plugin-kubernetes_sumologic/test/plugin/test_filter_kubernetes_sumologic.rb deleted file mode 100644 index 0e706f5fd4..0000000000 --- a/fluent-plugin-kubernetes_sumologic/test/plugin/test_filter_kubernetes_sumologic.rb +++ /dev/null @@ -1,1473 +0,0 @@ -require "fluent/test" -require "fluent/test/helpers" -require "fluent/test/driver/filter" -require "fluent/plugin/filter_kubernetes_sumologic" -require "test-unit" -require "webmock/test_unit" - -class SumoContainerOutputTest < Test::Unit::TestCase - include Fluent::Test::Helpers - - setup do 
- Fluent::Test.setup - @time = Fluent::Engine.now - end - - def create_driver(conf = CONFIG) - Fluent::Test::Driver::Filter.new(Fluent::Plugin::SumoContainerOutput).configure(conf) - end - - test "test_empty_config" do - conf = %{} - assert_nothing_raised do - create_driver(conf) - end - end - - test "test_default_config" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_no_k8s_labels" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs/54575ccdb9", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - 
assert_equal(d.filtered_records[0], expected) - end - - test "test_sourcecategory_prefix" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_add_stream" do - conf = %{ - add_stream false - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_add_time" do - conf 
= %{ - add_time false - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "time" => time, - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_sourcecategory_replace_dash" do - conf = %{ - source_category_replace_dash - - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log-format-labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_kubernetes_meta" do - conf 
= %{ - kubernetes_meta false - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_kubernetes_meta_reduce_via_annotation" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs" - }, - "annotations" => { - "sumologic.com/kubernetes_meta_reduce" => "true", - }, - "host" => "docker-for-desktop", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "kubernetes" => { - "container_name" => "log-format-labs", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "host" => "docker-for-desktop", - "namespace_name" => "default", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_kubernetes_meta_reduce_via_conf" do - conf = %{ - kubernetes_meta_reduce true - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs" - }, - "host" => "docker-for-desktop", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "kubernetes" => { - "container_name" => "log-format-labs", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "host" => "docker-for-desktop", - "namespace_name" => "default", - }, - 
"_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_kubernetes_meta_reduce_via_annotation_and_conf" do - conf = %{ - kubernetes_meta_reduce false - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs" - }, - "annotations" => { - "sumologic.com/kubernetes_meta_reduce" => "true", - }, - "host" => "docker-for-desktop", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "kubernetes" => { - "container_name" => "log-format-labs", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "host" => "docker-for-desktop", - "namespace_name" => "default", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_log_format_json_merge" do - conf = %{ - log_format json_merge - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json_merge", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_log_format_text" do - conf = %{ - log_format text - } - d = 
create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "text", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_exclude_pod_regex" do - conf = %{ - exclude_pod_regex foo - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) - end - assert_equal(1, d.filtered_records.size) - end - - test "test_exclude_pod_regex_whitelist" do - conf = %{ - exclude_pod_regex .* - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) - end - assert_equal(1, d.filtered_records.size) - end - - test "test_exclude_container_regex" do - conf = %{ - exclude_container_regex foo - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, 
"host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) - end - assert_equal(1, d.filtered_records.size) - end - - test "test_exclude_container_regex_whitelist" do - conf = %{ - exclude_container_regex .* - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) - end - assert_equal(1, d.filtered_records.size) - end - - test "test_exclude_namespace_regex" do - conf = %{ - exclude_namespace_regex foo - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "foo", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "bar", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) - end - assert_equal(1, d.filtered_records.size) - end - - test "test_exclude_namespace_regex_whitelist" do - conf = %{ - exclude_namespace_regex .* - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) - end - assert_equal(1, d.filtered_records.size) - end - - test "test_exclude_host_regex" do - conf = %{ - exclude_host_regex foo - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "foo", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "bar", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) - end - assert_equal(1, d.filtered_records.size) - end - - 
test "test_exclude_host_regex_whitelist" do - conf = %{ - exclude_host_regex .* - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "false"}}, "message" => "foo"}) - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "5679EFGH", "pod_name" => "bar-6554321-a87f", "container_name" => "bar", "labels" => {"app" => "bar"}, "host" => "localhost", "annotations" => {"sumologic.com/include" => "true"}}, "message" => "foo"}) - end - assert_equal(1, d.filtered_records.size) - end - - test "test_exclude_annotation" do - conf = %{ - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost", "annotations" => {"sumologic.com/exclude" => "true"}}, "message" => "foo"}) - end - assert_equal(0, d.filtered_records.size) - end - - test "test_sourcehost_annotation" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "annotations" => { - "sumologic.com/sourceHost" => "foo", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "annotations" => { - "sumologic.com/sourceHost" => "foo", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "foo", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_sourcename_annotation" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => 
"170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "annotations" => { - "sumologic.com/sourceName" => "foo", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "annotations" => { - "sumologic.com/sourceName" => "foo", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "foo", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_sourcecategory_annotation" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "annotations" => { - "sumologic.com/sourceCategory" => "foo", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "annotations" => { - "sumologic.com/sourceCategory" => "foo", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/foo", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_sourcecategory_using_labels" do - conf = %{ - source_category %{namespace}/%{pod_name}/%{label:run} - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => 
"5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_sourcehost_using_pod_id" do - conf = %{ - source_host %{pod_id} - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "170af806-c801-11e8-9009-025000000001", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_undefined_labels" do - conf = %{ - source_category %{namespace}/%{pod_name}/%{label:foo} - } - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", 
- "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs/undefined", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_exclude_systemd_unit_regex" do - conf = %{ - exclude_unit_regex .* - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"_SYSTEMD_UNIT" => "test", "kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost"}, "message" => "foo"}) - end - assert_equal(0, d.filtered_records.size) - end - - test "test_exclude_systemd_facility_regex" do - conf = %{ - exclude_facility_regex .* - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"_SYSTEMD_UNIT" => "test", "SYSLOG_FACILITY" => "test", "kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost"}, "message" => "foo"}) - end - assert_equal(0, d.filtered_records.size) - end - - test "test_exclude_systemd_priority_regex" do - conf = %{ - exclude_priority_regex .* - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"_SYSTEMD_UNIT" => "test", "PRIORITY" => "test", "kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost"}, "message" => "foo"}) - end - assert_equal(0, d.filtered_records.size) - end - - test "test_exclude_systemd_hostname_regex" do - conf = %{ - exclude_host_regex .* - } - d = create_driver(conf) - time = @time - d.run do - d.feed("filter.test", time, {"_SYSTEMD_UNIT" => "test", "_HOSTNAME" => "test", "kubernetes" => {"namespace_name" => "test", "pod_id" => "1234ABCD", "pod_name" => "foo-1234556-f87a", "container_name" => "foo", "labels" => {"app" => "foo"}, "host" => "localhost"}, "message" => "foo"}) - end - assert_equal(0, d.filtered_records.size) - end - - test "test_pre_1.8_dynamic_bit_removal" do - 
conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-1013177865-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-1013177865-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-1013177865-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_1.8-1.11_dynamic_bit_removal" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "1013177865", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_post_1.11_dynamic_bit_removal" do - conf = %{} - d = create_driver(conf) - time = @time - input 
= { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "54575ccdb9", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-54575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "54575ccdb9", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-54575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end - - test "test_mismatch_dynamic_bit_is_left" do - conf = %{} - d = create_driver(conf) - time = @time - input = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-53575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "54575ccdb9", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - } - d.run do - d.feed("filter.test", time, input) - end - expected = { - "timestamp" => 1538677347823, - "log" => "some message", - "stream" => "stdout", - "docker" => { - "container_id" => "5c280b6ad5abec32e9af729295c20f60fbeadf3ba16fda2d121f87228e6822e0", - }, - "kubernetes" => { - "container_name" => "log-format-labs", - "namespace_name" => "default", - "pod_name" => "log-format-labs-53575ccdb9-9d677", - "pod_id" => "170af806-c801-11e8-9009-025000000001", - "labels" => { - "pod-template-hash" => "54575ccdb9", - "run" => "log-format-labs", - }, - "host" => "docker-for-desktop", - "master_url" => "https =>//10.96.0.1 =>443/api", - "namespace_id" => "e8572415-9596-11e8-b28b-025000000001", - }, - "_sumo_metadata" => { - :category => "kubernetes/default/log/format/labs/53575ccdb9", - :host => "", - :log_format => "json", - :source => "default.log-format-labs-53575ccdb9-9d677.log-format-labs", - }, - } - assert_equal(1, d.filtered_records.size) - assert_equal(d.filtered_records[0], expected) - end -end From bdd9da69b905d7b3c7575b7bf9e17eb1a971967f Mon Sep 17 00:00:00 2001 From: Sam Song Date: Thu, 30 May 2019 15:01:45 -0700 Subject: [PATCH 10/10] add 
tolerations so fluentbit runs on master nodes --- deploy/fluent-bit/overrides.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deploy/fluent-bit/overrides.yaml b/deploy/fluent-bit/overrides.yaml index d400ce2753..9d369b3f61 100644 --- a/deploy/fluent-bit/overrides.yaml +++ b/deploy/fluent-bit/overrides.yaml @@ -10,6 +10,10 @@ backend: trackOffsets: true +tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + input: tail: memBufLimit: 5MB