From fa17a78fb456209f9dea9ca18614416a038b727e Mon Sep 17 00:00:00 2001 From: SG Date: Thu, 26 Oct 2023 08:59:10 -0600 Subject: [PATCH 01/82] bump for v23.11.0 development --- docker-compose-standalone.yml | 44 +++++----- docker-compose.yml | 44 +++++----- docs/contributing-pcap.md | 2 +- docs/download.md | 4 +- docs/hedgehog-iso-build.md | 2 +- docs/kubernetes.md | 88 +++++++++---------- docs/malcolm-iso.md | 2 +- docs/quickstart.md | 38 ++++---- docs/ubuntu-install-example.md | 38 ++++---- kubernetes/03-opensearch.yml | 4 +- kubernetes/04-dashboards.yml | 2 +- kubernetes/05-upload.yml | 4 +- kubernetes/06-pcap-monitor.yml | 4 +- kubernetes/07-arkime.yml | 4 +- kubernetes/08-api.yml | 2 +- kubernetes/09-dashboards-helper.yml | 2 +- kubernetes/10-zeek.yml | 4 +- kubernetes/11-suricata.yml | 4 +- kubernetes/12-file-monitor.yml | 4 +- kubernetes/13-filebeat.yml | 4 +- kubernetes/14-logstash.yml | 4 +- kubernetes/15-netbox-redis.yml | 4 +- kubernetes/16-netbox-redis-cache.yml | 2 +- kubernetes/17-netbox-postgres.yml | 4 +- kubernetes/18-netbox.yml | 4 +- kubernetes/19-htadmin.yml | 4 +- kubernetes/20-pcap-capture.yml | 4 +- kubernetes/21-zeek-live.yml | 4 +- kubernetes/22-suricata-live.yml | 4 +- kubernetes/23-freq.yml | 2 +- kubernetes/98-nginx-proxy.yml | 4 +- .../aws/ami/packer_vars.json.example | 2 +- 32 files changed, 171 insertions(+), 171 deletions(-) diff --git a/docker-compose-standalone.yml b/docker-compose-standalone.yml index 87aa7e02b..684e4a808 100644 --- a/docker-compose-standalone.yml +++ b/docker-compose-standalone.yml @@ -4,7 +4,7 @@ version: '3.7' services: opensearch: - image: ghcr.io/idaholab/malcolm/opensearch:23.10.0 + image: ghcr.io/idaholab/malcolm/opensearch:23.11.0 # Technically the "hedgehog" profile doesn't have OpenSearch, but in that case # OPENSEARCH_PRIMARY will be set to remote, which means the container will # start but not actually run OpenSearch. 
It's included in both profiles to @@ -42,7 +42,7 @@ services: retries: 3 start_period: 180s dashboards-helper: - image: ghcr.io/idaholab/malcolm/dashboards-helper:23.10.0 + image: ghcr.io/idaholab/malcolm/dashboards-helper:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -71,7 +71,7 @@ services: retries: 3 start_period: 30s dashboards: - image: ghcr.io/idaholab/malcolm/dashboards:23.10.0 + image: ghcr.io/idaholab/malcolm/dashboards:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -98,7 +98,7 @@ services: retries: 3 start_period: 210s logstash: - image: ghcr.io/idaholab/malcolm/logstash-oss:23.10.0 + image: ghcr.io/idaholab/malcolm/logstash-oss:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -141,7 +141,7 @@ services: retries: 3 start_period: 600s filebeat: - image: ghcr.io/idaholab/malcolm/filebeat-oss:23.10.0 + image: ghcr.io/idaholab/malcolm/filebeat-oss:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -175,7 +175,7 @@ services: retries: 3 start_period: 60s arkime: - image: ghcr.io/idaholab/malcolm/arkime:23.10.0 + image: ghcr.io/idaholab/malcolm/arkime:23.11.0 # todo: viewer/wise in hedgehog profile (and what about nginx reaching back?) 
profiles: ["malcolm", "hedgehog"] restart: "no" @@ -215,7 +215,7 @@ services: retries: 3 start_period: 210s zeek: - image: ghcr.io/idaholab/malcolm/zeek:23.10.0 + image: ghcr.io/idaholab/malcolm/zeek:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -254,7 +254,7 @@ services: retries: 3 start_period: 60s zeek-live: - image: ghcr.io/idaholab/malcolm/zeek:23.10.0 + image: ghcr.io/idaholab/malcolm/zeek:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -283,7 +283,7 @@ services: - ./zeek-logs/extract_files:/zeek/extract_files - ./zeek/intel:/opt/zeek/share/zeek/site/intel suricata: - image: ghcr.io/idaholab/malcolm/suricata:23.10.0 + image: ghcr.io/idaholab/malcolm/suricata:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -318,7 +318,7 @@ services: retries: 3 start_period: 120s suricata-live: - image: ghcr.io/idaholab/malcolm/suricata:23.10.0 + image: ghcr.io/idaholab/malcolm/suricata:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -345,7 +345,7 @@ services: - ./suricata-logs:/var/log/suricata - ./suricata/rules:/opt/suricata/rules:ro file-monitor: - image: ghcr.io/idaholab/malcolm/file-monitor:23.10.0 + image: ghcr.io/idaholab/malcolm/file-monitor:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -372,7 +372,7 @@ services: retries: 3 start_period: 60s pcap-capture: - image: ghcr.io/idaholab/malcolm/pcap-capture:23.10.0 + image: ghcr.io/idaholab/malcolm/pcap-capture:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -395,7 +395,7 @@ services: - ./nginx/ca-trust:/var/local/ca-trust:ro - ./pcap/upload:/pcap pcap-monitor: - image: ghcr.io/idaholab/malcolm/pcap-monitor:23.10.0 + image: ghcr.io/idaholab/malcolm/pcap-monitor:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -422,7 +422,7 @@ services: retries: 3 start_period: 90s upload: - image: 
ghcr.io/idaholab/malcolm/file-upload:23.10.0 + image: ghcr.io/idaholab/malcolm/file-upload:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -448,7 +448,7 @@ services: retries: 3 start_period: 60s htadmin: - image: ghcr.io/idaholab/malcolm/htadmin:23.10.0 + image: ghcr.io/idaholab/malcolm/htadmin:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -474,7 +474,7 @@ services: retries: 3 start_period: 60s freq: - image: ghcr.io/idaholab/malcolm/freq:23.10.0 + image: ghcr.io/idaholab/malcolm/freq:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -497,7 +497,7 @@ services: retries: 3 start_period: 60s netbox: - image: ghcr.io/idaholab/malcolm/netbox:23.10.0 + image: ghcr.io/idaholab/malcolm/netbox:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -530,7 +530,7 @@ services: retries: 3 start_period: 120s netbox-postgres: - image: ghcr.io/idaholab/malcolm/postgresql:23.10.0 + image: ghcr.io/idaholab/malcolm/postgresql:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -555,7 +555,7 @@ services: retries: 3 start_period: 45s netbox-redis: - image: ghcr.io/idaholab/malcolm/redis:23.10.0 + image: ghcr.io/idaholab/malcolm/redis:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -584,7 +584,7 @@ services: retries: 3 start_period: 45s netbox-redis-cache: - image: ghcr.io/idaholab/malcolm/redis:23.10.0 + image: ghcr.io/idaholab/malcolm/redis:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -612,7 +612,7 @@ services: retries: 3 start_period: 45s api: - image: ghcr.io/idaholab/malcolm/api:23.10.0 + image: ghcr.io/idaholab/malcolm/api:23.11.0 profiles: ["malcolm"] command: gunicorn --bind 0:5000 manage:app restart: "no" @@ -638,7 +638,7 @@ services: retries: 3 start_period: 60s nginx-proxy: - image: ghcr.io/idaholab/malcolm/nginx-proxy:23.10.0 + image: ghcr.io/idaholab/malcolm/nginx-proxy:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false diff --git a/docker-compose.yml 
b/docker-compose.yml index cdfa5f761..c38dc4b04 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -7,7 +7,7 @@ services: build: context: . dockerfile: Dockerfiles/opensearch.Dockerfile - image: ghcr.io/idaholab/malcolm/opensearch:23.10.0 + image: ghcr.io/idaholab/malcolm/opensearch:23.11.0 # Technically the "hedgehog" profile doesn't have OpenSearch, but in that case # OPENSEARCH_PRIMARY will be set to remote, which means the container will # start but not actually run OpenSearch. It's included in both profiles to @@ -48,7 +48,7 @@ services: build: context: . dockerfile: Dockerfiles/dashboards-helper.Dockerfile - image: ghcr.io/idaholab/malcolm/dashboards-helper:23.10.0 + image: ghcr.io/idaholab/malcolm/dashboards-helper:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -80,7 +80,7 @@ services: build: context: . dockerfile: Dockerfiles/dashboards.Dockerfile - image: ghcr.io/idaholab/malcolm/dashboards:23.10.0 + image: ghcr.io/idaholab/malcolm/dashboards:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -110,7 +110,7 @@ services: build: context: . dockerfile: Dockerfiles/logstash.Dockerfile - image: ghcr.io/idaholab/malcolm/logstash-oss:23.10.0 + image: ghcr.io/idaholab/malcolm/logstash-oss:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -160,7 +160,7 @@ services: build: context: . dockerfile: Dockerfiles/filebeat.Dockerfile - image: ghcr.io/idaholab/malcolm/filebeat-oss:23.10.0 + image: ghcr.io/idaholab/malcolm/filebeat-oss:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -197,7 +197,7 @@ services: build: context: . dockerfile: Dockerfiles/arkime.Dockerfile - image: ghcr.io/idaholab/malcolm/arkime:23.10.0 + image: ghcr.io/idaholab/malcolm/arkime:23.11.0 # todo: viewer/wise in hedgehog profile (and what about nginx reaching back?) profiles: ["malcolm", "hedgehog"] restart: "no" @@ -243,7 +243,7 @@ services: build: context: . 
dockerfile: Dockerfiles/zeek.Dockerfile - image: ghcr.io/idaholab/malcolm/zeek:23.10.0 + image: ghcr.io/idaholab/malcolm/zeek:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -286,7 +286,7 @@ services: build: context: . dockerfile: Dockerfiles/zeek.Dockerfile - image: ghcr.io/idaholab/malcolm/zeek:23.10.0 + image: ghcr.io/idaholab/malcolm/zeek:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -319,7 +319,7 @@ services: build: context: . dockerfile: Dockerfiles/suricata.Dockerfile - image: ghcr.io/idaholab/malcolm/suricata:23.10.0 + image: ghcr.io/idaholab/malcolm/suricata:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -357,7 +357,7 @@ services: build: context: . dockerfile: Dockerfiles/suricata.Dockerfile - image: ghcr.io/idaholab/malcolm/suricata:23.10.0 + image: ghcr.io/idaholab/malcolm/suricata:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -387,7 +387,7 @@ services: build: context: . dockerfile: Dockerfiles/file-monitor.Dockerfile - image: ghcr.io/idaholab/malcolm/file-monitor:23.10.0 + image: ghcr.io/idaholab/malcolm/file-monitor:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -417,7 +417,7 @@ services: build: context: . dockerfile: Dockerfiles/pcap-capture.Dockerfile - image: ghcr.io/idaholab/malcolm/pcap-capture:23.10.0 + image: ghcr.io/idaholab/malcolm/pcap-capture:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -443,7 +443,7 @@ services: build: context: . dockerfile: Dockerfiles/pcap-monitor.Dockerfile - image: ghcr.io/idaholab/malcolm/pcap-monitor:23.10.0 + image: ghcr.io/idaholab/malcolm/pcap-monitor:23.11.0 profiles: ["malcolm", "hedgehog"] restart: "no" stdin_open: false @@ -473,7 +473,7 @@ services: build: context: . 
dockerfile: Dockerfiles/file-upload.Dockerfile - image: ghcr.io/idaholab/malcolm/file-upload:23.10.0 + image: ghcr.io/idaholab/malcolm/file-upload:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false @@ -499,7 +499,7 @@ services: retries: 3 start_period: 60s htadmin: - image: ghcr.io/idaholab/malcolm/htadmin:23.10.0 + image: ghcr.io/idaholab/malcolm/htadmin:23.11.0 profiles: ["malcolm"] build: context: . @@ -528,7 +528,7 @@ services: retries: 3 start_period: 60s freq: - image: ghcr.io/idaholab/malcolm/freq:23.10.0 + image: ghcr.io/idaholab/malcolm/freq:23.11.0 profiles: ["malcolm"] build: context: . @@ -554,7 +554,7 @@ services: retries: 3 start_period: 60s netbox: - image: ghcr.io/idaholab/malcolm/netbox:23.10.0 + image: ghcr.io/idaholab/malcolm/netbox:23.11.0 profiles: ["malcolm"] build: context: . @@ -590,7 +590,7 @@ services: retries: 3 start_period: 120s netbox-postgres: - image: ghcr.io/idaholab/malcolm/postgresql:23.10.0 + image: ghcr.io/idaholab/malcolm/postgresql:23.11.0 profiles: ["malcolm"] build: context: . @@ -618,7 +618,7 @@ services: retries: 3 start_period: 45s netbox-redis: - image: ghcr.io/idaholab/malcolm/redis:23.10.0 + image: ghcr.io/idaholab/malcolm/redis:23.11.0 profiles: ["malcolm"] build: context: . @@ -650,7 +650,7 @@ services: retries: 3 start_period: 45s netbox-redis-cache: - image: ghcr.io/idaholab/malcolm/redis:23.10.0 + image: ghcr.io/idaholab/malcolm/redis:23.11.0 profiles: ["malcolm"] build: context: . @@ -681,7 +681,7 @@ services: retries: 3 start_period: 45s api: - image: ghcr.io/idaholab/malcolm/api:23.10.0 + image: ghcr.io/idaholab/malcolm/api:23.11.0 profiles: ["malcolm"] build: context: . @@ -713,7 +713,7 @@ services: build: context: . 
dockerfile: Dockerfiles/nginx.Dockerfile - image: ghcr.io/idaholab/malcolm/nginx-proxy:23.10.0 + image: ghcr.io/idaholab/malcolm/nginx-proxy:23.11.0 profiles: ["malcolm"] restart: "no" stdin_open: false diff --git a/docs/contributing-pcap.md b/docs/contributing-pcap.md index c4ce70767..6d83e1717 100644 --- a/docs/contributing-pcap.md +++ b/docs/contributing-pcap.md @@ -1,6 +1,6 @@ # PCAP processors -When a PCAP is uploaded (either through Malcolm's [upload web interface](upload.md#Upload) or just copied manually into the `./pcap/upload` directory), the `pcap-monitor` container has a script that picks up those PCAP files and publishes to a [ZeroMQ](https://zeromq.org/) topic that can be subscribed to by any other process that wants to analyze that PCAP. In Malcolm (at the time of the [v23.10.0 release]({{ site.github.repository_url }}/releases/tag/v23.10.0)), there are three such ZeroMQ topics: the `zeek`, `suricata` and `arkime` containers. These actually share the [same script]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/pcap_processor.py) to run the PCAP through Zeek, Suricata, and Arkime, respectively. For an example to follow, the `zeek` container is the less complicated of the two. To integrate a new PCAP processing tool into Malcolm (named `cooltool` for this example) the process would entail: +When a PCAP is uploaded (either through Malcolm's [upload web interface](upload.md#Upload) or just copied manually into the `./pcap/upload` directory), the `pcap-monitor` container has a script that picks up those PCAP files and publishes to a [ZeroMQ](https://zeromq.org/) topic that can be subscribed to by any other process that wants to analyze that PCAP. In Malcolm (at the time of the [v23.11.0 release]({{ site.github.repository_url }}/releases/tag/v23.11.0)), there are three such ZeroMQ topics: the `zeek`, `suricata` and `arkime` containers. 
These actually share the [same script]({{ site.github.repository_url }}/blob/{{ site.github.build_revision }}/shared/bin/pcap_processor.py) to run the PCAP through Zeek, Suricata, and Arkime, respectively. For an example to follow, the `zeek` container is the less complicated of the two. To integrate a new PCAP processing tool into Malcolm (named `cooltool` for this example) the process would entail: 1. Define the service as instructed in the [Adding a new service](contributing-new-image.md#NewImage) section * Note how the existing `zeek` and `arkime` services use [bind mounts](contributing-local-modifications.md#Bind) to access the local `./pcap` directory diff --git a/docs/download.md b/docs/download.md index 933dc8fb7..7261f51a3 100644 --- a/docs/download.md +++ b/docs/download.md @@ -16,7 +16,7 @@ While official downloads of the Malcolm installer ISO are not provided, an **uno | ISO | SHA256 | |---|---| -| [malcolm-23.10.0.iso](/iso/malcolm-23.10.0.iso) (5.4GiB) | [`021103f8d8a4ac8a4c467dd4dc18e59fbb57f1b57c3927de702ef465953b0cf0`](/iso/malcolm-23.10.0.iso.sha256.txt) | +| [malcolm-23.11.0.iso](/iso/malcolm-23.11.0.iso) (5.4GiB) | [`xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`](/iso/malcolm-23.11.0.iso.sha256.txt) | ## Hedgehog Linux @@ -26,7 +26,7 @@ While official downloads of the Malcolm installer ISO are not provided, an **uno | ISO | SHA256 | |---|---| -| [hedgehog-23.10.0.iso](/iso/hedgehog-23.10.0.iso) (2.6GiB) | [`65f3d15c102ab3b518965eb87fec8f4b61ee10e1aa366654576105265cb2a9c8`](/iso/hedgehog-23.10.0.iso.sha256.txt) | +| [hedgehog-23.11.0.iso](/iso/hedgehog-23.11.0.iso) (2.6GiB) | [`xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`](/iso/hedgehog-23.11.0.iso.sha256.txt) | ## Warning diff --git a/docs/hedgehog-iso-build.md b/docs/hedgehog-iso-build.md index 466656a72..1e5d240a2 100644 --- a/docs/hedgehog-iso-build.md +++ b/docs/hedgehog-iso-build.md @@ -29,7 +29,7 @@ Building the ISO may take 90 minutes or more 
depending on your system. As the bu ``` … -Finished, created "/sensor-build/hedgehog-23.10.0.iso" +Finished, created "/sensor-build/hedgehog-23.11.0.iso" … ``` diff --git a/docs/kubernetes.md b/docs/kubernetes.md index 82b7ec983..3f8c9d3c5 100644 --- a/docs/kubernetes.md +++ b/docs/kubernetes.md @@ -272,28 +272,28 @@ agent2 | agent2 | 192.168.56.12 | agent2 | k3s | 6000m | agent1 | agent1 | 192.168.56.11 | agent1 | k3s | 6000m | 861.34m | 14.36% | 19.55Gi | 9.29Gi | 61.28Gi | 11 | Pod Name | State | Pod IP | Pod Kind | Worker Node | CPU Usage | Memory Usage | Container Name:Restarts | Container Image | -api-deployment-6f4686cf59-bn286 | Running | 10.42.2.14 | ReplicaSet | agent1 | 0.11m | 59.62Mi | api-container:0 | api:23.10.0 | -file-monitor-deployment-855646bd75-vk7st | Running | 10.42.2.16 | ReplicaSet | agent1 | 8.47m | 1.46Gi | file-monitor-container:0 | file-monitor:23.10.0 | -zeek-live-deployment-64b69d4b6f-947vr | Running | 10.42.2.17 | ReplicaSet | agent1 | 0.02m | 12.44Mi | zeek-live-container:0 | zeek:23.10.0 | -dashboards-helper-deployment-69dc54f6b6-ln4sq | Running | 10.42.2.15 | ReplicaSet | agent1 | 10.77m | 38.43Mi | dashboards-helper-container:0 | dashboards-helper:23.10.0 | -upload-deployment-586568844b-4jnk9 | Running | 10.42.2.18 | ReplicaSet | agent1 | 0.15m | 29.78Mi | upload-container:0 | file-upload:23.10.0 | -filebeat-deployment-6ff8bc444f-t7h49 | Running | 10.42.2.20 | ReplicaSet | agent1 | 2.84m | 70.71Mi | filebeat-container:0 | filebeat-oss:23.10.0 | -zeek-offline-deployment-844f4865bd-g2sdm | Running | 10.42.2.21 | ReplicaSet | agent1 | 0.17m | 41.92Mi | zeek-offline-container:0 | zeek:23.10.0 | -logstash-deployment-6fbc9fdcd5-hwx8s | Running | 10.42.2.22 | ReplicaSet | agent1 | 85.55m | 2.91Gi | logstash-container:0 | logstash-oss:23.10.0 | -netbox-deployment-cdcff4977-hbbw5 | Running | 10.42.2.23 | ReplicaSet | agent1 | 807.64m | 702.86Mi | netbox-container:0 | netbox:23.10.0 | -suricata-offline-deployment-6ccdb89478-z5696 | Running 
| 10.42.2.19 | ReplicaSet | agent1 | 0.22m | 34.88Mi | suricata-offline-container:0 | suricata:23.10.0 | -dashboards-deployment-69b5465db-vz88g | Running | 10.42.1.14 | ReplicaSet | agent2 | 0.94m | 100.12Mi | dashboards-container:0 | dashboards:23.10.0 | -netbox-redis-cache-deployment-5f77d47b8b-z7t2z | Running | 10.42.1.15 | ReplicaSet | agent2 | 3.57m | 7.36Mi | netbox-redis-cache-container:0 | redis:23.10.0 | -suricata-live-deployment-6494c77759-9rlnt | Running | 10.42.1.16 | ReplicaSet | agent2 | 0.02m | 9.69Mi | suricata-live-container:0 | suricata:23.10.0 | -freq-deployment-cfd84fd97-dnngf | Running | 10.42.1.17 | ReplicaSet | agent2 | 0.2m | 26.36Mi | freq-container:0 | freq:23.10.0 | -arkime-deployment-56999cdd66-s98pp | Running | 10.42.1.18 | ReplicaSet | agent2 | 4.15m | 113.07Mi | arkime-container:0 | arkime:23.10.0 | -pcap-monitor-deployment-594ff674c4-fsm7m | Running | 10.42.1.19 | ReplicaSet | agent2 | 1.24m | 48.44Mi | pcap-monitor-container:0 | pcap-monitor:23.10.0 | -pcap-capture-deployment-7c8bf6957-jzpzn | Running | 10.42.1.20 | ReplicaSet | agent2 | 0.02m | 9.64Mi | pcap-capture-container:0 | pcap-capture:23.10.0 | -netbox-postgres-deployment-5879b8dffc-kkt56 | Running | 10.42.1.21 | ReplicaSet | agent2 | 70.91m | 33.02Mi | netbox-postgres-container:0 | postgresql:23.10.0 | -htadmin-deployment-6fc46888b9-sq6ln | Running | 10.42.1.23 | ReplicaSet | agent2 | 0.14m | 30.53Mi | htadmin-container:0 | htadmin:23.10.0 | -netbox-redis-deployment-5bcd8f6c96-j5xpf | Running | 10.42.1.24 | ReplicaSet | agent2 | 1.46m | 7.34Mi | netbox-redis-container:0 | redis:23.10.0 | -nginx-proxy-deployment-69fcc4968d-f68tq | Running | 10.42.1.22 | ReplicaSet | agent2 | 0.31m | 22.63Mi | nginx-proxy-container:0 | nginx-proxy:23.10.0 | -opensearch-deployment-75498799f6-4zmwd | Running | 10.42.1.25 | ReplicaSet | agent2 | 89.8m | 11.03Gi | opensearch-container:0 | opensearch:23.10.0 | +api-deployment-6f4686cf59-bn286 | Running | 10.42.2.14 | ReplicaSet | agent1 | 0.11m | 
59.62Mi | api-container:0 | api:23.11.0 | +file-monitor-deployment-855646bd75-vk7st | Running | 10.42.2.16 | ReplicaSet | agent1 | 8.47m | 1.46Gi | file-monitor-container:0 | file-monitor:23.11.0 | +zeek-live-deployment-64b69d4b6f-947vr | Running | 10.42.2.17 | ReplicaSet | agent1 | 0.02m | 12.44Mi | zeek-live-container:0 | zeek:23.11.0 | +dashboards-helper-deployment-69dc54f6b6-ln4sq | Running | 10.42.2.15 | ReplicaSet | agent1 | 10.77m | 38.43Mi | dashboards-helper-container:0 | dashboards-helper:23.11.0 | +upload-deployment-586568844b-4jnk9 | Running | 10.42.2.18 | ReplicaSet | agent1 | 0.15m | 29.78Mi | upload-container:0 | file-upload:23.11.0 | +filebeat-deployment-6ff8bc444f-t7h49 | Running | 10.42.2.20 | ReplicaSet | agent1 | 2.84m | 70.71Mi | filebeat-container:0 | filebeat-oss:23.11.0 | +zeek-offline-deployment-844f4865bd-g2sdm | Running | 10.42.2.21 | ReplicaSet | agent1 | 0.17m | 41.92Mi | zeek-offline-container:0 | zeek:23.11.0 | +logstash-deployment-6fbc9fdcd5-hwx8s | Running | 10.42.2.22 | ReplicaSet | agent1 | 85.55m | 2.91Gi | logstash-container:0 | logstash-oss:23.11.0 | +netbox-deployment-cdcff4977-hbbw5 | Running | 10.42.2.23 | ReplicaSet | agent1 | 807.64m | 702.86Mi | netbox-container:0 | netbox:23.11.0 | +suricata-offline-deployment-6ccdb89478-z5696 | Running | 10.42.2.19 | ReplicaSet | agent1 | 0.22m | 34.88Mi | suricata-offline-container:0 | suricata:23.11.0 | +dashboards-deployment-69b5465db-vz88g | Running | 10.42.1.14 | ReplicaSet | agent2 | 0.94m | 100.12Mi | dashboards-container:0 | dashboards:23.11.0 | +netbox-redis-cache-deployment-5f77d47b8b-z7t2z | Running | 10.42.1.15 | ReplicaSet | agent2 | 3.57m | 7.36Mi | netbox-redis-cache-container:0 | redis:23.11.0 | +suricata-live-deployment-6494c77759-9rlnt | Running | 10.42.1.16 | ReplicaSet | agent2 | 0.02m | 9.69Mi | suricata-live-container:0 | suricata:23.11.0 | +freq-deployment-cfd84fd97-dnngf | Running | 10.42.1.17 | ReplicaSet | agent2 | 0.2m | 26.36Mi | freq-container:0 | 
freq:23.11.0 | +arkime-deployment-56999cdd66-s98pp | Running | 10.42.1.18 | ReplicaSet | agent2 | 4.15m | 113.07Mi | arkime-container:0 | arkime:23.11.0 | +pcap-monitor-deployment-594ff674c4-fsm7m | Running | 10.42.1.19 | ReplicaSet | agent2 | 1.24m | 48.44Mi | pcap-monitor-container:0 | pcap-monitor:23.11.0 | +pcap-capture-deployment-7c8bf6957-jzpzn | Running | 10.42.1.20 | ReplicaSet | agent2 | 0.02m | 9.64Mi | pcap-capture-container:0 | pcap-capture:23.11.0 | +netbox-postgres-deployment-5879b8dffc-kkt56 | Running | 10.42.1.21 | ReplicaSet | agent2 | 70.91m | 33.02Mi | netbox-postgres-container:0 | postgresql:23.11.0 | +htadmin-deployment-6fc46888b9-sq6ln | Running | 10.42.1.23 | ReplicaSet | agent2 | 0.14m | 30.53Mi | htadmin-container:0 | htadmin:23.11.0 | +netbox-redis-deployment-5bcd8f6c96-j5xpf | Running | 10.42.1.24 | ReplicaSet | agent2 | 1.46m | 7.34Mi | netbox-redis-container:0 | redis:23.11.0 | +nginx-proxy-deployment-69fcc4968d-f68tq | Running | 10.42.1.22 | ReplicaSet | agent2 | 0.31m | 22.63Mi | nginx-proxy-container:0 | nginx-proxy:23.11.0 | +opensearch-deployment-75498799f6-4zmwd | Running | 10.42.1.25 | ReplicaSet | agent2 | 89.8m | 11.03Gi | opensearch-container:0 | opensearch:23.11.0 | ``` The other control scripts (`stop`, `restart`, `logs`, etc.) work in a similar manner as in a Docker-based deployment. One notable difference is the `wipe` script: data on PersistentVolume storage cannot be deleted by `wipe`. It must be deleted manually on the storage media underlying the PersistentVolumes. 
@@ -553,28 +553,28 @@ agent1 | agent1 | 192.168.56.11 | agent1 | k3s | 6000m | agent2 | agent2 | 192.168.56.12 | agent2 | k3s | 6000m | 552.71m | 9.21% | 19.55Gi | 13.27Gi | 61.28Gi | 12 | Pod Name | State | Pod IP | Pod Kind | Worker Node | CPU Usage | Memory Usage | Container Name:Restarts | Container Image | -netbox-redis-cache-deployment-5f77d47b8b-jr9nt | Running | 10.42.2.6 | ReplicaSet | agent2 | 1.89m | 7.24Mi | netbox-redis-cache-container:0 | redis:23.10.0 | -netbox-redis-deployment-5bcd8f6c96-bkzmh | Running | 10.42.2.5 | ReplicaSet | agent2 | 1.62m | 7.52Mi | netbox-redis-container:0 | redis:23.10.0 | -dashboards-helper-deployment-69dc54f6b6-ks7ps | Running | 10.42.2.4 | ReplicaSet | agent2 | 12.95m | 40.75Mi | dashboards-helper-container:0 | dashboards-helper:23.10.0 | -freq-deployment-cfd84fd97-5bwp6 | Running | 10.42.2.8 | ReplicaSet | agent2 | 0.11m | 26.33Mi | freq-container:0 | freq:23.10.0 | -pcap-capture-deployment-7c8bf6957-hkvkn | Running | 10.42.2.12 | ReplicaSet | agent2 | 0.02m | 9.21Mi | pcap-capture-container:0 | pcap-capture:23.10.0 | -nginx-proxy-deployment-69fcc4968d-m57rz | Running | 10.42.2.10 | ReplicaSet | agent2 | 0.91m | 22.72Mi | nginx-proxy-container:0 | nginx-proxy:23.10.0 | -htadmin-deployment-6fc46888b9-vpt7l | Running | 10.42.2.7 | ReplicaSet | agent2 | 0.16m | 30.21Mi | htadmin-container:0 | htadmin:23.10.0 | -opensearch-deployment-75498799f6-5v92w | Running | 10.42.2.13 | ReplicaSet | agent2 | 139.2m | 10.86Gi | opensearch-container:0 | opensearch:23.10.0 | -zeek-live-deployment-64b69d4b6f-fcb6n | Running | 10.42.2.9 | ReplicaSet | agent2 | 0.02m | 109.55Mi | zeek-live-container:0 | zeek:23.10.0 | -dashboards-deployment-69b5465db-kgsqk | Running | 10.42.2.3 | ReplicaSet | agent2 | 14.98m | 108.85Mi | dashboards-container:0 | dashboards:23.10.0 | -arkime-deployment-56999cdd66-xxpw9 | Running | 10.42.2.11 | ReplicaSet | agent2 | 208.95m | 78.42Mi | arkime-container:0 | arkime:23.10.0 | -api-deployment-6f4686cf59-xt9md | 
Running | 10.42.1.3 | ReplicaSet | agent1 | 0.14m | 56.88Mi | api-container:0 | api:23.10.0 | -netbox-postgres-deployment-5879b8dffc-lb4qm | Running | 10.42.1.6 | ReplicaSet | agent1 | 141.2m | 48.02Mi | netbox-postgres-container:0 | postgresql:23.10.0 | -pcap-monitor-deployment-594ff674c4-fwq7g | Running | 10.42.1.12 | ReplicaSet | agent1 | 3.93m | 46.44Mi | pcap-monitor-container:0 | pcap-monitor:23.10.0 | -suricata-offline-deployment-6ccdb89478-j5fgj | Running | 10.42.1.10 | ReplicaSet | agent1 | 10.42m | 35.12Mi | suricata-offline-container:0 | suricata:23.10.0 | -suricata-live-deployment-6494c77759-rpt48 | Running | 10.42.1.8 | ReplicaSet | agent1 | 0.01m | 9.62Mi | suricata-live-container:0 | suricata:23.10.0 | -netbox-deployment-cdcff4977-7ns2q | Running | 10.42.1.7 | ReplicaSet | agent1 | 830.47m | 530.7Mi | netbox-container:0 | netbox:23.10.0 | -zeek-offline-deployment-844f4865bd-7x68b | Running | 10.42.1.9 | ReplicaSet | agent1 | 1.44m | 43.66Mi | zeek-offline-container:0 | zeek:23.10.0 | -filebeat-deployment-6ff8bc444f-pdgzj | Running | 10.42.1.11 | ReplicaSet | agent1 | 0.78m | 75.25Mi | filebeat-container:0 | filebeat-oss:23.10.0 | -file-monitor-deployment-855646bd75-nbngq | Running | 10.42.1.4 | ReplicaSet | agent1 | 1.69m | 1.46Gi | file-monitor-container:0 | file-monitor:23.10.0 | -upload-deployment-586568844b-9s7f5 | Running | 10.42.1.13 | ReplicaSet | agent1 | 0.14m | 29.62Mi | upload-container:0 | file-upload:23.10.0 | -logstash-deployment-6fbc9fdcd5-2hhx8 | Running | 10.42.1.5 | ReplicaSet | agent1 | 3236.29m | 357.36Mi | logstash-container:0 | logstash-oss:23.10.0 | +netbox-redis-cache-deployment-5f77d47b8b-jr9nt | Running | 10.42.2.6 | ReplicaSet | agent2 | 1.89m | 7.24Mi | netbox-redis-cache-container:0 | redis:23.11.0 | +netbox-redis-deployment-5bcd8f6c96-bkzmh | Running | 10.42.2.5 | ReplicaSet | agent2 | 1.62m | 7.52Mi | netbox-redis-container:0 | redis:23.11.0 | +dashboards-helper-deployment-69dc54f6b6-ks7ps | Running | 10.42.2.4 | 
ReplicaSet | agent2 | 12.95m | 40.75Mi | dashboards-helper-container:0 | dashboards-helper:23.11.0 | +freq-deployment-cfd84fd97-5bwp6 | Running | 10.42.2.8 | ReplicaSet | agent2 | 0.11m | 26.33Mi | freq-container:0 | freq:23.11.0 | +pcap-capture-deployment-7c8bf6957-hkvkn | Running | 10.42.2.12 | ReplicaSet | agent2 | 0.02m | 9.21Mi | pcap-capture-container:0 | pcap-capture:23.11.0 | +nginx-proxy-deployment-69fcc4968d-m57rz | Running | 10.42.2.10 | ReplicaSet | agent2 | 0.91m | 22.72Mi | nginx-proxy-container:0 | nginx-proxy:23.11.0 | +htadmin-deployment-6fc46888b9-vpt7l | Running | 10.42.2.7 | ReplicaSet | agent2 | 0.16m | 30.21Mi | htadmin-container:0 | htadmin:23.11.0 | +opensearch-deployment-75498799f6-5v92w | Running | 10.42.2.13 | ReplicaSet | agent2 | 139.2m | 10.86Gi | opensearch-container:0 | opensearch:23.11.0 | +zeek-live-deployment-64b69d4b6f-fcb6n | Running | 10.42.2.9 | ReplicaSet | agent2 | 0.02m | 109.55Mi | zeek-live-container:0 | zeek:23.11.0 | +dashboards-deployment-69b5465db-kgsqk | Running | 10.42.2.3 | ReplicaSet | agent2 | 14.98m | 108.85Mi | dashboards-container:0 | dashboards:23.11.0 | +arkime-deployment-56999cdd66-xxpw9 | Running | 10.42.2.11 | ReplicaSet | agent2 | 208.95m | 78.42Mi | arkime-container:0 | arkime:23.11.0 | +api-deployment-6f4686cf59-xt9md | Running | 10.42.1.3 | ReplicaSet | agent1 | 0.14m | 56.88Mi | api-container:0 | api:23.11.0 | +netbox-postgres-deployment-5879b8dffc-lb4qm | Running | 10.42.1.6 | ReplicaSet | agent1 | 141.2m | 48.02Mi | netbox-postgres-container:0 | postgresql:23.11.0 | +pcap-monitor-deployment-594ff674c4-fwq7g | Running | 10.42.1.12 | ReplicaSet | agent1 | 3.93m | 46.44Mi | pcap-monitor-container:0 | pcap-monitor:23.11.0 | +suricata-offline-deployment-6ccdb89478-j5fgj | Running | 10.42.1.10 | ReplicaSet | agent1 | 10.42m | 35.12Mi | suricata-offline-container:0 | suricata:23.11.0 | +suricata-live-deployment-6494c77759-rpt48 | Running | 10.42.1.8 | ReplicaSet | agent1 | 0.01m | 9.62Mi | 
suricata-live-container:0 | suricata:23.11.0 | +netbox-deployment-cdcff4977-7ns2q | Running | 10.42.1.7 | ReplicaSet | agent1 | 830.47m | 530.7Mi | netbox-container:0 | netbox:23.11.0 | +zeek-offline-deployment-844f4865bd-7x68b | Running | 10.42.1.9 | ReplicaSet | agent1 | 1.44m | 43.66Mi | zeek-offline-container:0 | zeek:23.11.0 | +filebeat-deployment-6ff8bc444f-pdgzj | Running | 10.42.1.11 | ReplicaSet | agent1 | 0.78m | 75.25Mi | filebeat-container:0 | filebeat-oss:23.11.0 | +file-monitor-deployment-855646bd75-nbngq | Running | 10.42.1.4 | ReplicaSet | agent1 | 1.69m | 1.46Gi | file-monitor-container:0 | file-monitor:23.11.0 | +upload-deployment-586568844b-9s7f5 | Running | 10.42.1.13 | ReplicaSet | agent1 | 0.14m | 29.62Mi | upload-container:0 | file-upload:23.11.0 | +logstash-deployment-6fbc9fdcd5-2hhx8 | Running | 10.42.1.5 | ReplicaSet | agent1 | 3236.29m | 357.36Mi | logstash-container:0 | logstash-oss:23.11.0 | ``` View container logs for the Malcolm deployment with `./scripts/logs` (if **[stern](https://github.com/stern/stern)** present in `$PATH`): diff --git a/docs/malcolm-iso.md b/docs/malcolm-iso.md index a6db45fdd..858930ab3 100644 --- a/docs/malcolm-iso.md +++ b/docs/malcolm-iso.md @@ -41,7 +41,7 @@ Building the ISO may take 30 minutes or more depending on the system. 
As the bui ``` … -Finished, created "/malcolm-build/malcolm-iso/malcolm-23.10.0.iso" +Finished, created "/malcolm-build/malcolm-iso/malcolm-23.11.0.iso" … ``` diff --git a/docs/quickstart.md b/docs/quickstart.md index 73aebc4b1..7262ab8d4 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -54,25 +54,25 @@ You can then observe the images have been retrieved by running `docker images`: ``` $ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ghcr.io/idaholab/malcolm/api 23.10.0 xxxxxxxxxxxx 3 days ago 158MB -ghcr.io/idaholab/malcolm/arkime 23.10.0 xxxxxxxxxxxx 3 days ago 816MB -ghcr.io/idaholab/malcolm/dashboards 23.10.0 xxxxxxxxxxxx 3 days ago 1.02GB -ghcr.io/idaholab/malcolm/dashboards-helper 23.10.0 xxxxxxxxxxxx 3 days ago 184MB -ghcr.io/idaholab/malcolm/file-monitor 23.10.0 xxxxxxxxxxxx 3 days ago 588MB -ghcr.io/idaholab/malcolm/file-upload 23.10.0 xxxxxxxxxxxx 3 days ago 259MB -ghcr.io/idaholab/malcolm/filebeat-oss 23.10.0 xxxxxxxxxxxx 3 days ago 624MB -ghcr.io/idaholab/malcolm/freq 23.10.0 xxxxxxxxxxxx 3 days ago 132MB -ghcr.io/idaholab/malcolm/htadmin 23.10.0 xxxxxxxxxxxx 3 days ago 242MB -ghcr.io/idaholab/malcolm/logstash-oss 23.10.0 xxxxxxxxxxxx 3 days ago 1.35GB -ghcr.io/idaholab/malcolm/netbox 23.10.0 xxxxxxxxxxxx 3 days ago 1.01GB -ghcr.io/idaholab/malcolm/nginx-proxy 23.10.0 xxxxxxxxxxxx 3 days ago 121MB -ghcr.io/idaholab/malcolm/opensearch 23.10.0 xxxxxxxxxxxx 3 days ago 1.17GB -ghcr.io/idaholab/malcolm/pcap-capture 23.10.0 xxxxxxxxxxxx 3 days ago 121MB -ghcr.io/idaholab/malcolm/pcap-monitor 23.10.0 xxxxxxxxxxxx 3 days ago 213MB -ghcr.io/idaholab/malcolm/postgresql 23.10.0 xxxxxxxxxxxx 3 days ago 268MB -ghcr.io/idaholab/malcolm/redis 23.10.0 xxxxxxxxxxxx 3 days ago 34.2MB -ghcr.io/idaholab/malcolm/suricata 23.10.0 xxxxxxxxxxxx 3 days ago 278MB -ghcr.io/idaholab/malcolm/zeek 23.10.0 xxxxxxxxxxxx 3 days ago 1GB +ghcr.io/idaholab/malcolm/api 23.11.0 xxxxxxxxxxxx 3 days ago 158MB +ghcr.io/idaholab/malcolm/arkime 23.11.0 xxxxxxxxxxxx 3 days ago 
816MB +ghcr.io/idaholab/malcolm/dashboards 23.11.0 xxxxxxxxxxxx 3 days ago 1.02GB +ghcr.io/idaholab/malcolm/dashboards-helper 23.11.0 xxxxxxxxxxxx 3 days ago 184MB +ghcr.io/idaholab/malcolm/file-monitor 23.11.0 xxxxxxxxxxxx 3 days ago 588MB +ghcr.io/idaholab/malcolm/file-upload 23.11.0 xxxxxxxxxxxx 3 days ago 259MB +ghcr.io/idaholab/malcolm/filebeat-oss 23.11.0 xxxxxxxxxxxx 3 days ago 624MB +ghcr.io/idaholab/malcolm/freq 23.11.0 xxxxxxxxxxxx 3 days ago 132MB +ghcr.io/idaholab/malcolm/htadmin 23.11.0 xxxxxxxxxxxx 3 days ago 242MB +ghcr.io/idaholab/malcolm/logstash-oss 23.11.0 xxxxxxxxxxxx 3 days ago 1.35GB +ghcr.io/idaholab/malcolm/netbox 23.11.0 xxxxxxxxxxxx 3 days ago 1.01GB +ghcr.io/idaholab/malcolm/nginx-proxy 23.11.0 xxxxxxxxxxxx 3 days ago 121MB +ghcr.io/idaholab/malcolm/opensearch 23.11.0 xxxxxxxxxxxx 3 days ago 1.17GB +ghcr.io/idaholab/malcolm/pcap-capture 23.11.0 xxxxxxxxxxxx 3 days ago 121MB +ghcr.io/idaholab/malcolm/pcap-monitor 23.11.0 xxxxxxxxxxxx 3 days ago 213MB +ghcr.io/idaholab/malcolm/postgresql 23.11.0 xxxxxxxxxxxx 3 days ago 268MB +ghcr.io/idaholab/malcolm/redis 23.11.0 xxxxxxxxxxxx 3 days ago 34.2MB +ghcr.io/idaholab/malcolm/suricata 23.11.0 xxxxxxxxxxxx 3 days ago 278MB +ghcr.io/idaholab/malcolm/zeek 23.11.0 xxxxxxxxxxxx 3 days ago 1GB ``` ### Import from pre-packaged tarballs diff --git a/docs/ubuntu-install-example.md b/docs/ubuntu-install-example.md index b7bddc17d..c08ddc0d4 100644 --- a/docs/ubuntu-install-example.md +++ b/docs/ubuntu-install-example.md @@ -250,25 +250,25 @@ Pulling zeek ... 
done user@host:~/Malcolm$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ghcr.io/idaholab/malcolm/api 23.10.0 xxxxxxxxxxxx 3 days ago 158MB -ghcr.io/idaholab/malcolm/arkime 23.10.0 xxxxxxxxxxxx 3 days ago 816MB -ghcr.io/idaholab/malcolm/dashboards 23.10.0 xxxxxxxxxxxx 3 days ago 1.02GB -ghcr.io/idaholab/malcolm/dashboards-helper 23.10.0 xxxxxxxxxxxx 3 days ago 184MB -ghcr.io/idaholab/malcolm/file-monitor 23.10.0 xxxxxxxxxxxx 3 days ago 588MB -ghcr.io/idaholab/malcolm/file-upload 23.10.0 xxxxxxxxxxxx 3 days ago 259MB -ghcr.io/idaholab/malcolm/filebeat-oss 23.10.0 xxxxxxxxxxxx 3 days ago 624MB -ghcr.io/idaholab/malcolm/freq 23.10.0 xxxxxxxxxxxx 3 days ago 132MB -ghcr.io/idaholab/malcolm/htadmin 23.10.0 xxxxxxxxxxxx 3 days ago 242MB -ghcr.io/idaholab/malcolm/logstash-oss 23.10.0 xxxxxxxxxxxx 3 days ago 1.35GB -ghcr.io/idaholab/malcolm/netbox 23.10.0 xxxxxxxxxxxx 3 days ago 1.01GB -ghcr.io/idaholab/malcolm/nginx-proxy 23.10.0 xxxxxxxxxxxx 3 days ago 121MB -ghcr.io/idaholab/malcolm/opensearch 23.10.0 xxxxxxxxxxxx 3 days ago 1.17GB -ghcr.io/idaholab/malcolm/pcap-capture 23.10.0 xxxxxxxxxxxx 3 days ago 121MB -ghcr.io/idaholab/malcolm/pcap-monitor 23.10.0 xxxxxxxxxxxx 3 days ago 213MB -ghcr.io/idaholab/malcolm/postgresql 23.10.0 xxxxxxxxxxxx 3 days ago 268MB -ghcr.io/idaholab/malcolm/redis 23.10.0 xxxxxxxxxxxx 3 days ago 34.2MB -ghcr.io/idaholab/malcolm/suricata 23.10.0 xxxxxxxxxxxx 3 days ago 278MB -ghcr.io/idaholab/malcolm/zeek 23.10.0 xxxxxxxxxxxx 3 days ago 1GB +ghcr.io/idaholab/malcolm/api 23.11.0 xxxxxxxxxxxx 3 days ago 158MB +ghcr.io/idaholab/malcolm/arkime 23.11.0 xxxxxxxxxxxx 3 days ago 816MB +ghcr.io/idaholab/malcolm/dashboards 23.11.0 xxxxxxxxxxxx 3 days ago 1.02GB +ghcr.io/idaholab/malcolm/dashboards-helper 23.11.0 xxxxxxxxxxxx 3 days ago 184MB +ghcr.io/idaholab/malcolm/file-monitor 23.11.0 xxxxxxxxxxxx 3 days ago 588MB +ghcr.io/idaholab/malcolm/file-upload 23.11.0 xxxxxxxxxxxx 3 days ago 259MB +ghcr.io/idaholab/malcolm/filebeat-oss 23.11.0 xxxxxxxxxxxx 3 
days ago 624MB +ghcr.io/idaholab/malcolm/freq 23.11.0 xxxxxxxxxxxx 3 days ago 132MB +ghcr.io/idaholab/malcolm/htadmin 23.11.0 xxxxxxxxxxxx 3 days ago 242MB +ghcr.io/idaholab/malcolm/logstash-oss 23.11.0 xxxxxxxxxxxx 3 days ago 1.35GB +ghcr.io/idaholab/malcolm/netbox 23.11.0 xxxxxxxxxxxx 3 days ago 1.01GB +ghcr.io/idaholab/malcolm/nginx-proxy 23.11.0 xxxxxxxxxxxx 3 days ago 121MB +ghcr.io/idaholab/malcolm/opensearch 23.11.0 xxxxxxxxxxxx 3 days ago 1.17GB +ghcr.io/idaholab/malcolm/pcap-capture 23.11.0 xxxxxxxxxxxx 3 days ago 121MB +ghcr.io/idaholab/malcolm/pcap-monitor 23.11.0 xxxxxxxxxxxx 3 days ago 213MB +ghcr.io/idaholab/malcolm/postgresql 23.11.0 xxxxxxxxxxxx 3 days ago 268MB +ghcr.io/idaholab/malcolm/redis 23.11.0 xxxxxxxxxxxx 3 days ago 34.2MB +ghcr.io/idaholab/malcolm/suricata 23.11.0 xxxxxxxxxxxx 3 days ago 278MB +ghcr.io/idaholab/malcolm/zeek 23.11.0 xxxxxxxxxxxx 3 days ago 1GB ``` Finally, start Malcolm. When Malcolm starts it will stream informational and debug messages to the console until it has completed initializing. 
diff --git a/kubernetes/03-opensearch.yml b/kubernetes/03-opensearch.yml index eba6174b0..fbd2e3172 100644 --- a/kubernetes/03-opensearch.yml +++ b/kubernetes/03-opensearch.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: opensearch-container - image: ghcr.io/idaholab/malcolm/opensearch:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/opensearch:development imagePullPolicy: Always stdin: false tty: true @@ -69,7 +69,7 @@ spec: subPath: "opensearch" initContainers: - name: opensearch-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/04-dashboards.yml b/kubernetes/04-dashboards.yml index e39248616..cfbb8b422 100644 --- a/kubernetes/04-dashboards.yml +++ b/kubernetes/04-dashboards.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: dashboards-container - image: ghcr.io/idaholab/malcolm/dashboards:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dashboards:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/05-upload.yml b/kubernetes/05-upload.yml index b4f613ead..7631d405f 100644 --- a/kubernetes/05-upload.yml +++ b/kubernetes/05-upload.yml @@ -34,7 +34,7 @@ spec: spec: containers: - name: upload-container - image: ghcr.io/idaholab/malcolm/file-upload:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/file-upload:development imagePullPolicy: Always stdin: false tty: true @@ -73,7 +73,7 @@ spec: subPath: "upload" initContainers: - name: upload-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/06-pcap-monitor.yml b/kubernetes/06-pcap-monitor.yml index 7c2c734e3..70da6fc02 100644 --- a/kubernetes/06-pcap-monitor.yml +++ b/kubernetes/06-pcap-monitor.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: pcap-monitor-container - image: 
ghcr.io/idaholab/malcolm/pcap-monitor:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/pcap-monitor:development imagePullPolicy: Always stdin: false tty: true @@ -70,7 +70,7 @@ spec: name: pcap-monitor-zeek-volume initContainers: - name: pcap-monitor-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/07-arkime.yml b/kubernetes/07-arkime.yml index 42d6055f9..ec138d853 100644 --- a/kubernetes/07-arkime.yml +++ b/kubernetes/07-arkime.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: arkime-container - image: ghcr.io/idaholab/malcolm/arkime:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/arkime:development imagePullPolicy: Always stdin: false tty: true @@ -83,7 +83,7 @@ spec: subPath: "arkime" initContainers: - name: arkime-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/08-api.yml b/kubernetes/08-api.yml index d3144c138..dff8c4274 100644 --- a/kubernetes/08-api.yml +++ b/kubernetes/08-api.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: api-container - image: ghcr.io/idaholab/malcolm/api:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/api:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/09-dashboards-helper.yml b/kubernetes/09-dashboards-helper.yml index 0950ea0b4..3c1292517 100644 --- a/kubernetes/09-dashboards-helper.yml +++ b/kubernetes/09-dashboards-helper.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: dashboards-helper-container - image: ghcr.io/idaholab/malcolm/dashboards-helper:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dashboards-helper:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/10-zeek.yml b/kubernetes/10-zeek.yml index f40d920cc..3f02eb94e 100644 --- a/kubernetes/10-zeek.yml +++ 
b/kubernetes/10-zeek.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: zeek-offline-container - image: ghcr.io/idaholab/malcolm/zeek:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/zeek:development imagePullPolicy: Always stdin: false tty: true @@ -68,7 +68,7 @@ spec: subPath: "zeek/intel" initContainers: - name: zeek-offline-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/11-suricata.yml b/kubernetes/11-suricata.yml index cdeb592f3..5e31720b6 100644 --- a/kubernetes/11-suricata.yml +++ b/kubernetes/11-suricata.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: suricata-offline-container - image: ghcr.io/idaholab/malcolm/suricata:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/suricata:development imagePullPolicy: Always stdin: false tty: true @@ -61,7 +61,7 @@ spec: name: suricata-offline-custom-rules-volume initContainers: - name: suricata-offline-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/12-file-monitor.yml b/kubernetes/12-file-monitor.yml index c3dfedb56..9cf768a47 100644 --- a/kubernetes/12-file-monitor.yml +++ b/kubernetes/12-file-monitor.yml @@ -33,7 +33,7 @@ spec: spec: containers: - name: file-monitor-container - image: ghcr.io/idaholab/malcolm/file-monitor:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/file-monitor:development imagePullPolicy: Always stdin: false tty: true @@ -81,7 +81,7 @@ spec: name: file-monitor-yara-rules-custom-volume initContainers: - name: file-monitor-live-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/13-filebeat.yml b/kubernetes/13-filebeat.yml index 0f91ed6c9..da45a94d1 100644 --- 
a/kubernetes/13-filebeat.yml +++ b/kubernetes/13-filebeat.yml @@ -33,7 +33,7 @@ spec: spec: containers: - name: filebeat-container - image: ghcr.io/idaholab/malcolm/filebeat-oss:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/filebeat-oss:development imagePullPolicy: Always stdin: false tty: true @@ -83,7 +83,7 @@ spec: subPath: "nginx" initContainers: - name: filebeat-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/14-logstash.yml b/kubernetes/14-logstash.yml index 2f4c8529b..8f9029b76 100644 --- a/kubernetes/14-logstash.yml +++ b/kubernetes/14-logstash.yml @@ -49,7 +49,7 @@ spec: # topologyKey: "kubernetes.io/hostname" containers: - name: logstash-container - image: ghcr.io/idaholab/malcolm/logstash-oss:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/logstash-oss:development imagePullPolicy: Always stdin: false tty: true @@ -113,7 +113,7 @@ spec: subPath: "logstash" initContainers: - name: logstash-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/15-netbox-redis.yml b/kubernetes/15-netbox-redis.yml index bd32f74c3..922f54f1d 100644 --- a/kubernetes/15-netbox-redis.yml +++ b/kubernetes/15-netbox-redis.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: netbox-redis-container - image: ghcr.io/idaholab/malcolm/redis:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/redis:development imagePullPolicy: Always stdin: false tty: true @@ -83,7 +83,7 @@ spec: subPath: netbox/redis initContainers: - name: netbox-redis-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/16-netbox-redis-cache.yml b/kubernetes/16-netbox-redis-cache.yml index 
84ed5d37c..0fef1bbf0 100644 --- a/kubernetes/16-netbox-redis-cache.yml +++ b/kubernetes/16-netbox-redis-cache.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: netbox-redis-cache-container - image: ghcr.io/idaholab/malcolm/redis:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/redis:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/17-netbox-postgres.yml b/kubernetes/17-netbox-postgres.yml index 6a1ad30a0..55a066358 100644 --- a/kubernetes/17-netbox-postgres.yml +++ b/kubernetes/17-netbox-postgres.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: netbox-postgres-container - image: ghcr.io/idaholab/malcolm/postgresql:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/postgresql:development imagePullPolicy: Always stdin: false tty: true @@ -74,7 +74,7 @@ spec: subPath: netbox/postgres initContainers: - name: netbox-postgres-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/18-netbox.yml b/kubernetes/18-netbox.yml index 2252e64a5..f81438018 100644 --- a/kubernetes/18-netbox.yml +++ b/kubernetes/18-netbox.yml @@ -36,7 +36,7 @@ spec: spec: containers: - name: netbox-container - image: ghcr.io/idaholab/malcolm/netbox:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/netbox:development imagePullPolicy: Always stdin: false tty: true @@ -88,7 +88,7 @@ spec: subPath: netbox/media initContainers: - name: netbox-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/19-htadmin.yml b/kubernetes/19-htadmin.yml index 88702af7b..de5293761 100644 --- a/kubernetes/19-htadmin.yml +++ b/kubernetes/19-htadmin.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: htadmin-container - image: ghcr.io/idaholab/malcolm/htadmin:v23.10.0 + image: 
ghcr.io/mmguero-dev/malcolm/htadmin:development imagePullPolicy: Always stdin: false tty: true @@ -63,7 +63,7 @@ spec: subPath: "htadmin" initContainers: - name: htadmin-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/20-pcap-capture.yml b/kubernetes/20-pcap-capture.yml index e6f2d01f1..275cffe99 100644 --- a/kubernetes/20-pcap-capture.yml +++ b/kubernetes/20-pcap-capture.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: pcap-capture-container - image: ghcr.io/idaholab/malcolm/pcap-capture:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/pcap-capture:development imagePullPolicy: Always stdin: false tty: true @@ -46,7 +46,7 @@ spec: subPath: "upload" initContainers: - name: pcap-capture-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/21-zeek-live.yml b/kubernetes/21-zeek-live.yml index 3cd7caa4e..e9651aa99 100644 --- a/kubernetes/21-zeek-live.yml +++ b/kubernetes/21-zeek-live.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: zeek-live-container - image: ghcr.io/idaholab/malcolm/zeek:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/zeek:development imagePullPolicy: Always stdin: false tty: true @@ -60,7 +60,7 @@ spec: subPath: "zeek/intel" initContainers: - name: zeek-live-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/22-suricata-live.yml b/kubernetes/22-suricata-live.yml index 19ae4f7e8..eade40dc2 100644 --- a/kubernetes/22-suricata-live.yml +++ b/kubernetes/22-suricata-live.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: suricata-live-container - image: ghcr.io/idaholab/malcolm/suricata:v23.10.0 + image: 
ghcr.io/mmguero-dev/malcolm/suricata:development imagePullPolicy: Always stdin: false tty: true @@ -51,7 +51,7 @@ spec: name: suricata-live-suricata-logs-volume initContainers: - name: suricata-live-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/23-freq.yml b/kubernetes/23-freq.yml index df89a5745..b9dc580df 100644 --- a/kubernetes/23-freq.yml +++ b/kubernetes/23-freq.yml @@ -30,7 +30,7 @@ spec: spec: containers: - name: freq-container - image: ghcr.io/idaholab/malcolm/freq:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/freq:development imagePullPolicy: Always stdin: false tty: true diff --git a/kubernetes/98-nginx-proxy.yml b/kubernetes/98-nginx-proxy.yml index 95ab75caa..94e7861e2 100644 --- a/kubernetes/98-nginx-proxy.yml +++ b/kubernetes/98-nginx-proxy.yml @@ -39,7 +39,7 @@ spec: spec: containers: - name: nginx-proxy-container - image: ghcr.io/idaholab/malcolm/nginx-proxy:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/nginx-proxy:development imagePullPolicy: Always stdin: false tty: true @@ -95,7 +95,7 @@ spec: subPath: "nginx" initContainers: - name: nginx-dirinit-container - image: ghcr.io/idaholab/malcolm/dirinit:v23.10.0 + image: ghcr.io/mmguero-dev/malcolm/dirinit:development imagePullPolicy: Always stdin: false tty: true diff --git a/scripts/third-party-environments/aws/ami/packer_vars.json.example b/scripts/third-party-environments/aws/ami/packer_vars.json.example index 2b1f0a3b2..aaa65ca30 100644 --- a/scripts/third-party-environments/aws/ami/packer_vars.json.example +++ b/scripts/third-party-environments/aws/ami/packer_vars.json.example @@ -2,7 +2,7 @@ "aws_access_key": "XXXXXXXXXXXXXXXXXXXX", "aws_secret_key": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", "instance_type": "t2.micro", - "malcolm_tag": "v23.10.0", + "malcolm_tag": "v23.11.0", "malcolm_repo": "idaholab/Malcolm", "malcolm_uid": "1000", 
"ssh_username": "ec2-user", From 308a1b0d8b595231ef8f0a31f52e8b71d017d001 Mon Sep 17 00:00:00 2001 From: SG Date: Thu, 26 Oct 2023 13:30:45 -0600 Subject: [PATCH 02/82] add option to auto-create catch-all netbox IPAM prefixes for private IP space (idaholab/Malcolm#279) --- Dockerfiles/netbox.Dockerfile | 2 + config/netbox-common.env.example | 2 + docs/kubernetes.md | 2 + docs/malcolm-hedgehog-e2e-iso-install.md | 2 + netbox/preload/prefixes_defaults.yml | 6 +++ netbox/preload/vrfs_defaults.yml | 6 +++ netbox/scripts/netbox_init.py | 56 ++++++++++++++++++------ netbox/supervisord.conf | 2 + scripts/install.py | 19 ++++++++ 9 files changed, 83 insertions(+), 14 deletions(-) create mode 100644 netbox/preload/prefixes_defaults.yml create mode 100644 netbox/preload/vrfs_defaults.yml diff --git a/Dockerfiles/netbox.Dockerfile b/Dockerfiles/netbox.Dockerfile index 8fbb0a628..4d5dbc7fb 100644 --- a/Dockerfiles/netbox.Dockerfile +++ b/Dockerfiles/netbox.Dockerfile @@ -39,6 +39,7 @@ ARG NETBOX_DEVICETYPE_LIBRARY_PATH="/opt/netbox-devicetype-library" ARG NETBOX_DEFAULT_SITE=Malcolm ARG NETBOX_CRON=true ARG NETBOX_PRELOAD_PATH="/opt/netbox-preload" +ARG NETBOX_PRELOAD_PREFIXES=false ENV NETBOX_PATH /opt/netbox ENV BASE_PATH netbox @@ -46,6 +47,7 @@ ENV NETBOX_DEVICETYPE_LIBRARY_PATH $NETBOX_DEVICETYPE_LIBRARY_PATH ENV NETBOX_DEFAULT_SITE $NETBOX_DEFAULT_SITE ENV NETBOX_CRON $NETBOX_CRON ENV NETBOX_PRELOAD_PATH $NETBOX_PRELOAD_PATH +ENV NETBOX_PRELOAD_PREFIXES $NETBOX_PRELOAD_PREFIXES ADD netbox/patch/* /tmp/netbox-patches/ diff --git a/config/netbox-common.env.example b/config/netbox-common.env.example index 882cc64ae..0caba9062 100644 --- a/config/netbox-common.env.example +++ b/config/netbox-common.env.example @@ -3,6 +3,8 @@ # The name of the default "site" to be created upon NetBox initialization, and to be queried # for enrichment (see LOGSTASH_NETBOX_ENRICHMENT) NETBOX_DEFAULT_SITE=Malcolm +# Whether or not to create catch-all VRFs/IP Prefixes for private IP space 
+NETBOX_PRELOAD_PREFIXES=false # Whether to disable Malcolm's NetBox instance ('true') or not ('false') NETBOX_DISABLED=true NETBOX_POSTGRES_DISABLED=true diff --git a/docs/kubernetes.md b/docs/kubernetes.md index 3f8c9d3c5..91294d6c1 100644 --- a/docs/kubernetes.md +++ b/docs/kubernetes.md @@ -430,6 +430,8 @@ Should Malcolm automatically populate NetBox inventory based on observed network Specify default NetBox site name: Malcolm +Should Malcolm create "catch-all" prefixes for private IP address space? (y / N): n + Enable dark mode for OpenSearch Dashboards? (Y / n): y Malcolm has been installed to /home/user/Malcolm. See README.md for more information. diff --git a/docs/malcolm-hedgehog-e2e-iso-install.md b/docs/malcolm-hedgehog-e2e-iso-install.md index 492d8fe1f..280698e90 100644 --- a/docs/malcolm-hedgehog-e2e-iso-install.md +++ b/docs/malcolm-hedgehog-e2e-iso-install.md @@ -255,6 +255,8 @@ The [configuration and tuning](malcolm-config.md#ConfigAndTuning) wizard's quest - Answer **Y** to [populate the NetBox inventory](asset-interaction-analysis.md#NetBoxPopPassive) based on observed network traffic. Autopopulation is **not** recommended: [manual inventory population](asset-interaction-analysis.md#NetBoxPopManual) is the preferred method to create an accurate representation of the intended network design. * **Specify default NetBox site name** - NetBox has the concept of [sites](https://demo.netbox.dev/static/docs/core-functionality/sites-and-racks/); this default site name will be used as a query parameter for these enrichment lookups. +* **Should Malcolm create "catch-all" prefixes for private IP address space?** + - Answer **Y** to automatically create "catch-all" NetBox prefixes for private IP address space (i.e., one each for `10.0.0.0/8`, `172.16.0.0/12`, and `192.168.0.0/16`, respectively). This is not recommended for networks with more than one subnet. 
* **Should Malcolm capture live network traffic?** - Malcolm itself can perform [live analysis](live-analysis.md#LocalPCAP) of traffic it sees on another network interface (ideally not the same one used for its management). Answer **no** to this question in installations where Hedgehog Linux will be handling all network traffic capture. If users want Malcolm to observe and capture traffic instead of, or in addition to, a sensor running Hedgehog Linux, they should answer **yes** enable life traffic analysis using default settings, or select **customize** to proceed to answer the following related questions individually. - **Should Malcolm capture live network traffic to PCAP files for analysis with Arkime?** diff --git a/netbox/preload/prefixes_defaults.yml b/netbox/preload/prefixes_defaults.yml new file mode 100644 index 000000000..0e7d4c736 --- /dev/null +++ b/netbox/preload/prefixes_defaults.yml @@ -0,0 +1,6 @@ +- prefix: 10.0.0.0/8 + vrf: Private IP Space (10.0.0.0/8) +- prefix: 172.16.0.0/12 + vrf: Private IP Space (172.16.0.0/12) +- prefix: 10.0.0.0/8 + vrf: Private IP Space (192.168.0.0/16) diff --git a/netbox/preload/vrfs_defaults.yml b/netbox/preload/vrfs_defaults.yml new file mode 100644 index 000000000..866439507 --- /dev/null +++ b/netbox/preload/vrfs_defaults.yml @@ -0,0 +1,6 @@ +- enforce_unique: true + name: Private IP Space (10.0.0.0/8) +- enforce_unique: true + name: Private IP Space (172.16.0.0/12) +- enforce_unique: true + name: Private IP Space (192.168.0.0/16) diff --git a/netbox/scripts/netbox_init.py b/netbox/scripts/netbox_init.py index 0e2f500fe..a41063f9b 100755 --- a/netbox/scripts/netbox_init.py +++ b/netbox/scripts/netbox_init.py @@ -12,11 +12,14 @@ import pynetbox import randomcolor import re +import shutil import sys +import tempfile import time import malcolm_utils from collections.abc import Iterable +from distutils.dir_util import copy_tree from datetime import datetime from slugify import slugify from netbox_library_import import 
import_library @@ -238,6 +241,16 @@ def main(): required=False, help="Directory containing netbox-initializers files to preload", ) + parser.add_argument( + '--preload-prefixes', + dest='preloadPrefixes', + type=malcolm_utils.str2bool, + metavar="true|false", + nargs='?', + const=True, + default=malcolm_utils.str2bool(os.getenv('NETBOX_PRELOAD_PREFIXES', default='False')), + help="Preload IPAM VRFs/IP Prefixes for private IP space", + ) try: parser.error = parser.exit args = parser.parse_args() @@ -642,20 +655,35 @@ def main(): if os.path.isfile(netboxVenvPy) and os.path.isfile(manageScript) and os.path.isdir(args.preloadDir): try: with malcolm_utils.pushd(os.path.dirname(manageScript)): - retcode, output = malcolm_utils.run_process( - [ - netboxVenvPy, - os.path.basename(manageScript), - "load_initializer_data", - "--path", - args.preloadDir, - ], - logger=logging, - ) - if retcode == 0: - logging.debug(f"netbox-initializers: {retcode} {output}") - else: - logging.error(f"Error processing netbox-initializers: {retcode} {output}") + # make a local copy of the YMLs to preload + with tempfile.TemporaryDirectory() as tmpPreloadDir: + copy_tree(args.preloadDir, tmpPreloadDir) + + # only preload catch-all VRFs and IP Prefixes if explicitly specified and they don't already exist + if args.preloadPrefixes: + for loadType in ('vrfs', 'prefixes'): + defaultFileName = os.path.join(tmpPreloadDir, f'{loadType}_defaults.yml') + loadFileName = os.path.join(tmpPreloadDir, f'{loadType}.yml') + if os.path.isfile(defaultFileName) and (not os.path.isfile(loadFileName)): + try: + shutil.copyfile(defaultFileName, loadFileName) + except Exception: + pass + + retcode, output = malcolm_utils.run_process( + [ + netboxVenvPy, + os.path.basename(manageScript), + "load_initializer_data", + "--path", + tmpPreloadDir, + ], + logger=logging, + ) + if retcode == 0: + logging.debug(f"netbox-initializers: {retcode} {output}") + else: + logging.error(f"Error processing netbox-initializers: {retcode} 
{output}") except Exception as e: logging.error(f"{type(e).__name__} processing netbox-initializers: {e}") diff --git a/netbox/supervisord.conf b/netbox/supervisord.conf index 44e997720..8fe938f56 100644 --- a/netbox/supervisord.conf +++ b/netbox/supervisord.conf @@ -39,6 +39,8 @@ command=/opt/netbox/venv/bin/python /usr/local/bin/netbox_init.py --token "%(ENV_SUPERUSER_API_TOKEN)s" --net-map /usr/local/share/net-map.json --library "%(ENV_NETBOX_DEVICETYPE_LIBRARY_PATH)s" + --preload "%(ENV_NETBOX_PRELOAD_PATH)s" + --preload-prefixes %(ENV_NETBOX_PRELOAD_PREFIXES)s autostart=true autorestart=false startsecs=0 diff --git a/scripts/install.py b/scripts/install.py index 09285cef0..ad3b6b022 100755 --- a/scripts/install.py +++ b/scripts/install.py @@ -1263,6 +1263,10 @@ def tweak_malcolm_runtime(self, malcolm_install_path): ) if len(netboxSiteName) == 0: netboxSiteName = 'Malcolm' + netboxPreloadPrefixes = netboxEnabled and InstallerYesOrNo( + 'Should Malcolm create "catch-all" prefixes for private IP address space?', + default=args.netboxPreloadPrefixes, + ) # input packet capture parameters pcapNetSniff = False @@ -1511,6 +1515,11 @@ def tweak_malcolm_runtime(self, malcolm_install_path): 'NETBOX_DISABLED', TrueOrFalseNoQuote(not netboxEnabled), ), + EnvValue( + os.path.join(args.configDir, 'netbox-common.env'), + 'NETBOX_PRELOAD_PREFIXES', + TrueOrFalseNoQuote(netboxPreloadPrefixes), + ), # enable/disable netbox (postgres) EnvValue( os.path.join(args.configDir, 'netbox-common.env'), @@ -3671,6 +3680,16 @@ def main(): default=False, help="Automatically populate NetBox inventory based on observed network traffic", ) + netboxArgGroup.add_argument( + '--netbox-preload-prefixes', + dest='netboxPreloadPrefixes', + type=str2bool, + metavar="true|false", + nargs='?', + const=True, + default=False, + help="Preload NetBox IPAM VRFs/IP Prefixes for private IP space", + ) netboxArgGroup.add_argument( '--netbox-site-name', dest='netboxSiteName', From 
e20983ca8b8cadea24cfc6fdecc500f3a637885e Mon Sep 17 00:00:00 2001 From: SG Date: Thu, 26 Oct 2023 13:42:23 -0600 Subject: [PATCH 03/82] add option to auto-create catch-all netbox IPAM prefixes for private IP space (idaholab/Malcolm#279) --- netbox/preload/prefixes_defaults.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/netbox/preload/prefixes_defaults.yml b/netbox/preload/prefixes_defaults.yml index 0e7d4c736..dde5c6cb8 100644 --- a/netbox/preload/prefixes_defaults.yml +++ b/netbox/preload/prefixes_defaults.yml @@ -2,5 +2,5 @@ vrf: Private IP Space (10.0.0.0/8) - prefix: 172.16.0.0/12 vrf: Private IP Space (172.16.0.0/12) -- prefix: 10.0.0.0/8 +- prefix: 192.168.0.0/16 vrf: Private IP Space (192.168.0.0/16) From edd49a142e59e010d5b6241c20be83d5a5bccc2f Mon Sep 17 00:00:00 2001 From: SG Date: Thu, 26 Oct 2023 13:51:30 -0600 Subject: [PATCH 04/82] reduce verbosity of netbox --- scripts/malcolm_common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/malcolm_common.py b/scripts/malcolm_common.py index ec7b7e45d..552557eaf 100644 --- a/scripts/malcolm_common.py +++ b/scripts/malcolm_common.py @@ -700,8 +700,9 @@ def DownloadToFile(url, local_filename, debug=False): | esindices/list | executing\s+attempt_(transition|set_replica_count)\s+for | failed\s+to\s+get\s+tcp6?\s+stats\s+from\s+/proc - | GET\s+/(netbox/api|_cat/health|api/status|sessions2-|arkime_\w+).+HTTP/[\d\.].+\b200\b + | GET\s+/(_cat/health|api/status|sessions2-|arkime_\w+).+HTTP/[\d\.].+\b200\b | GET\s+/\s+.+\b200\b.+ELB-HealthChecker + | (GET|POST|PATCH)\s+/netbox/.+HTTP/[\d\.].+\b20[01]\b | loaded\s+config\s+'/etc/netbox/config/ | LOG:\s+checkpoint\s+(complete|starting) | "netbox"\s+application\s+started @@ -711,7 +712,6 @@ def DownloadToFile(url, local_filename, debug=False): | POST\s+/_bulk\s+HTTP/[\d\.].+\b20[01]\b | POST\s+/server/php/\s+HTTP/\d+\.\d+"\s+\d+\s+\d+.*:8443/ | POST\s+HTTP/[\d\.].+\b200\b - | 
(POST|PATCH)\s+/netbox/api/.+HTTP/[\d\.].+\b20[01]\b | reaped\s+unknown\s+pid | redis.*(changes.+seconds.+Saving|Background\s+saving\s+(started|terminated)|DB\s+saved\s+on\s+disk|Fork\s+CoW) | remov(ed|ing)\s+(old\s+file|dead\s+symlink|empty\s+directory) From 94e8f76cdebcb52c0d1ab448d09acbd2cd070ffd Mon Sep 17 00:00:00 2001 From: SG Date: Thu, 26 Oct 2023 13:54:00 -0600 Subject: [PATCH 05/82] add option to auto-create catch-all netbox IPAM prefixes for private IP space (idaholab/Malcolm#279) --- netbox/preload/prefixes_defaults.yml | 6 +++--- netbox/preload/vrfs_defaults.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/netbox/preload/prefixes_defaults.yml b/netbox/preload/prefixes_defaults.yml index dde5c6cb8..6e9fe981f 100644 --- a/netbox/preload/prefixes_defaults.yml +++ b/netbox/preload/prefixes_defaults.yml @@ -1,6 +1,6 @@ - prefix: 10.0.0.0/8 - vrf: Private IP Space (10.0.0.0/8) + vrf: 10.0.0.0/8 - prefix: 172.16.0.0/12 - vrf: Private IP Space (172.16.0.0/12) + vrf: 172.16.0.0/12 - prefix: 192.168.0.0/16 - vrf: Private IP Space (192.168.0.0/16) + vrf: 192.168.0.0/16 diff --git a/netbox/preload/vrfs_defaults.yml b/netbox/preload/vrfs_defaults.yml index 866439507..d83018c72 100644 --- a/netbox/preload/vrfs_defaults.yml +++ b/netbox/preload/vrfs_defaults.yml @@ -1,6 +1,6 @@ - enforce_unique: true - name: Private IP Space (10.0.0.0/8) + name: 10.0.0.0/8 - enforce_unique: true - name: Private IP Space (172.16.0.0/12) + name: 172.16.0.0/12 - enforce_unique: true - name: Private IP Space (192.168.0.0/16) + name: 192.168.0.0/16 From cab66bdc1318b5a11f89208f4dd7479032b9cb80 Mon Sep 17 00:00:00 2001 From: SG Date: Thu, 26 Oct 2023 15:20:56 -0600 Subject: [PATCH 06/82] address issues with NetBox database and Logstash's NetBox cache (idaholab/Malcolm#259); work in progress, almost certainly broken in this state --- Dockerfiles/logstash.Dockerfile | 5 +- logstash/ruby/netbox_enrich.rb | 972 ++++++++++++++++---------------- 2 files changed, 499 
insertions(+), 478 deletions(-) diff --git a/Dockerfiles/logstash.Dockerfile b/Dockerfiles/logstash.Dockerfile index 80a7fb50a..67e1ed0c4 100644 --- a/Dockerfiles/logstash.Dockerfile +++ b/Dockerfiles/logstash.Dockerfile @@ -63,11 +63,12 @@ RUN set -x && \ pip3 install ipaddress supervisor manuf pyyaml && \ export JAVA_HOME=/usr/share/logstash/jdk && \ /usr/share/logstash/vendor/jruby/bin/jruby -S gem install bundler && \ - echo "gem 'lru_cache'" >> /usr/share/logstash/Gemfile && \ + echo "gem 'concurrent-ruby'" >> /usr/share/logstash/Gemfile && \ echo "gem 'deep_merge'" >> /usr/share/logstash/Gemfile && \ echo "gem 'fuzzy-string-match'" >> /usr/share/logstash/Gemfile && \ - echo "gem 'stringex'" >> /usr/share/logstash/Gemfile && \ + echo "gem 'lru_cache'" >> /usr/share/logstash/Gemfile && \ echo "gem 'psych'" >> /usr/share/logstash/Gemfile && \ + echo "gem 'stringex'" >> /usr/share/logstash/Gemfile && \ /usr/share/logstash/bin/ruby -S bundle install && \ logstash-plugin install --preserve logstash-filter-translate logstash-filter-cidr logstash-filter-dns \ logstash-filter-json logstash-filter-prune logstash-filter-http \ diff --git a/logstash/ruby/netbox_enrich.rb b/logstash/ruby/netbox_enrich.rb index 6de233590..117f88094 100644 --- a/logstash/ruby/netbox_enrich.rb +++ b/logstash/ruby/netbox_enrich.rb @@ -3,6 +3,7 @@ def concurrency end def register(params) + require 'concurrent' require 'date' require 'faraday' require 'fuzzystringmatch' @@ -45,10 +46,6 @@ def register(params) # API parameters @page_size = params.fetch("page_size", 50) - # caching parameters - @cache_size = params.fetch("cache_size", 1000) - @cache_ttl = params.fetch("cache_ttl", 600) - # target field to store looked-up value @target = params["target"] @@ -73,6 +70,13 @@ def register(params) @netbox_url = params.fetch("netbox_url", "http://netbox:8080/netbox/api").delete_suffix("/") @netbox_url_suffix = "/netbox/api" @netbox_url_base = @netbox_url.delete_suffix(@netbox_url_suffix) + 
@netbox_headers = { 'Content-Type': 'application/json' } + + # netbox connection (will be initialized in a thread-safe manner in filter) + @netbox_conn = nil + @netbox_conn_lock = Concurrent::ReentrantReadWriteLock.new + @netbox_conn_needs_reset = false + @netbox_conn_resetting = false # connection token (either specified directly or read from ENV via netbox_token_env) @netbox_token = params["netbox_token"] @@ -81,9 +85,6 @@ def register(params) @netbox_token = ENV[_netbox_token_env] end - # hash of lookup types (from @lookup_type), each of which contains the respective looked-up values - @cache_hash = LruRedux::ThreadSafeCache.new(params.fetch("lookup_cache_size", 512)) - # these are used for autopopulation only, not lookup/enrichment # autopopulate - either specified directly or read from ENV via autopopulate_env @@ -201,7 +202,6 @@ def filter(event) _url = @netbox_url _url_base = @netbox_url_base _url_suffix = @netbox_url_suffix - _token = @netbox_token _page_size = @page_size _verbose = @verbose _lookup_type = @lookup_type @@ -218,494 +218,514 @@ def filter(event) _autopopulate_mac = event.get("#{@source_mac}") _autopopulate_oui = event.get("#{@source_oui}") - _result = @cache_hash.getset(_lookup_type){ - LruRedux::TTL::ThreadSafeCache.new(@cache_size, @cache_ttl) - }.getset(_key){ - - _nb = Faraday.new(_url) do |conn| - conn.request :authorization, 'Token', _token - conn.request :url_encoded - conn.response :json, :parser_options => { :symbolize_names => true } - end - _nb_headers = { 'Content-Type': 'application/json' } - - _lookup_result = nil - _autopopulate_device = nil - _autopopulate_role = nil - _autopopulate_dtype = nil - _autopopulate_interface = nil - _autopopulate_ip = nil - _autopopulate_manuf = nil - _autopopulate_site = nil - _vrfs = nil - _devices = nil - _exception_error = false - - # handle :ip_device first, because if we're doing autopopulate we're also going to use - # some of the logic from :ip_vrf - - if (_lookup_type == :ip_device) - 
################################################################################# - # retrieve the list of IP addresses where address matches the search key, limited to "assigned" addresses. - # then, for those IP addresses, search for devices pertaining to the interfaces assigned to each - # IP address (e.g., ipam.ip_address -> dcim.interface -> dcim.device, or - # ipam.ip_address -> virtualization.interface -> virtualization.virtual_machine) - _devices = Array.new - _query = { :address => _key, - :offset => 0, - :limit => _page_size } - begin + _result = nil + _autopopulate_device = nil + _autopopulate_role = nil + _autopopulate_dtype = nil + _autopopulate_interface = nil + _autopopulate_ip = nil + _autopopulate_manuf = nil + _autopopulate_site = nil + _vrfs = nil + _devices = nil + _exception_error_general = false + _exception_error_connection = false + + @netbox_conn_lock.with_read_lock { + + # make sure the connection to the NetBox API exists and wasn't flagged for reconnect + if @netbox_conn.nil? || @netbox_conn_needs_reset + @netbox_conn_lock.with_write_lock { + if @netbox_conn.nil? || @netbox_conn_needs_reset + begin + # we need to reconnect to the NetBox API + @netbox_conn_resetting = true + @netbox_conn = Faraday.new(_url) do |conn| + conn.request :authorization, 'Token', @netbox_token + conn.request :url_encoded + conn.response :json, :parser_options => { :symbolize_names => true } + end + ensure + @netbox_conn_resetting = false + @netbox_conn_needs_reset = @netbox_conn.nil? + end + end # connection check in write lock + } # @netbox_conn_lock.with_write_lock + end # connection check in read lock + + # handle :ip_device first, because if we're doing autopopulate we're also going to use + # some of the logic from :ip_vrf + + if (_lookup_type == :ip_device) + ################################################################################# + # retrieve the list of IP addresses where address matches the search key, limited to "assigned" addresses. 
+ # then, for those IP addresses, search for devices pertaining to the interfaces assigned to each + # IP address (e.g., ipam.ip_address -> dcim.interface -> dcim.device, or + # ipam.ip_address -> virtualization.interface -> virtualization.virtual_machine) + _devices = Array.new + _query = { :address => _key, + :offset => 0, + :limit => _page_size } + begin + while true do + if (_ip_addresses_response = @netbox_conn.get('ipam/ip-addresses/', _query).body) && + _ip_addresses_response.is_a?(Hash) + then + _tmp_ip_addresses = _ip_addresses_response.fetch(:results, []) + _tmp_ip_addresses.each do |i| + _is_device = nil + if (_obj = i.fetch(:assigned_object, nil)) && + ((_device_obj = _obj.fetch(:device, nil)) || + (_virtualized_obj = _obj.fetch(:virtual_machine, nil))) + then + _is_device = !_device_obj.nil? + _device = _is_device ? _device_obj : _virtualized_obj + # if we can, follow the :assigned_object's "full" device URL to get more information + _device = (_device.has_key?(:url) && (_full_device = @netbox_conn.get(_device[:url].delete_prefix(_url_base).delete_prefix(_url_suffix).delete_prefix("/")).body)) ? _full_device : _device + _device_id = _device.fetch(:id, nil) + _device_site = ((_site = _device.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil) + next unless (_device_site.to_s.downcase == _lookup_site.to_s.downcase) || _lookup_site.nil? || _lookup_site.empty? || _device_site.nil? || _device_site.empty? + # look up service if requested (based on device/vm found and service port) + if (_lookup_service_port > 0) + _services = Array.new + _service_query = { (_is_device ? 
:device_id : :virtual_machine_id) => _device_id, :port => _lookup_service_port, :offset => 0, :limit => _page_size } while true do - if (_ip_addresses_response = _nb.get('ipam/ip-addresses/', _query).body) && - _ip_addresses_response.is_a?(Hash) + if (_services_response = @netbox_conn.get('ipam/services/', _service_query).body) && + _services_response.is_a?(Hash) then - _tmp_ip_addresses = _ip_addresses_response.fetch(:results, []) - _tmp_ip_addresses.each do |i| - _is_device = nil - if (_obj = i.fetch(:assigned_object, nil)) && - ((_device_obj = _obj.fetch(:device, nil)) || - (_virtualized_obj = _obj.fetch(:virtual_machine, nil))) - then - _is_device = !_device_obj.nil? - _device = _is_device ? _device_obj : _virtualized_obj - # if we can, follow the :assigned_object's "full" device URL to get more information - _device = (_device.has_key?(:url) && (_full_device = _nb.get(_device[:url].delete_prefix(_url_base).delete_prefix(_url_suffix).delete_prefix("/")).body)) ? _full_device : _device - _device_id = _device.fetch(:id, nil) - _device_site = ((_site = _device.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil) - next unless (_device_site.to_s.downcase == _lookup_site.to_s.downcase) || _lookup_site.nil? || _lookup_site.empty? || _device_site.nil? || _device_site.empty? - # look up service if requested (based on device/vm found and service port) - if (_lookup_service_port > 0) - _services = Array.new - _service_query = { (_is_device ? :device_id : :virtual_machine_id) => _device_id, :port => _lookup_service_port, :offset => 0, :limit => _page_size } - while true do - if (_services_response = _nb.get('ipam/services/', _service_query).body) && - _services_response.is_a?(Hash) - then - _tmp_services = _services_response.fetch(:results, []) - _services.unshift(*_tmp_services) unless _tmp_services.nil? || _tmp_services.empty? 
- _service_query[:offset] += _tmp_services.length() - break unless (_tmp_services.length() >= _page_size) - else - break - end - end - _device[:service] = _services - end - # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } - # if _verbose, include entire object as :details - _devices << { :name => _device.fetch(:name, _device.fetch(:display, nil)), - :id => _device_id, - :url => _device.fetch(:url, nil), - :service => _device.fetch(:service, []).map {|s| s.fetch(:name, s.fetch(:display, nil)) }, - :site => _device_site, - :role => ((_role = _device.fetch(:role, nil)) && _role&.has_key?(:name)) ? _role[:name] : _role&.fetch(:display, nil), - :cluster => ((_cluster = _device.fetch(:cluster, nil)) && _cluster&.has_key?(:name)) ? _cluster[:name] : _cluster&.fetch(:display, nil), - :device_type => ((_dtype = _device.fetch(:device_type, nil)) && _dtype&.has_key?(:name)) ? _dtype[:name] : _dtype&.fetch(:display, nil), - :manufacturer => ((_manuf = _device.dig(:device_type, :manufacturer)) && _manuf&.has_key?(:name)) ? _manuf[:name] : _manuf&.fetch(:display, nil), - :details => _verbose ? _device : nil } - end - end - _query[:offset] += _tmp_ip_addresses.length() - break unless (_tmp_ip_addresses.length() >= _page_size) + _tmp_services = _services_response.fetch(:results, []) + _services.unshift(*_tmp_services) unless _tmp_services.nil? || _tmp_services.empty? + _service_query[:offset] += _tmp_services.length() + break unless (_tmp_services.length() >= _page_size) else - # weird/bad response, bail - _exception_error = true break end - end # while true - rescue Faraday::Error - # give up aka do nothing - _exception_error = true + end + _device[:service] = _services end - - if _autopopulate && (_query[:offset] == 0) && !_exception_error && _key_ip&.private? - - # no results found, autopopulate enabled, private-space IP address... 
- # let's create an entry for this device - - # if MAC is set but OUI is not, do a quick lookup - if (!_autopopulate_mac.nil? && !_autopopulate_mac.empty?) && - (_autopopulate_oui.nil? || _autopopulate_oui.empty?) + # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } + # if _verbose, include entire object as :details + _devices << { :name => _device.fetch(:name, _device.fetch(:display, nil)), + :id => _device_id, + :url => _device.fetch(:url, nil), + :service => _device.fetch(:service, []).map {|s| s.fetch(:name, s.fetch(:display, nil)) }, + :site => _device_site, + :role => ((_role = _device.fetch(:role, nil)) && _role&.has_key?(:name)) ? _role[:name] : _role&.fetch(:display, nil), + :cluster => ((_cluster = _device.fetch(:cluster, nil)) && _cluster&.has_key?(:name)) ? _cluster[:name] : _cluster&.fetch(:display, nil), + :device_type => ((_dtype = _device.fetch(:device_type, nil)) && _dtype&.has_key?(:name)) ? _dtype[:name] : _dtype&.fetch(:display, nil), + :manufacturer => ((_manuf = _device.dig(:device_type, :manufacturer)) && _manuf&.has_key?(:name)) ? _manuf[:name] : _manuf&.fetch(:display, nil), + :details => _verbose ? _device : nil } + end + end + _query[:offset] += _tmp_ip_addresses.length() + break unless (_tmp_ip_addresses.length() >= _page_size) + else + # weird/bad response, bail + _exception_error_general = true + break + end + end # while true + rescue Faraday::Error + # give up aka do nothing + _exception_error_general = true + end + + if _autopopulate && (_query[:offset] == 0) && !_exception_error_general && !_exception_error_connection && _key_ip&.private? + + # no results found, autopopulate enabled, private-space IP address... + # let's create an entry for this device + + # if MAC is set but OUI is not, do a quick lookup + if (!_autopopulate_mac.nil? && !_autopopulate_mac.empty?) && + (_autopopulate_oui.nil? || _autopopulate_oui.empty?) 
+ then + case _autopopulate_mac + when String + if @macregex.match?(_autopopulate_mac) + _macint = mac_string_to_integer(_autopopulate_mac) + _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} + _autopopulate_oui = _vendor[2] unless _vendor.nil? + end # _autopopulate_mac matches @macregex + when Array + _autopopulate_mac.each do |_addr| + if @macregex.match?(_addr) + _macint = mac_string_to_integer(_addr) + _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} + if !_vendor.nil? + _autopopulate_oui = _vendor[2] + break + end # !_vendor.nil? + end # _addr matches @macregex + end # _autopopulate_mac.each do + end # case statement _autopopulate_mac String vs. Array + end # MAC is populated but OUI is not + + # match/look up manufacturer based on OUI + if !_autopopulate_oui.nil? && !_autopopulate_oui.empty? + + _autopopulate_oui = _autopopulate_oui.first() unless !_autopopulate_oui.is_a?(Array) + + # does it look like a VM or a regular device? + if @vm_namesarray.include?(_autopopulate_oui.downcase) + # looks like this is probably a virtual machine + _autopopulate_manuf = { :name => _autopopulate_oui, + :match => 1.0, + :vm => true, + :id => nil } + + else + # looks like this is not a virtual machine (or we can't tell) so assume its' a regular device + _autopopulate_manuf = @manuf_hash.getset(_autopopulate_oui) { + _fuzzy_matcher = FuzzyStringMatch::JaroWinkler.create( :pure ) + _manufs = Array.new + # fetch the manufacturers to do the comparison. 
this is a lot of work + # and not terribly fast but once the hash it populated it shouldn't happen too often + _query = { :offset => 0, + :limit => _page_size } + begin + while true do + if (_manufs_response = @netbox_conn.get('dcim/manufacturers/', _query).body) && + _manufs_response.is_a?(Hash) then - case _autopopulate_mac - when String - if @macregex.match?(_autopopulate_mac) - _macint = mac_string_to_integer(_autopopulate_mac) - _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} - _autopopulate_oui = _vendor[2] unless _vendor.nil? - end # _autopopulate_mac matches @macregex - when Array - _autopopulate_mac.each do |_addr| - if @macregex.match?(_addr) - _macint = mac_string_to_integer(_addr) - _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} - if !_vendor.nil? - _autopopulate_oui = _vendor[2] - break - end # !_vendor.nil? - end # _addr matches @macregex - end # _autopopulate_mac.each do - end # case statement _autopopulate_mac String vs. Array - end # MAC is populated but OUI is not - - # match/look up manufacturer based on OUI - if !_autopopulate_oui.nil? && !_autopopulate_oui.empty? - - _autopopulate_oui = _autopopulate_oui.first() unless !_autopopulate_oui.is_a?(Array) - - # does it look like a VM or a regular device? - if @vm_namesarray.include?(_autopopulate_oui.downcase) - # looks like this is probably a virtual machine - _autopopulate_manuf = { :name => _autopopulate_oui, - :match => 1.0, - :vm => true, - :id => nil } - - else - # looks like this is not a virtual machine (or we can't tell) so assume its' a regular device - _autopopulate_manuf = @manuf_hash.getset(_autopopulate_oui) { - _fuzzy_matcher = FuzzyStringMatch::JaroWinkler.create( :pure ) - _manufs = Array.new - # fetch the manufacturers to do the comparison. 
this is a lot of work - # and not terribly fast but once the hash it populated it shouldn't happen too often - _query = { :offset => 0, - :limit => _page_size } - begin - while true do - if (_manufs_response = _nb.get('dcim/manufacturers/', _query).body) && - _manufs_response.is_a?(Hash) - then - _tmp_manufs = _manufs_response.fetch(:results, []) - _tmp_manufs.each do |_manuf| - _tmp_name = _manuf.fetch(:name, _manuf.fetch(:display, nil)) - _manufs << { :name => _tmp_name, - :id => _manuf.fetch(:id, nil), - :url => _manuf.fetch(:url, nil), - :match => _fuzzy_matcher.getDistance(_tmp_name.to_s.downcase, _autopopulate_oui.to_s.downcase), - :vm => false - } - end - _query[:offset] += _tmp_manufs.length() - break unless (_tmp_manufs.length() >= _page_size) - else - break - end - end - rescue Faraday::Error - # give up aka do nothing - _exception_error = true - end - # return the manuf with the highest match - !_manufs&.empty? ? _manufs.max_by{|k| k[:match] } : nil - } - end # virtual machine vs. regular device - end # _autopopulate_oui specified - - if !_autopopulate_manuf.is_a?(Hash) - # no match was found at ANY match level (empty database or no OUI specified), set default ("unspecified") manufacturer - _autopopulate_manuf = { :name => _autopopulate_create_manuf ? _autopopulate_oui : _autopopulate_default_manuf, - :match => 0.0, - :vm => false, - :id => nil} - end - - # make sure the site and role exists - - _autopopulate_site = @site_hash.getset(_autopopulate_default_site) { - begin - _site = nil - - # look it up first - _query = { :offset => 0, - :limit => 1, - :name => _autopopulate_default_site } - if (_sites_response = _nb.get('dcim/sites/', _query).body) && - _sites_response.is_a?(Hash) && - (_tmp_sites = _sites_response.fetch(:results, [])) && - (_tmp_sites.length() > 0) - then - _site = _tmp_sites.first - end - - if _site.nil? 
- # the device site is not found, create it - _site_data = { :name => _autopopulate_default_site, - :slug => _autopopulate_default_site.to_url, - :status => "active" } - if (_site_create_response = _nb.post('dcim/sites/', _site_data.to_json, _nb_headers).body) && - _site_create_response.is_a?(Hash) && - _site_create_response.has_key?(:id) - then - _site = _site_create_response - end - end - - rescue Faraday::Error - # give up aka do nothing - _exception_error = true - end - _site - } - - _autopopulate_role = @role_hash.getset(_autopopulate_default_role) { - begin - _role = nil - - # look it up first - _query = { :offset => 0, - :limit => 1, - :name => _autopopulate_default_role } - if (_roles_response = _nb.get('dcim/device-roles/', _query).body) && - _roles_response.is_a?(Hash) && - (_tmp_roles = _roles_response.fetch(:results, [])) && - (_tmp_roles.length() > 0) - then - _role = _tmp_roles.first - end - - if _role.nil? - # the role is not found, create it - _role_data = { :name => _autopopulate_default_role, - :slug => _autopopulate_default_role.to_url, - :color => "d3d3d3" } - if (_role_create_response = _nb.post('dcim/device-roles/', _role_data.to_json, _nb_headers).body) && - _role_create_response.is_a?(Hash) && - _role_create_response.has_key?(:id) - then - _role = _role_create_response - end - end - - rescue Faraday::Error - # give up aka do nothing - _exception_error = true + _tmp_manufs = _manufs_response.fetch(:results, []) + _tmp_manufs.each do |_manuf| + _tmp_name = _manuf.fetch(:name, _manuf.fetch(:display, nil)) + _manufs << { :name => _tmp_name, + :id => _manuf.fetch(:id, nil), + :url => _manuf.fetch(:url, nil), + :match => _fuzzy_matcher.getDistance(_tmp_name.to_s.downcase, _autopopulate_oui.to_s.downcase), + :vm => false + } end - _role - } - - # we should have found or created the autopopulate role and site - begin - if _autopopulate_site&.fetch(:id, nil)&.nonzero? && - _autopopulate_role&.fetch(:id, nil)&.nonzero? 
- then - - if _autopopulate_manuf[:vm] - # a virtual machine - _device_name = _autopopulate_hostname.to_s.empty? ? "#{_autopopulate_manuf[:name]} @ #{_key}" : "#{_autopopulate_hostname} @ #{_key}" - _device_data = { :name => _device_name, - :site => _autopopulate_site[:id], - :status => "staged" } - if (_device_create_response = _nb.post('virtualization/virtual-machines/', _device_data.to_json, _nb_headers).body) && - _device_create_response.is_a?(Hash) && - _device_create_response.has_key?(:id) - then - _autopopulate_device = _device_create_response - end - - else - # a regular non-vm device - - if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? - # the manufacturer was default (not found) so look it up first - _query = { :offset => 0, - :limit => 1, - :name => _autopopulate_manuf[:name] } - if (_manufs_response = _nb.get('dcim/manufacturers/', _query).body) && - _manufs_response.is_a?(Hash) && - (_tmp_manufs = _manufs_response.fetch(:results, [])) && - (_tmp_manufs.length() > 0) - then - _autopopulate_manuf[:id] = _tmp_manufs.first.fetch(:id, nil) - _autopopulate_manuf[:match] = 1.0 - end - end - - if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? - # the manufacturer is still not found, create it - _manuf_data = { :name => _autopopulate_manuf[:name], - :slug => _autopopulate_manuf[:name].to_url } - if (_manuf_create_response = _nb.post('dcim/manufacturers/', _manuf_data.to_json, _nb_headers).body) && - _manuf_create_response.is_a?(Hash) - then - _autopopulate_manuf[:id] = _manuf_create_response.fetch(:id, nil) - _autopopulate_manuf[:match] = 1.0 - end - end - - # at this point we *must* have the manufacturer ID - if _autopopulate_manuf.fetch(:id, nil)&.nonzero? 
- - # make sure the desired device type also exists, look it up first - _query = { :offset => 0, - :limit => 1, - :manufacturer_id => _autopopulate_manuf[:id], - :model => _autopopulate_default_dtype } - if (_dtypes_response = _nb.get('dcim/device-types/', _query).body) && - _dtypes_response.is_a?(Hash) && - (_tmp_dtypes = _dtypes_response.fetch(:results, [])) && - (_tmp_dtypes.length() > 0) - then - _autopopulate_dtype = _tmp_dtypes.first - end - - if _autopopulate_dtype.nil? - # the device type is not found, create it - _dtype_data = { :manufacturer => _autopopulate_manuf[:id], - :model => _autopopulate_default_dtype, - :slug => _autopopulate_default_dtype.to_url } - if (_dtype_create_response = _nb.post('dcim/device-types/', _dtype_data.to_json, _nb_headers).body) && - _dtype_create_response.is_a?(Hash) && - _dtype_create_response.has_key?(:id) - then - _autopopulate_dtype = _dtype_create_response - end - end - - # # now we must also have the device type ID - if _autopopulate_dtype&.fetch(:id, nil)&.nonzero? - - # create the device - _device_name = _autopopulate_hostname.to_s.empty? ? "#{_autopopulate_manuf[:name]} @ #{_key}" : "#{_autopopulate_hostname} @ #{_key}" - _device_data = { :name => _device_name, - :device_type => _autopopulate_dtype[:id], - :role => _autopopulate_role[:id], - :site => _autopopulate_site[:id], - :status => "staged" } - if (_device_create_response = _nb.post('dcim/devices/', _device_data.to_json, _nb_headers).body) && - _device_create_response.is_a?(Hash) && - _device_create_response.has_key?(:id) - then - _autopopulate_device = _device_create_response - end - - end # _autopopulate_dtype[:id] is valid - - end # _autopopulate_manuf[:id] is valid - - end # virtual machine vs. 
regular device - - end # site and role are valid - - rescue Faraday::Error - # give up aka do nothing - _exception_error = true + _query[:offset] += _tmp_manufs.length() + break unless (_tmp_manufs.length() >= _page_size) + else + break end - - if !_autopopulate_device.nil? - # we created a device, so send it back out as the result for the event as well - _devices << { :name => _autopopulate_device&.fetch(:name, _autopopulate_device&.fetch(:display, nil)), - :id => _autopopulate_device&.fetch(:id, nil), - :url => _autopopulate_device&.fetch(:url, nil), - :site => _autopopulate_site&.fetch(:name, nil), - :role => _autopopulate_role&.fetch(:name, nil), - :device_type => _autopopulate_dtype&.fetch(:name, nil), - :manufacturer => _autopopulate_manuf&.fetch(:name, nil), - :details => _verbose ? _autopopulate_device : nil } - end # _autopopulate_device was not nil (i.e., we autocreated a device) - - end # _autopopulate turned on and no results found - - _devices = collect_values(crush(_devices)) - _devices.fetch(:service, [])&.flatten!&.uniq! - _lookup_result = _devices - end # _lookup_type == :ip_device - - # this || is because we are going to need to do the VRF lookup if we're autopopulating - # as well as if we're specifically requested to do that enrichment - - if (_lookup_type == :ip_vrf) || !_autopopulate_device.nil? - ################################################################################# - # retrieve the list VRFs containing IP address prefixes containing the search key - _vrfs = Array.new - _query = { :contains => _key, - :offset => 0, - :limit => _page_size } - _query[:site_n] = _lookup_site unless _lookup_site.nil? || _lookup_site.empty? 
- begin - while true do - if (_prefixes_response = _nb.get('ipam/prefixes/', _query).body) && - _prefixes_response.is_a?(Hash) - then - _tmp_prefixes = _prefixes_response.fetch(:results, []) - _tmp_prefixes.each do |p| - if (_vrf = p.fetch(:vrf, nil)) - # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } - # if _verbose, include entire object as :details - _vrfs << { :name => _vrf.fetch(:name, _vrf.fetch(:display, nil)), - :id => _vrf.fetch(:id, nil), - :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), - :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), - :url => p.fetch(:url, _vrf.fetch(:url, nil)), - :details => _verbose ? _vrf.merge({:prefix => p.tap { |h| h.delete(:vrf) }}) : nil } - end - end - _query[:offset] += _tmp_prefixes.length() - break unless (_tmp_prefixes.length() >= _page_size) - else - break - end - end - rescue Faraday::Error - # give up aka do nothing - _exception_error = true end - _vrfs = collect_values(crush(_vrfs)) - _lookup_result = _vrfs unless (_lookup_type != :ip_vrf) - end # _lookup_type == :ip_vrf - - if !_autopopulate_device.nil? && _autopopulate_device.fetch(:id, nil)&.nonzero? - # device has been created, we need to create an interface for it - _interface_data = { _autopopulate_manuf[:vm] ? :virtual_machine : :device => _autopopulate_device[:id], - :name => "e0", - :type => "other" } - if !_autopopulate_mac.nil? && !_autopopulate_mac.empty? - _interface_data[:mac_address] = _autopopulate_mac.is_a?(Array) ? _autopopulate_mac.first : _autopopulate_mac + rescue Faraday::Error + # give up aka do nothing + _exception_error_general = true + end + # return the manuf with the highest match + !_manufs&.empty? ? _manufs.max_by{|k| k[:match] } : nil + } + end # virtual machine vs. 
regular device + end # _autopopulate_oui specified + + if !_autopopulate_manuf.is_a?(Hash) + # no match was found at ANY match level (empty database or no OUI specified), set default ("unspecified") manufacturer + _autopopulate_manuf = { :name => _autopopulate_create_manuf ? _autopopulate_oui : _autopopulate_default_manuf, + :match => 0.0, + :vm => false, + :id => nil} + end + + # make sure the site and role exists + + _autopopulate_site = @site_hash.getset(_autopopulate_default_site) { + begin + _site = nil + + # look it up first + _query = { :offset => 0, + :limit => 1, + :name => _autopopulate_default_site } + if (_sites_response = @netbox_conn.get('dcim/sites/', _query).body) && + _sites_response.is_a?(Hash) && + (_tmp_sites = _sites_response.fetch(:results, [])) && + (_tmp_sites.length() > 0) + then + _site = _tmp_sites.first + end + + if _site.nil? + # the device site is not found, create it + _site_data = { :name => _autopopulate_default_site, + :slug => _autopopulate_default_site.to_url, + :status => "active" } + if (_site_create_response = @netbox_conn.post('dcim/sites/', _site_data.to_json, @netbox_headers).body) && + _site_create_response.is_a?(Hash) && + _site_create_response.has_key?(:id) + then + _site = _site_create_response + end + end + + rescue Faraday::Error + # give up aka do nothing + _exception_error_general = true + end + _site + } + + _autopopulate_role = @role_hash.getset(_autopopulate_default_role) { + begin + _role = nil + + # look it up first + _query = { :offset => 0, + :limit => 1, + :name => _autopopulate_default_role } + if (_roles_response = @netbox_conn.get('dcim/device-roles/', _query).body) && + _roles_response.is_a?(Hash) && + (_tmp_roles = _roles_response.fetch(:results, [])) && + (_tmp_roles.length() > 0) + then + _role = _tmp_roles.first + end + + if _role.nil? 
+ # the role is not found, create it + _role_data = { :name => _autopopulate_default_role, + :slug => _autopopulate_default_role.to_url, + :color => "d3d3d3" } + if (_role_create_response = @netbox_conn.post('dcim/device-roles/', _role_data.to_json, @netbox_headers).body) && + _role_create_response.is_a?(Hash) && + _role_create_response.has_key?(:id) + then + _role = _role_create_response + end + end + + rescue Faraday::ConnectionFailed + # give up aka do nothing (and connect next time) + _exception_error_connection = true + rescue Faraday::Error + # give up aka do nothing + _exception_error_general = true + end + _role + } + + # we should have found or created the autopopulate role and site + begin + if _autopopulate_site&.fetch(:id, nil)&.nonzero? && + _autopopulate_role&.fetch(:id, nil)&.nonzero? + then + + if _autopopulate_manuf[:vm] + # a virtual machine + _device_name = _autopopulate_hostname.to_s.empty? ? "#{_autopopulate_manuf[:name]} @ #{_key}" : "#{_autopopulate_hostname} @ #{_key}" + _device_data = { :name => _device_name, + :site => _autopopulate_site[:id], + :status => "staged" } + if (_device_create_response = @netbox_conn.post('virtualization/virtual-machines/', _device_data.to_json, @netbox_headers).body) && + _device_create_response.is_a?(Hash) && + _device_create_response.has_key?(:id) + then + _autopopulate_device = _device_create_response + end + + else + # a regular non-vm device + + if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? + # the manufacturer was default (not found) so look it up first + _query = { :offset => 0, + :limit => 1, + :name => _autopopulate_manuf[:name] } + if (_manufs_response = @netbox_conn.get('dcim/manufacturers/', _query).body) && + _manufs_response.is_a?(Hash) && + (_tmp_manufs = _manufs_response.fetch(:results, [])) && + (_tmp_manufs.length() > 0) + then + _autopopulate_manuf[:id] = _tmp_manufs.first.fetch(:id, nil) + _autopopulate_manuf[:match] = 1.0 end - if !_vrfs.nil? && !_vrfs.empty? 
- _interface_data[:vrf] = _vrfs.fetch(:id, []).first + end + + if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? + # the manufacturer is still not found, create it + _manuf_data = { :name => _autopopulate_manuf[:name], + :slug => _autopopulate_manuf[:name].to_url } + if (_manuf_create_response = @netbox_conn.post('dcim/manufacturers/', _manuf_data.to_json, @netbox_headers).body) && + _manuf_create_response.is_a?(Hash) + then + _autopopulate_manuf[:id] = _manuf_create_response.fetch(:id, nil) + _autopopulate_manuf[:match] = 1.0 end - if (_interface_create_reponse = _nb.post(_autopopulate_manuf[:vm] ? 'virtualization/interfaces/' : 'dcim/interfaces/', _interface_data.to_json, _nb_headers).body) && - _interface_create_reponse.is_a?(Hash) && - _interface_create_reponse.has_key?(:id) + end + + # at this point we *must* have the manufacturer ID + if _autopopulate_manuf.fetch(:id, nil)&.nonzero? + + # make sure the desired device type also exists, look it up first + _query = { :offset => 0, + :limit => 1, + :manufacturer_id => _autopopulate_manuf[:id], + :model => _autopopulate_default_dtype } + if (_dtypes_response = @netbox_conn.get('dcim/device-types/', _query).body) && + _dtypes_response.is_a?(Hash) && + (_tmp_dtypes = _dtypes_response.fetch(:results, [])) && + (_tmp_dtypes.length() > 0) then - _autopopulate_interface = _interface_create_reponse + _autopopulate_dtype = _tmp_dtypes.first end - if !_autopopulate_interface.nil? && _autopopulate_interface.fetch(:id, nil)&.nonzero? - # interface has been created, we need to create an IP address for it - _ip_data = { :address => "#{_key}/#{_key_ip&.prefix()}", - :assigned_object_type => _autopopulate_manuf[:vm] ? "virtualization.vminterface" : "dcim.interface", - :assigned_object_id => _autopopulate_interface[:id], - :status => "active" } - if (_vrf = _autopopulate_interface.fetch(:vrf, nil)) && - (_vrf.has_key?(:id)) + if _autopopulate_dtype.nil? 
+ # the device type is not found, create it + _dtype_data = { :manufacturer => _autopopulate_manuf[:id], + :model => _autopopulate_default_dtype, + :slug => _autopopulate_default_dtype.to_url } + if (_dtype_create_response = @netbox_conn.post('dcim/device-types/', _dtype_data.to_json, @netbox_headers).body) && + _dtype_create_response.is_a?(Hash) && + _dtype_create_response.has_key?(:id) then - _ip_data[:vrf] = _vrf[:id] + _autopopulate_dtype = _dtype_create_response end - if (_ip_create_reponse = _nb.post('ipam/ip-addresses/', _ip_data.to_json, _nb_headers).body) && - _ip_create_reponse.is_a?(Hash) && - _ip_create_reponse.has_key?(:id) - then - _autopopulate_ip = _ip_create_reponse - end - end # check if interface was created and has ID - - if !_autopopulate_ip.nil? && _autopopulate_ip.fetch(:id, nil)&.nonzero? - # IP address was created, need to associate it as the primary IP for the device - _primary_ip_data = { _key_ip&.ipv6? ? :primary_ip6 : :primary_ip4 => _autopopulate_ip[:id] } - if (_ip_primary_reponse = _nb.patch("#{_autopopulate_manuf[:vm] ? 'virtualization/virtual-machines' : 'dcim/devices'}/#{_autopopulate_device[:id]}/", _primary_ip_data.to_json, _nb_headers).body) && - _ip_primary_reponse.is_a?(Hash) && - _ip_primary_reponse.has_key?(:id) + end + + # # now we must also have the device type ID + if _autopopulate_dtype&.fetch(:id, nil)&.nonzero? + + # create the device + _device_name = _autopopulate_hostname.to_s.empty? ? 
"#{_autopopulate_manuf[:name]} @ #{_key}" : "#{_autopopulate_hostname} @ #{_key}" + _device_data = { :name => _device_name, + :device_type => _autopopulate_dtype[:id], + :role => _autopopulate_role[:id], + :site => _autopopulate_site[:id], + :status => "staged" } + if (_device_create_response = @netbox_conn.post('dcim/devices/', _device_data.to_json, @netbox_headers).body) && + _device_create_response.is_a?(Hash) && + _device_create_response.has_key?(:id) then - _autopopulate_device = _ip_create_reponse + _autopopulate_device = _device_create_response end - end # check if the IP address was created and has an ID - - end # check if device was created and has ID - # yield return value for cache_hash getset - _lookup_result - } + end # _autopopulate_dtype[:id] is valid + + end # _autopopulate_manuf[:id] is valid + + end # virtual machine vs. regular device + + end # site and role are valid + + rescue Faraday::Error + # give up aka do nothing + _exception_error_general = true + end + + if !_autopopulate_device.nil? + # we created a device, so send it back out as the result for the event as well + _devices << { :name => _autopopulate_device&.fetch(:name, _autopopulate_device&.fetch(:display, nil)), + :id => _autopopulate_device&.fetch(:id, nil), + :url => _autopopulate_device&.fetch(:url, nil), + :site => _autopopulate_site&.fetch(:name, nil), + :role => _autopopulate_role&.fetch(:name, nil), + :device_type => _autopopulate_dtype&.fetch(:name, nil), + :manufacturer => _autopopulate_manuf&.fetch(:name, nil), + :details => _verbose ? _autopopulate_device : nil } + end # _autopopulate_device was not nil (i.e., we autocreated a device) + + end # _autopopulate turned on and no results found + + _devices = collect_values(crush(_devices)) + _devices.fetch(:service, [])&.flatten!&.uniq! 
+ _result = _devices + end # _lookup_type == :ip_device + + # this || is because we are going to need to do the VRF lookup if we're autopopulating + # as well as if we're specifically requested to do that enrichment + + if (_lookup_type == :ip_vrf) || !_autopopulate_device.nil? + ################################################################################# + # retrieve the list VRFs containing IP address prefixes containing the search key + _vrfs = Array.new + _query = { :contains => _key, + :offset => 0, + :limit => _page_size } + _query[:site_n] = _lookup_site unless _lookup_site.nil? || _lookup_site.empty? + begin + while true do + if (_prefixes_response = @netbox_conn.get('ipam/prefixes/', _query).body) && + _prefixes_response.is_a?(Hash) + then + _tmp_prefixes = _prefixes_response.fetch(:results, []) + _tmp_prefixes.each do |p| + if (_vrf = p.fetch(:vrf, nil)) + # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } + # if _verbose, include entire object as :details + _vrfs << { :name => _vrf.fetch(:name, _vrf.fetch(:display, nil)), + :id => _vrf.fetch(:id, nil), + :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), + :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), + :url => p.fetch(:url, _vrf.fetch(:url, nil)), + :details => _verbose ? _vrf.merge({:prefix => p.tap { |h| h.delete(:vrf) }}) : nil } + end + end + _query[:offset] += _tmp_prefixes.length() + break unless (_tmp_prefixes.length() >= _page_size) + else + break + end + end + rescue Faraday::Error + # give up aka do nothing + _exception_error_general = true + end + _vrfs = collect_values(crush(_vrfs)) + _result = _vrfs unless (_lookup_type != :ip_vrf) + end # _lookup_type == :ip_vrf + + if !_autopopulate_device.nil? && _autopopulate_device.fetch(:id, nil)&.nonzero? 
+ # device has been created, we need to create an interface for it + _interface_data = { _autopopulate_manuf[:vm] ? :virtual_machine : :device => _autopopulate_device[:id], + :name => "e0", + :type => "other" } + if !_autopopulate_mac.nil? && !_autopopulate_mac.empty? + _interface_data[:mac_address] = _autopopulate_mac.is_a?(Array) ? _autopopulate_mac.first : _autopopulate_mac + end + if !_vrfs.nil? && !_vrfs.empty? + _interface_data[:vrf] = _vrfs.fetch(:id, []).first + end + if (_interface_create_reponse = @netbox_conn.post(_autopopulate_manuf[:vm] ? 'virtualization/interfaces/' : 'dcim/interfaces/', _interface_data.to_json, @netbox_headers).body) && + _interface_create_reponse.is_a?(Hash) && + _interface_create_reponse.has_key?(:id) + then + _autopopulate_interface = _interface_create_reponse + end + + if !_autopopulate_interface.nil? && _autopopulate_interface.fetch(:id, nil)&.nonzero? + # interface has been created, we need to create an IP address for it + _ip_data = { :address => "#{_key}/#{_key_ip&.prefix()}", + :assigned_object_type => _autopopulate_manuf[:vm] ? "virtualization.vminterface" : "dcim.interface", + :assigned_object_id => _autopopulate_interface[:id], + :status => "active" } + if (_vrf = _autopopulate_interface.fetch(:vrf, nil)) && + (_vrf.has_key?(:id)) + then + _ip_data[:vrf] = _vrf[:id] + end + if (_ip_create_reponse = @netbox_conn.post('ipam/ip-addresses/', _ip_data.to_json, @netbox_headers).body) && + _ip_create_reponse.is_a?(Hash) && + _ip_create_reponse.has_key?(:id) + then + _autopopulate_ip = _ip_create_reponse + end + end # check if interface was created and has ID + + if !_autopopulate_ip.nil? && _autopopulate_ip.fetch(:id, nil)&.nonzero? + # IP address was created, need to associate it as the primary IP for the device + _primary_ip_data = { _key_ip&.ipv6? ? :primary_ip6 : :primary_ip4 => _autopopulate_ip[:id] } + if (_ip_primary_reponse = @netbox_conn.patch("#{_autopopulate_manuf[:vm] ? 
'virtualization/virtual-machines' : 'dcim/devices'}/#{_autopopulate_device[:id]}/", _primary_ip_data.to_json, @netbox_headers).body) && + _ip_primary_reponse.is_a?(Hash) && + _ip_primary_reponse.has_key?(:id) + then + _autopopulate_device = _ip_create_reponse + end + end # check if the IP address was created and has an ID + + end # check if device was created and has ID + + if _exception_error_connection && !@netbox_conn_resetting + @netbox_conn_lock.with_write_lock { + if !@netbox_conn_resetting + @netbox_conn_needs_reset = true + end + } + end + } # @netbox_conn_lock.with_read_lock if !_result.nil? && _result.has_key?(:url) && !_result[:url]&.empty? _result[:url].map! { |u| u.delete_prefix(@netbox_url_base).gsub('/api/', '/') } From 883147663db6eaa91130689027793981df728dd1 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Fri, 27 Oct 2023 07:01:09 -0600 Subject: [PATCH 07/82] work in pgoress for address issues with NetBox database and Logstash's NetBox cache (idaholab/Malcolm#259) --- logstash/ruby/netbox_enrich.rb | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/logstash/ruby/netbox_enrich.rb b/logstash/ruby/netbox_enrich.rb index 117f88094..2d071a970 100644 --- a/logstash/ruby/netbox_enrich.rb +++ b/logstash/ruby/netbox_enrich.rb @@ -231,11 +231,13 @@ def filter(event) _exception_error_general = false _exception_error_connection = false - @netbox_conn_lock.with_read_lock { + @netbox_conn_lock.acquire_read_lock + begin # make sure the connection to the NetBox API exists and wasn't flagged for reconnect if @netbox_conn.nil? || @netbox_conn_needs_reset - @netbox_conn_lock.with_write_lock { + @netbox_conn_lock.acquire_write_lock + begin if @netbox_conn.nil? || @netbox_conn_needs_reset begin # we need to reconnect to the NetBox API @@ -250,7 +252,9 @@ def filter(event) @netbox_conn_needs_reset = @netbox_conn.nil? 
end end # connection check in write lock - } # @netbox_conn_lock.with_write_lock + ensure + @netbox_conn_lock.release_write_lock + end end # connection check in read lock # handle :ip_device first, because if we're doing autopopulate we're also going to use @@ -719,13 +723,18 @@ def filter(event) end # check if device was created and has ID if _exception_error_connection && !@netbox_conn_resetting - @netbox_conn_lock.with_write_lock { + @netbox_conn_lock.acquire_write_lock + begin if !@netbox_conn_resetting @netbox_conn_needs_reset = true end - } + ensure + @netbox_conn_lock.release_write_lock + end end - } # @netbox_conn_lock.with_read_lock + ensure + @netbox_conn_lock.release_read_lock + end if !_result.nil? && _result.has_key?(:url) && !_result[:url]&.empty? _result[:url].map! { |u| u.delete_prefix(@netbox_url_base).gsub('/api/', '/') } From 20a89d58661a3e8a4b9a3351f7c560ab64d9bd88 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Fri, 27 Oct 2023 07:13:33 -0600 Subject: [PATCH 08/82] work in pgoress for address issues with NetBox database and Logstash's NetBox cache, should fix locking issues (idaholab/Malcolm#259) --- logstash/ruby/netbox_enrich.rb | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/logstash/ruby/netbox_enrich.rb b/logstash/ruby/netbox_enrich.rb index 2d071a970..96e4dc31e 100644 --- a/logstash/ruby/netbox_enrich.rb +++ b/logstash/ruby/netbox_enrich.rb @@ -236,6 +236,7 @@ def filter(event) # make sure the connection to the NetBox API exists and wasn't flagged for reconnect if @netbox_conn.nil? || @netbox_conn_needs_reset + @netbox_conn_lock.release_read_lock @netbox_conn_lock.acquire_write_lock begin if @netbox_conn.nil? 
|| @netbox_conn_needs_reset @@ -254,6 +255,7 @@ def filter(event) end # connection check in write lock ensure @netbox_conn_lock.release_write_lock + @netbox_conn_lock.acquire_read_lock end end # connection check in read lock @@ -723,6 +725,7 @@ def filter(event) end # check if device was created and has ID if _exception_error_connection && !@netbox_conn_resetting + @netbox_conn_lock.release_read_lock @netbox_conn_lock.acquire_write_lock begin if !@netbox_conn_resetting @@ -730,6 +733,7 @@ def filter(event) end ensure @netbox_conn_lock.release_write_lock + @netbox_conn_lock.acquire_read_lock end end ensure From 16d5053e03de8b2e53c9632de2328fc2dc0248f6 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Fri, 27 Oct 2023 09:52:15 -0600 Subject: [PATCH 09/82] specify lru_redux in the gemspec --- Dockerfiles/logstash.Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfiles/logstash.Dockerfile b/Dockerfiles/logstash.Dockerfile index 67e1ed0c4..cebe8a6ee 100644 --- a/Dockerfiles/logstash.Dockerfile +++ b/Dockerfiles/logstash.Dockerfile @@ -66,7 +66,7 @@ RUN set -x && \ echo "gem 'concurrent-ruby'" >> /usr/share/logstash/Gemfile && \ echo "gem 'deep_merge'" >> /usr/share/logstash/Gemfile && \ echo "gem 'fuzzy-string-match'" >> /usr/share/logstash/Gemfile && \ - echo "gem 'lru_cache'" >> /usr/share/logstash/Gemfile && \ + echo "gem 'lru_redux'" >> /usr/share/logstash/Gemfile && \ echo "gem 'psych'" >> /usr/share/logstash/Gemfile && \ echo "gem 'stringex'" >> /usr/share/logstash/Gemfile && \ /usr/share/logstash/bin/ruby -S bundle install && \ From 92b4b34112d90fbc8e6d24ade7620abe95c29ab8 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Fri, 27 Oct 2023 10:21:44 -0600 Subject: [PATCH 10/82] address issues with NetBox database and Logstash's NetBox cache (idaholab/Malcolm#259); restore caching for performance reasons, but decrease TTL significantly and allow it to be specified via environment variable --- Dockerfiles/logstash.Dockerfile | 4 + 
config/logstash.env.example | 3 + logstash/pipelines/enrichment/21_netbox.conf | 8 + logstash/ruby/netbox_enrich.rb | 1011 +++++++++--------- 4 files changed, 514 insertions(+), 512 deletions(-) diff --git a/Dockerfiles/logstash.Dockerfile b/Dockerfiles/logstash.Dockerfile index cebe8a6ee..0c2c9258c 100644 --- a/Dockerfiles/logstash.Dockerfile +++ b/Dockerfiles/logstash.Dockerfile @@ -32,6 +32,8 @@ ARG LOGSTASH_NETBOX_ENRICHMENT=false ARG LOGSTASH_NETBOX_ENRICHMENT_VERBOSE=false ARG LOGSTASH_NETBOX_ENRICHMENT_LOOKUP_SERVICE=true ARG LOGSTASH_NETBOX_AUTO_POPULATE=false +ARG LOGSTASH_NETBOX_CACHE_SIZE=1000 +ARG LOGSTASH_NETBOX_CACHE_TTL=30 ENV LOGSTASH_ENRICHMENT_PIPELINE $LOGSTASH_ENRICHMENT_PIPELINE ENV LOGSTASH_PARSE_PIPELINE_ADDRESSES $LOGSTASH_PARSE_PIPELINE_ADDRESSES @@ -42,6 +44,8 @@ ENV LOGSTASH_NETBOX_ENRICHMENT $LOGSTASH_NETBOX_ENRICHMENT ENV LOGSTASH_NETBOX_ENRICHMENT_VERBOSE $LOGSTASH_NETBOX_ENRICHMENT_VERBOSE ENV LOGSTASH_NETBOX_ENRICHMENT_LOOKUP_SERVICE $LOGSTASH_NETBOX_ENRICHMENT_LOOKUP_SERVICE ENV LOGSTASH_NETBOX_AUTO_POPULATE $LOGSTASH_NETBOX_AUTO_POPULATE +ENV LOGSTASH_NETBOX_CACHE_SIZE $LOGSTASH_NETBOX_CACHE_SIZE +ENV LOGSTASH_NETBOX_CACHE_TTL $LOGSTASH_NETBOX_CACHE_TTL USER root diff --git a/config/logstash.env.example b/config/logstash.env.example index 6370a05c1..b5e6f7e56 100644 --- a/config/logstash.env.example +++ b/config/logstash.env.example @@ -13,5 +13,8 @@ LOGSTASH_REVERSE_DNS=false LOGSTASH_NETBOX_ENRICHMENT=false # Whether or not unobserved network entities in Logstash data will be used to populate NetBox LOGSTASH_NETBOX_AUTO_POPULATE=false +# Caching parameters for NetBox's LogStash lookups +LOGSTASH_NETBOX_CACHE_SIZE=1000 +LOGSTASH_NETBOX_CACHE_TTL=30 # Logstash memory allowance and other Java options LS_JAVA_OPTS=-server -Xms2500m -Xmx2500m -Xss1536k -XX:-HeapDumpOnOutOfMemoryError -Djava.security.egd=file:/dev/./urandom -Dlog4j.formatMsgNoLookups=true \ No newline at end of file diff --git 
a/logstash/pipelines/enrichment/21_netbox.conf b/logstash/pipelines/enrichment/21_netbox.conf index 50731cc12..66c0f34db 100644 --- a/logstash/pipelines/enrichment/21_netbox.conf +++ b/logstash/pipelines/enrichment/21_netbox.conf @@ -31,6 +31,8 @@ filter { "lookup_site_env" => "NETBOX_DEFAULT_SITE" "verbose_env" => "LOGSTASH_NETBOX_ENRICHMENT_VERBOSE" "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "LOGSTASH_NETBOX_CACHE_SIZE" + "cache_ttl_env" => "LOGSTASH_NETBOX_CACHE_TTL" } } ruby { @@ -44,6 +46,8 @@ filter { "lookup_service" => "false" "verbose_env" => "LOGSTASH_NETBOX_ENRICHMENT_VERBOSE" "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "LOGSTASH_NETBOX_CACHE_SIZE" + "cache_ttl_env" => "LOGSTASH_NETBOX_CACHE_TTL" "autopopulate_env" => "LOGSTASH_NETBOX_AUTO_POPULATE" "default_manuf_env" => "NETBOX_DEFAULT_MANUFACTURER" "default_dtype_env" => "NETBOX_DEFAULT_DEVICE_TYPE" @@ -66,6 +70,8 @@ filter { "lookup_site_env" => "NETBOX_DEFAULT_SITE" "verbose_env" => "LOGSTASH_NETBOX_ENRICHMENT_VERBOSE" "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "LOGSTASH_NETBOX_CACHE_SIZE" + "cache_ttl_env" => "LOGSTASH_NETBOX_CACHE_TTL" } } ruby { @@ -80,6 +86,8 @@ filter { "lookup_service_port_source" => "[destination][port]" "verbose_env" => "LOGSTASH_NETBOX_ENRICHMENT_VERBOSE" "netbox_token_env" => "SUPERUSER_API_TOKEN" + "cache_size_env" => "LOGSTASH_NETBOX_CACHE_SIZE" + "cache_ttl_env" => "LOGSTASH_NETBOX_CACHE_TTL" "autopopulate_env" => "LOGSTASH_NETBOX_AUTO_POPULATE" "default_manuf_env" => "NETBOX_DEFAULT_MANUFACTURER" "default_dtype_env" => "NETBOX_DEFAULT_DEVICE_TYPE" diff --git a/logstash/ruby/netbox_enrich.rb b/logstash/ruby/netbox_enrich.rb index 96e4dc31e..fedc370dd 100644 --- a/logstash/ruby/netbox_enrich.rb +++ b/logstash/ruby/netbox_enrich.rb @@ -3,7 +3,6 @@ def concurrency end def register(params) - require 'concurrent' require 'date' require 'faraday' require 'fuzzystringmatch' @@ -46,6 +45,28 @@ def 
register(params) # API parameters @page_size = params.fetch("page_size", 50) + # caching parameters (default cache size = 1000, default cache TTL = 30 seconds) + _cache_size_val = params["cache_size"] + _cache_size_env = params["cache_size_env"] + if (!_cache_size_val.is_a?(Integer) || _cache_size_val <= 0) && !_cache_size_env.nil? + _cache_size_val = Integer(ENV[_cache_size_env], exception: false) + end + if _cache_size_val.is_a?(Integer) && (_cache_size_val > 0) + @cache_size = _cache_size_val + else + @cache_size = 1000 + end + _cache_ttl_val = params["cache_ttl"] + _cache_ttl_env = params["cache_ttl_env"] + if (!_cache_ttl_val.is_a?(Integer) || _cache_ttl_val <= 0) && !_cache_ttl_env.nil? + _cache_ttl_val = Integer(ENV[_cache_ttl_env], exception: false) + end + if _cache_ttl_val.is_a?(Integer) && (_cache_ttl_val > 0) + @cache_ttl = _cache_ttl_val + else + @cache_ttl = 30 + end + # target field to store looked-up value @target = params["target"] @@ -70,13 +91,6 @@ def register(params) @netbox_url = params.fetch("netbox_url", "http://netbox:8080/netbox/api").delete_suffix("/") @netbox_url_suffix = "/netbox/api" @netbox_url_base = @netbox_url.delete_suffix(@netbox_url_suffix) - @netbox_headers = { 'Content-Type': 'application/json' } - - # netbox connection (will be initialized in a thread-safe manner in filter) - @netbox_conn = nil - @netbox_conn_lock = Concurrent::ReentrantReadWriteLock.new - @netbox_conn_needs_reset = false - @netbox_conn_resetting = false # connection token (either specified directly or read from ENV via netbox_token_env) @netbox_token = params["netbox_token"] @@ -85,6 +99,9 @@ def register(params) @netbox_token = ENV[_netbox_token_env] end + # hash of lookup types (from @lookup_type), each of which contains the respective looked-up values + @cache_hash = LruRedux::ThreadSafeCache.new(params.fetch("lookup_cache_size", 512)) + # these are used for autopopulation only, not lookup/enrichment # autopopulate - either specified directly or read from 
ENV via autopopulate_env @@ -180,13 +197,13 @@ def register(params) @autopopulate_create_manuf = [1, true, '1', 'true', 't', 'on', 'enabled'].include?(_autopopulate_create_manuf_str.to_s.downcase) # case-insensitive hash of OUIs (https://standards-oui.ieee.org/) to Manufacturers (https://demo.netbox.dev/static/docs/core-functionality/device-types/) - @manuf_hash = LruRedux::ThreadSafeCache.new(params.fetch("manuf_cache_size", 2048)) + @manuf_hash = LruRedux::TTL::ThreadSafeCache.new(params.fetch("manuf_cache_size", 2048), @cache_ttl) # case-insensitive hash of role names to IDs - @role_hash = LruRedux::ThreadSafeCache.new(params.fetch("role_cache_size", 128)) + @role_hash = LruRedux::TTL::ThreadSafeCache.new(params.fetch("role_cache_size", 256), @cache_ttl) # case-insensitive hash of site names to IDs - @site_hash = LruRedux::ThreadSafeCache.new(params.fetch("site_cache_size", 128)) + @site_hash = LruRedux::TTL::ThreadSafeCache.new(params.fetch("site_cache_size", 128), @cache_ttl) # end of autopopulation arguments @@ -202,6 +219,9 @@ def filter(event) _url = @netbox_url _url_base = @netbox_url_base _url_suffix = @netbox_url_suffix + _token = @netbox_token + _cache_size = @cache_size + _cache_ttl = @cache_ttl _page_size = @page_size _verbose = @verbose _lookup_type = @lookup_type @@ -218,527 +238,494 @@ def filter(event) _autopopulate_mac = event.get("#{@source_mac}") _autopopulate_oui = event.get("#{@source_oui}") - _result = nil - _autopopulate_device = nil - _autopopulate_role = nil - _autopopulate_dtype = nil - _autopopulate_interface = nil - _autopopulate_ip = nil - _autopopulate_manuf = nil - _autopopulate_site = nil - _vrfs = nil - _devices = nil - _exception_error_general = false - _exception_error_connection = false - - @netbox_conn_lock.acquire_read_lock - begin - - # make sure the connection to the NetBox API exists and wasn't flagged for reconnect - if @netbox_conn.nil? 
|| @netbox_conn_needs_reset - @netbox_conn_lock.release_read_lock - @netbox_conn_lock.acquire_write_lock - begin - if @netbox_conn.nil? || @netbox_conn_needs_reset - begin - # we need to reconnect to the NetBox API - @netbox_conn_resetting = true - @netbox_conn = Faraday.new(_url) do |conn| - conn.request :authorization, 'Token', @netbox_token - conn.request :url_encoded - conn.response :json, :parser_options => { :symbolize_names => true } - end - ensure - @netbox_conn_resetting = false - @netbox_conn_needs_reset = @netbox_conn.nil? - end - end # connection check in write lock - ensure - @netbox_conn_lock.release_write_lock - @netbox_conn_lock.acquire_read_lock - end - end # connection check in read lock - - # handle :ip_device first, because if we're doing autopopulate we're also going to use - # some of the logic from :ip_vrf - - if (_lookup_type == :ip_device) - ################################################################################# - # retrieve the list of IP addresses where address matches the search key, limited to "assigned" addresses. - # then, for those IP addresses, search for devices pertaining to the interfaces assigned to each - # IP address (e.g., ipam.ip_address -> dcim.interface -> dcim.device, or - # ipam.ip_address -> virtualization.interface -> virtualization.virtual_machine) - _devices = Array.new - _query = { :address => _key, - :offset => 0, - :limit => _page_size } - begin - while true do - if (_ip_addresses_response = @netbox_conn.get('ipam/ip-addresses/', _query).body) && - _ip_addresses_response.is_a?(Hash) - then - _tmp_ip_addresses = _ip_addresses_response.fetch(:results, []) - _tmp_ip_addresses.each do |i| - _is_device = nil - if (_obj = i.fetch(:assigned_object, nil)) && - ((_device_obj = _obj.fetch(:device, nil)) || - (_virtualized_obj = _obj.fetch(:virtual_machine, nil))) - then - _is_device = !_device_obj.nil? - _device = _is_device ? 
_device_obj : _virtualized_obj - # if we can, follow the :assigned_object's "full" device URL to get more information - _device = (_device.has_key?(:url) && (_full_device = @netbox_conn.get(_device[:url].delete_prefix(_url_base).delete_prefix(_url_suffix).delete_prefix("/")).body)) ? _full_device : _device - _device_id = _device.fetch(:id, nil) - _device_site = ((_site = _device.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil) - next unless (_device_site.to_s.downcase == _lookup_site.to_s.downcase) || _lookup_site.nil? || _lookup_site.empty? || _device_site.nil? || _device_site.empty? - # look up service if requested (based on device/vm found and service port) - if (_lookup_service_port > 0) - _services = Array.new - _service_query = { (_is_device ? :device_id : :virtual_machine_id) => _device_id, :port => _lookup_service_port, :offset => 0, :limit => _page_size } + _result = @cache_hash.getset(_lookup_type){ + LruRedux::TTL::ThreadSafeCache.new(_cache_size, _cache_ttl) + }.getset(_key){ + + _nb = Faraday.new(_url) do |conn| + conn.request :authorization, 'Token', _token + conn.request :url_encoded + conn.response :json, :parser_options => { :symbolize_names => true } + end + _nb_headers = { 'Content-Type': 'application/json' } + + _lookup_result = nil + _autopopulate_device = nil + _autopopulate_role = nil + _autopopulate_dtype = nil + _autopopulate_interface = nil + _autopopulate_ip = nil + _autopopulate_manuf = nil + _autopopulate_site = nil + _vrfs = nil + _devices = nil + _exception_error = false + + # handle :ip_device first, because if we're doing autopopulate we're also going to use + # some of the logic from :ip_vrf + + if (_lookup_type == :ip_device) + ################################################################################# + # retrieve the list of IP addresses where address matches the search key, limited to "assigned" addresses. 
+ # then, for those IP addresses, search for devices pertaining to the interfaces assigned to each + # IP address (e.g., ipam.ip_address -> dcim.interface -> dcim.device, or + # ipam.ip_address -> virtualization.interface -> virtualization.virtual_machine) + _devices = Array.new + _query = { :address => _key, + :offset => 0, + :limit => _page_size } + begin while true do - if (_services_response = @netbox_conn.get('ipam/services/', _service_query).body) && - _services_response.is_a?(Hash) + if (_ip_addresses_response = _nb.get('ipam/ip-addresses/', _query).body) && + _ip_addresses_response.is_a?(Hash) then - _tmp_services = _services_response.fetch(:results, []) - _services.unshift(*_tmp_services) unless _tmp_services.nil? || _tmp_services.empty? - _service_query[:offset] += _tmp_services.length() - break unless (_tmp_services.length() >= _page_size) + _tmp_ip_addresses = _ip_addresses_response.fetch(:results, []) + _tmp_ip_addresses.each do |i| + _is_device = nil + if (_obj = i.fetch(:assigned_object, nil)) && + ((_device_obj = _obj.fetch(:device, nil)) || + (_virtualized_obj = _obj.fetch(:virtual_machine, nil))) + then + _is_device = !_device_obj.nil? + _device = _is_device ? _device_obj : _virtualized_obj + # if we can, follow the :assigned_object's "full" device URL to get more information + _device = (_device.has_key?(:url) && (_full_device = _nb.get(_device[:url].delete_prefix(_url_base).delete_prefix(_url_suffix).delete_prefix("/")).body)) ? _full_device : _device + _device_id = _device.fetch(:id, nil) + _device_site = ((_site = _device.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil) + next unless (_device_site.to_s.downcase == _lookup_site.to_s.downcase) || _lookup_site.nil? || _lookup_site.empty? || _device_site.nil? || _device_site.empty? + # look up service if requested (based on device/vm found and service port) + if (_lookup_service_port > 0) + _services = Array.new + _service_query = { (_is_device ? 
:device_id : :virtual_machine_id) => _device_id, :port => _lookup_service_port, :offset => 0, :limit => _page_size } + while true do + if (_services_response = _nb.get('ipam/services/', _service_query).body) && + _services_response.is_a?(Hash) + then + _tmp_services = _services_response.fetch(:results, []) + _services.unshift(*_tmp_services) unless _tmp_services.nil? || _tmp_services.empty? + _service_query[:offset] += _tmp_services.length() + break unless (_tmp_services.length() >= _page_size) + else + break + end + end + _device[:service] = _services + end + # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } + # if _verbose, include entire object as :details + _devices << { :name => _device.fetch(:name, _device.fetch(:display, nil)), + :id => _device_id, + :url => _device.fetch(:url, nil), + :service => _device.fetch(:service, []).map {|s| s.fetch(:name, s.fetch(:display, nil)) }, + :site => _device_site, + :role => ((_role = _device.fetch(:role, nil)) && _role&.has_key?(:name)) ? _role[:name] : _role&.fetch(:display, nil), + :cluster => ((_cluster = _device.fetch(:cluster, nil)) && _cluster&.has_key?(:name)) ? _cluster[:name] : _cluster&.fetch(:display, nil), + :device_type => ((_dtype = _device.fetch(:device_type, nil)) && _dtype&.has_key?(:name)) ? _dtype[:name] : _dtype&.fetch(:display, nil), + :manufacturer => ((_manuf = _device.dig(:device_type, :manufacturer)) && _manuf&.has_key?(:name)) ? _manuf[:name] : _manuf&.fetch(:display, nil), + :details => _verbose ? _device : nil } + end + end + _query[:offset] += _tmp_ip_addresses.length() + break unless (_tmp_ip_addresses.length() >= _page_size) else + # weird/bad response, bail + _exception_error = true break end - end - _device[:service] = _services + end # while true + rescue Faraday::Error + # give up aka do nothing + _exception_error = true end - # non-verbose output is flatter with just names { :name => "name", :id => "id", ... 
} - # if _verbose, include entire object as :details - _devices << { :name => _device.fetch(:name, _device.fetch(:display, nil)), - :id => _device_id, - :url => _device.fetch(:url, nil), - :service => _device.fetch(:service, []).map {|s| s.fetch(:name, s.fetch(:display, nil)) }, - :site => _device_site, - :role => ((_role = _device.fetch(:role, nil)) && _role&.has_key?(:name)) ? _role[:name] : _role&.fetch(:display, nil), - :cluster => ((_cluster = _device.fetch(:cluster, nil)) && _cluster&.has_key?(:name)) ? _cluster[:name] : _cluster&.fetch(:display, nil), - :device_type => ((_dtype = _device.fetch(:device_type, nil)) && _dtype&.has_key?(:name)) ? _dtype[:name] : _dtype&.fetch(:display, nil), - :manufacturer => ((_manuf = _device.dig(:device_type, :manufacturer)) && _manuf&.has_key?(:name)) ? _manuf[:name] : _manuf&.fetch(:display, nil), - :details => _verbose ? _device : nil } - end - end - _query[:offset] += _tmp_ip_addresses.length() - break unless (_tmp_ip_addresses.length() >= _page_size) - else - # weird/bad response, bail - _exception_error_general = true - break - end - end # while true - rescue Faraday::Error - # give up aka do nothing - _exception_error_general = true - end - - if _autopopulate && (_query[:offset] == 0) && !_exception_error_general && !_exception_error_connection && _key_ip&.private? - - # no results found, autopopulate enabled, private-space IP address... - # let's create an entry for this device - - # if MAC is set but OUI is not, do a quick lookup - if (!_autopopulate_mac.nil? && !_autopopulate_mac.empty?) && - (_autopopulate_oui.nil? || _autopopulate_oui.empty?) - then - case _autopopulate_mac - when String - if @macregex.match?(_autopopulate_mac) - _macint = mac_string_to_integer(_autopopulate_mac) - _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} - _autopopulate_oui = _vendor[2] unless _vendor.nil? 
- end # _autopopulate_mac matches @macregex - when Array - _autopopulate_mac.each do |_addr| - if @macregex.match?(_addr) - _macint = mac_string_to_integer(_addr) - _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} - if !_vendor.nil? - _autopopulate_oui = _vendor[2] - break - end # !_vendor.nil? - end # _addr matches @macregex - end # _autopopulate_mac.each do - end # case statement _autopopulate_mac String vs. Array - end # MAC is populated but OUI is not - - # match/look up manufacturer based on OUI - if !_autopopulate_oui.nil? && !_autopopulate_oui.empty? - - _autopopulate_oui = _autopopulate_oui.first() unless !_autopopulate_oui.is_a?(Array) - - # does it look like a VM or a regular device? - if @vm_namesarray.include?(_autopopulate_oui.downcase) - # looks like this is probably a virtual machine - _autopopulate_manuf = { :name => _autopopulate_oui, - :match => 1.0, - :vm => true, - :id => nil } - - else - # looks like this is not a virtual machine (or we can't tell) so assume its' a regular device - _autopopulate_manuf = @manuf_hash.getset(_autopopulate_oui) { - _fuzzy_matcher = FuzzyStringMatch::JaroWinkler.create( :pure ) - _manufs = Array.new - # fetch the manufacturers to do the comparison. this is a lot of work - # and not terribly fast but once the hash it populated it shouldn't happen too often - _query = { :offset => 0, - :limit => _page_size } - begin - while true do - if (_manufs_response = @netbox_conn.get('dcim/manufacturers/', _query).body) && - _manufs_response.is_a?(Hash) + + if _autopopulate && (_query[:offset] == 0) && !_exception_error && _key_ip&.private? + + # no results found, autopopulate enabled, private-space IP address... + # let's create an entry for this device + + # if MAC is set but OUI is not, do a quick lookup + if (!_autopopulate_mac.nil? && !_autopopulate_mac.empty?) && + (_autopopulate_oui.nil? || _autopopulate_oui.empty?) 
then - _tmp_manufs = _manufs_response.fetch(:results, []) - _tmp_manufs.each do |_manuf| - _tmp_name = _manuf.fetch(:name, _manuf.fetch(:display, nil)) - _manufs << { :name => _tmp_name, - :id => _manuf.fetch(:id, nil), - :url => _manuf.fetch(:url, nil), - :match => _fuzzy_matcher.getDistance(_tmp_name.to_s.downcase, _autopopulate_oui.to_s.downcase), - :vm => false - } + case _autopopulate_mac + when String + if @macregex.match?(_autopopulate_mac) + _macint = mac_string_to_integer(_autopopulate_mac) + _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} + _autopopulate_oui = _vendor[2] unless _vendor.nil? + end # _autopopulate_mac matches @macregex + when Array + _autopopulate_mac.each do |_addr| + if @macregex.match?(_addr) + _macint = mac_string_to_integer(_addr) + _vendor = @macarray.bsearch{ |_vendormac| (_macint < _vendormac[0]) ? -1 : ((_macint > _vendormac[1]) ? 1 : 0)} + if !_vendor.nil? + _autopopulate_oui = _vendor[2] + break + end # !_vendor.nil? + end # _addr matches @macregex + end # _autopopulate_mac.each do + end # case statement _autopopulate_mac String vs. Array + end # MAC is populated but OUI is not + + # match/look up manufacturer based on OUI + if !_autopopulate_oui.nil? && !_autopopulate_oui.empty? + + _autopopulate_oui = _autopopulate_oui.first() unless !_autopopulate_oui.is_a?(Array) + + # does it look like a VM or a regular device? + if @vm_namesarray.include?(_autopopulate_oui.downcase) + # looks like this is probably a virtual machine + _autopopulate_manuf = { :name => _autopopulate_oui, + :match => 1.0, + :vm => true, + :id => nil } + + else + # looks like this is not a virtual machine (or we can't tell) so assume its' a regular device + _autopopulate_manuf = @manuf_hash.getset(_autopopulate_oui) { + _fuzzy_matcher = FuzzyStringMatch::JaroWinkler.create( :pure ) + _manufs = Array.new + # fetch the manufacturers to do the comparison. 
this is a lot of work + # and not terribly fast but once the hash it populated it shouldn't happen too often + _query = { :offset => 0, + :limit => _page_size } + begin + while true do + if (_manufs_response = _nb.get('dcim/manufacturers/', _query).body) && + _manufs_response.is_a?(Hash) + then + _tmp_manufs = _manufs_response.fetch(:results, []) + _tmp_manufs.each do |_manuf| + _tmp_name = _manuf.fetch(:name, _manuf.fetch(:display, nil)) + _manufs << { :name => _tmp_name, + :id => _manuf.fetch(:id, nil), + :url => _manuf.fetch(:url, nil), + :match => _fuzzy_matcher.getDistance(_tmp_name.to_s.downcase, _autopopulate_oui.to_s.downcase), + :vm => false + } + end + _query[:offset] += _tmp_manufs.length() + break unless (_tmp_manufs.length() >= _page_size) + else + break + end + end + rescue Faraday::Error + # give up aka do nothing + _exception_error = true + end + # return the manuf with the highest match + !_manufs&.empty? ? _manufs.max_by{|k| k[:match] } : nil + } + end # virtual machine vs. regular device + end # _autopopulate_oui specified + + if !_autopopulate_manuf.is_a?(Hash) + # no match was found at ANY match level (empty database or no OUI specified), set default ("unspecified") manufacturer + _autopopulate_manuf = { :name => _autopopulate_create_manuf ? _autopopulate_oui : _autopopulate_default_manuf, + :match => 0.0, + :vm => false, + :id => nil} + end + + # make sure the site and role exists + + _autopopulate_site = @site_hash.getset(_autopopulate_default_site) { + begin + _site = nil + + # look it up first + _query = { :offset => 0, + :limit => 1, + :name => _autopopulate_default_site } + if (_sites_response = _nb.get('dcim/sites/', _query).body) && + _sites_response.is_a?(Hash) && + (_tmp_sites = _sites_response.fetch(:results, [])) && + (_tmp_sites.length() > 0) + then + _site = _tmp_sites.first + end + + if _site.nil? 
+ # the device site is not found, create it + _site_data = { :name => _autopopulate_default_site, + :slug => _autopopulate_default_site.to_url, + :status => "active" } + if (_site_create_response = _nb.post('dcim/sites/', _site_data.to_json, _nb_headers).body) && + _site_create_response.is_a?(Hash) && + _site_create_response.has_key?(:id) + then + _site = _site_create_response + end + end + + rescue Faraday::Error + # give up aka do nothing + _exception_error = true end - _query[:offset] += _tmp_manufs.length() - break unless (_tmp_manufs.length() >= _page_size) - else - break + _site + } + + _autopopulate_role = @role_hash.getset(_autopopulate_default_role) { + begin + _role = nil + + # look it up first + _query = { :offset => 0, + :limit => 1, + :name => _autopopulate_default_role } + if (_roles_response = _nb.get('dcim/device-roles/', _query).body) && + _roles_response.is_a?(Hash) && + (_tmp_roles = _roles_response.fetch(:results, [])) && + (_tmp_roles.length() > 0) + then + _role = _tmp_roles.first + end + + if _role.nil? + # the role is not found, create it + _role_data = { :name => _autopopulate_default_role, + :slug => _autopopulate_default_role.to_url, + :color => "d3d3d3" } + if (_role_create_response = _nb.post('dcim/device-roles/', _role_data.to_json, _nb_headers).body) && + _role_create_response.is_a?(Hash) && + _role_create_response.has_key?(:id) + then + _role = _role_create_response + end + end + + rescue Faraday::Error + # give up aka do nothing + _exception_error = true + end + _role + } + + # we should have found or created the autopopulate role and site + begin + if _autopopulate_site&.fetch(:id, nil)&.nonzero? && + _autopopulate_role&.fetch(:id, nil)&.nonzero? + then + + if _autopopulate_manuf[:vm] + # a virtual machine + _device_name = _autopopulate_hostname.to_s.empty? ? 
"#{_autopopulate_manuf[:name]} @ #{_key}" : "#{_autopopulate_hostname} @ #{_key}" + _device_data = { :name => _device_name, + :site => _autopopulate_site[:id], + :status => "staged" } + if (_device_create_response = _nb.post('virtualization/virtual-machines/', _device_data.to_json, _nb_headers).body) && + _device_create_response.is_a?(Hash) && + _device_create_response.has_key?(:id) + then + _autopopulate_device = _device_create_response + end + + else + # a regular non-vm device + + if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? + # the manufacturer was default (not found) so look it up first + _query = { :offset => 0, + :limit => 1, + :name => _autopopulate_manuf[:name] } + if (_manufs_response = _nb.get('dcim/manufacturers/', _query).body) && + _manufs_response.is_a?(Hash) && + (_tmp_manufs = _manufs_response.fetch(:results, [])) && + (_tmp_manufs.length() > 0) + then + _autopopulate_manuf[:id] = _tmp_manufs.first.fetch(:id, nil) + _autopopulate_manuf[:match] = 1.0 + end + end + + if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? + # the manufacturer is still not found, create it + _manuf_data = { :name => _autopopulate_manuf[:name], + :slug => _autopopulate_manuf[:name].to_url } + if (_manuf_create_response = _nb.post('dcim/manufacturers/', _manuf_data.to_json, _nb_headers).body) && + _manuf_create_response.is_a?(Hash) + then + _autopopulate_manuf[:id] = _manuf_create_response.fetch(:id, nil) + _autopopulate_manuf[:match] = 1.0 + end + end + + # at this point we *must* have the manufacturer ID + if _autopopulate_manuf.fetch(:id, nil)&.nonzero? 
+ + # make sure the desired device type also exists, look it up first + _query = { :offset => 0, + :limit => 1, + :manufacturer_id => _autopopulate_manuf[:id], + :model => _autopopulate_default_dtype } + if (_dtypes_response = _nb.get('dcim/device-types/', _query).body) && + _dtypes_response.is_a?(Hash) && + (_tmp_dtypes = _dtypes_response.fetch(:results, [])) && + (_tmp_dtypes.length() > 0) + then + _autopopulate_dtype = _tmp_dtypes.first + end + + if _autopopulate_dtype.nil? + # the device type is not found, create it + _dtype_data = { :manufacturer => _autopopulate_manuf[:id], + :model => _autopopulate_default_dtype, + :slug => _autopopulate_default_dtype.to_url } + if (_dtype_create_response = _nb.post('dcim/device-types/', _dtype_data.to_json, _nb_headers).body) && + _dtype_create_response.is_a?(Hash) && + _dtype_create_response.has_key?(:id) + then + _autopopulate_dtype = _dtype_create_response + end + end + + # # now we must also have the device type ID + if _autopopulate_dtype&.fetch(:id, nil)&.nonzero? + + # create the device + _device_name = _autopopulate_hostname.to_s.empty? ? "#{_autopopulate_manuf[:name]} @ #{_key}" : "#{_autopopulate_hostname} @ #{_key}" + _device_data = { :name => _device_name, + :device_type => _autopopulate_dtype[:id], + :role => _autopopulate_role[:id], + :site => _autopopulate_site[:id], + :status => "staged" } + if (_device_create_response = _nb.post('dcim/devices/', _device_data.to_json, _nb_headers).body) && + _device_create_response.is_a?(Hash) && + _device_create_response.has_key?(:id) + then + _autopopulate_device = _device_create_response + end + + end # _autopopulate_dtype[:id] is valid + + end # _autopopulate_manuf[:id] is valid + + end # virtual machine vs. 
regular device + + end # site and role are valid + + rescue Faraday::Error + # give up aka do nothing + _exception_error = true end - end - rescue Faraday::Error - # give up aka do nothing - _exception_error_general = true - end - # return the manuf with the highest match - !_manufs&.empty? ? _manufs.max_by{|k| k[:match] } : nil - } - end # virtual machine vs. regular device - end # _autopopulate_oui specified - - if !_autopopulate_manuf.is_a?(Hash) - # no match was found at ANY match level (empty database or no OUI specified), set default ("unspecified") manufacturer - _autopopulate_manuf = { :name => _autopopulate_create_manuf ? _autopopulate_oui : _autopopulate_default_manuf, - :match => 0.0, - :vm => false, - :id => nil} - end - - # make sure the site and role exists - - _autopopulate_site = @site_hash.getset(_autopopulate_default_site) { - begin - _site = nil - - # look it up first - _query = { :offset => 0, - :limit => 1, - :name => _autopopulate_default_site } - if (_sites_response = @netbox_conn.get('dcim/sites/', _query).body) && - _sites_response.is_a?(Hash) && - (_tmp_sites = _sites_response.fetch(:results, [])) && - (_tmp_sites.length() > 0) - then - _site = _tmp_sites.first - end - - if _site.nil? 
- # the device site is not found, create it - _site_data = { :name => _autopopulate_default_site, - :slug => _autopopulate_default_site.to_url, - :status => "active" } - if (_site_create_response = @netbox_conn.post('dcim/sites/', _site_data.to_json, @netbox_headers).body) && - _site_create_response.is_a?(Hash) && - _site_create_response.has_key?(:id) - then - _site = _site_create_response - end - end - - rescue Faraday::Error - # give up aka do nothing - _exception_error_general = true - end - _site - } - - _autopopulate_role = @role_hash.getset(_autopopulate_default_role) { - begin - _role = nil - - # look it up first - _query = { :offset => 0, - :limit => 1, - :name => _autopopulate_default_role } - if (_roles_response = @netbox_conn.get('dcim/device-roles/', _query).body) && - _roles_response.is_a?(Hash) && - (_tmp_roles = _roles_response.fetch(:results, [])) && - (_tmp_roles.length() > 0) - then - _role = _tmp_roles.first - end - - if _role.nil? - # the role is not found, create it - _role_data = { :name => _autopopulate_default_role, - :slug => _autopopulate_default_role.to_url, - :color => "d3d3d3" } - if (_role_create_response = @netbox_conn.post('dcim/device-roles/', _role_data.to_json, @netbox_headers).body) && - _role_create_response.is_a?(Hash) && - _role_create_response.has_key?(:id) - then - _role = _role_create_response - end - end - - rescue Faraday::ConnectionFailed - # give up aka do nothing (and connect next time) - _exception_error_connection = true - rescue Faraday::Error - # give up aka do nothing - _exception_error_general = true - end - _role - } - - # we should have found or created the autopopulate role and site - begin - if _autopopulate_site&.fetch(:id, nil)&.nonzero? && - _autopopulate_role&.fetch(:id, nil)&.nonzero? - then - - if _autopopulate_manuf[:vm] - # a virtual machine - _device_name = _autopopulate_hostname.to_s.empty? ? 
"#{_autopopulate_manuf[:name]} @ #{_key}" : "#{_autopopulate_hostname} @ #{_key}" - _device_data = { :name => _device_name, - :site => _autopopulate_site[:id], - :status => "staged" } - if (_device_create_response = @netbox_conn.post('virtualization/virtual-machines/', _device_data.to_json, @netbox_headers).body) && - _device_create_response.is_a?(Hash) && - _device_create_response.has_key?(:id) - then - _autopopulate_device = _device_create_response - end - else - # a regular non-vm device - - if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? - # the manufacturer was default (not found) so look it up first - _query = { :offset => 0, - :limit => 1, - :name => _autopopulate_manuf[:name] } - if (_manufs_response = @netbox_conn.get('dcim/manufacturers/', _query).body) && - _manufs_response.is_a?(Hash) && - (_tmp_manufs = _manufs_response.fetch(:results, [])) && - (_tmp_manufs.length() > 0) - then - _autopopulate_manuf[:id] = _tmp_manufs.first.fetch(:id, nil) - _autopopulate_manuf[:match] = 1.0 + if !_autopopulate_device.nil? + # we created a device, so send it back out as the result for the event as well + _devices << { :name => _autopopulate_device&.fetch(:name, _autopopulate_device&.fetch(:display, nil)), + :id => _autopopulate_device&.fetch(:id, nil), + :url => _autopopulate_device&.fetch(:url, nil), + :site => _autopopulate_site&.fetch(:name, nil), + :role => _autopopulate_role&.fetch(:name, nil), + :device_type => _autopopulate_dtype&.fetch(:name, nil), + :manufacturer => _autopopulate_manuf&.fetch(:name, nil), + :details => _verbose ? _autopopulate_device : nil } + end # _autopopulate_device was not nil (i.e., we autocreated a device) + + end # _autopopulate turned on and no results found + + _devices = collect_values(crush(_devices)) + _devices.fetch(:service, [])&.flatten!&.uniq! 
+ _lookup_result = _devices + end # _lookup_type == :ip_device + + # this || is because we are going to need to do the VRF lookup if we're autopopulating + # as well as if we're specifically requested to do that enrichment + + if (_lookup_type == :ip_vrf) || !_autopopulate_device.nil? + ################################################################################# + # retrieve the list VRFs containing IP address prefixes containing the search key + _vrfs = Array.new + _query = { :contains => _key, + :offset => 0, + :limit => _page_size } + _query[:site_n] = _lookup_site unless _lookup_site.nil? || _lookup_site.empty? + begin + while true do + if (_prefixes_response = _nb.get('ipam/prefixes/', _query).body) && + _prefixes_response.is_a?(Hash) + then + _tmp_prefixes = _prefixes_response.fetch(:results, []) + _tmp_prefixes.each do |p| + if (_vrf = p.fetch(:vrf, nil)) + # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } + # if _verbose, include entire object as :details + _vrfs << { :name => _vrf.fetch(:name, _vrf.fetch(:display, nil)), + :id => _vrf.fetch(:id, nil), + :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), + :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), + :url => p.fetch(:url, _vrf.fetch(:url, nil)), + :details => _verbose ? _vrf.merge({:prefix => p.tap { |h| h.delete(:vrf) }}) : nil } + end + end + _query[:offset] += _tmp_prefixes.length() + break unless (_tmp_prefixes.length() >= _page_size) + else + break + end + end + rescue Faraday::Error + # give up aka do nothing + _exception_error = true end - end - - if !_autopopulate_manuf.fetch(:id, nil)&.nonzero? 
- # the manufacturer is still not found, create it - _manuf_data = { :name => _autopopulate_manuf[:name], - :slug => _autopopulate_manuf[:name].to_url } - if (_manuf_create_response = @netbox_conn.post('dcim/manufacturers/', _manuf_data.to_json, @netbox_headers).body) && - _manuf_create_response.is_a?(Hash) - then - _autopopulate_manuf[:id] = _manuf_create_response.fetch(:id, nil) - _autopopulate_manuf[:match] = 1.0 + _vrfs = collect_values(crush(_vrfs)) + _lookup_result = _vrfs unless (_lookup_type != :ip_vrf) + end # _lookup_type == :ip_vrf + + if !_autopopulate_device.nil? && _autopopulate_device.fetch(:id, nil)&.nonzero? + # device has been created, we need to create an interface for it + _interface_data = { _autopopulate_manuf[:vm] ? :virtual_machine : :device => _autopopulate_device[:id], + :name => "e0", + :type => "other" } + if !_autopopulate_mac.nil? && !_autopopulate_mac.empty? + _interface_data[:mac_address] = _autopopulate_mac.is_a?(Array) ? _autopopulate_mac.first : _autopopulate_mac end - end - - # at this point we *must* have the manufacturer ID - if _autopopulate_manuf.fetch(:id, nil)&.nonzero? - - # make sure the desired device type also exists, look it up first - _query = { :offset => 0, - :limit => 1, - :manufacturer_id => _autopopulate_manuf[:id], - :model => _autopopulate_default_dtype } - if (_dtypes_response = @netbox_conn.get('dcim/device-types/', _query).body) && - _dtypes_response.is_a?(Hash) && - (_tmp_dtypes = _dtypes_response.fetch(:results, [])) && - (_tmp_dtypes.length() > 0) + if !_vrfs.nil? && !_vrfs.empty? + _interface_data[:vrf] = _vrfs.fetch(:id, []).first + end + if (_interface_create_reponse = _nb.post(_autopopulate_manuf[:vm] ? 
'virtualization/interfaces/' : 'dcim/interfaces/', _interface_data.to_json, _nb_headers).body) && + _interface_create_reponse.is_a?(Hash) && + _interface_create_reponse.has_key?(:id) then - _autopopulate_dtype = _tmp_dtypes.first + _autopopulate_interface = _interface_create_reponse end - if _autopopulate_dtype.nil? - # the device type is not found, create it - _dtype_data = { :manufacturer => _autopopulate_manuf[:id], - :model => _autopopulate_default_dtype, - :slug => _autopopulate_default_dtype.to_url } - if (_dtype_create_response = @netbox_conn.post('dcim/device-types/', _dtype_data.to_json, @netbox_headers).body) && - _dtype_create_response.is_a?(Hash) && - _dtype_create_response.has_key?(:id) + if !_autopopulate_interface.nil? && _autopopulate_interface.fetch(:id, nil)&.nonzero? + # interface has been created, we need to create an IP address for it + _ip_data = { :address => "#{_key}/#{_key_ip&.prefix()}", + :assigned_object_type => _autopopulate_manuf[:vm] ? "virtualization.vminterface" : "dcim.interface", + :assigned_object_id => _autopopulate_interface[:id], + :status => "active" } + if (_vrf = _autopopulate_interface.fetch(:vrf, nil)) && + (_vrf.has_key?(:id)) then - _autopopulate_dtype = _dtype_create_response + _ip_data[:vrf] = _vrf[:id] end - end - - # # now we must also have the device type ID - if _autopopulate_dtype&.fetch(:id, nil)&.nonzero? - - # create the device - _device_name = _autopopulate_hostname.to_s.empty? ? 
"#{_autopopulate_manuf[:name]} @ #{_key}" : "#{_autopopulate_hostname} @ #{_key}" - _device_data = { :name => _device_name, - :device_type => _autopopulate_dtype[:id], - :role => _autopopulate_role[:id], - :site => _autopopulate_site[:id], - :status => "staged" } - if (_device_create_response = @netbox_conn.post('dcim/devices/', _device_data.to_json, @netbox_headers).body) && - _device_create_response.is_a?(Hash) && - _device_create_response.has_key?(:id) + if (_ip_create_reponse = _nb.post('ipam/ip-addresses/', _ip_data.to_json, _nb_headers).body) && + _ip_create_reponse.is_a?(Hash) && + _ip_create_reponse.has_key?(:id) then - _autopopulate_device = _device_create_response + _autopopulate_ip = _ip_create_reponse end + end # check if interface was created and has ID + + if !_autopopulate_ip.nil? && _autopopulate_ip.fetch(:id, nil)&.nonzero? + # IP address was created, need to associate it as the primary IP for the device + _primary_ip_data = { _key_ip&.ipv6? ? :primary_ip6 : :primary_ip4 => _autopopulate_ip[:id] } + if (_ip_primary_reponse = _nb.patch("#{_autopopulate_manuf[:vm] ? 'virtualization/virtual-machines' : 'dcim/devices'}/#{_autopopulate_device[:id]}/", _primary_ip_data.to_json, _nb_headers).body) && + _ip_primary_reponse.is_a?(Hash) && + _ip_primary_reponse.has_key?(:id) + then + _autopopulate_device = _ip_create_reponse + end + end # check if the IP address was created and has an ID - end # _autopopulate_dtype[:id] is valid - - end # _autopopulate_manuf[:id] is valid - - end # virtual machine vs. regular device - - end # site and role are valid - - rescue Faraday::Error - # give up aka do nothing - _exception_error_general = true - end - - if !_autopopulate_device.nil? 
- # we created a device, so send it back out as the result for the event as well - _devices << { :name => _autopopulate_device&.fetch(:name, _autopopulate_device&.fetch(:display, nil)), - :id => _autopopulate_device&.fetch(:id, nil), - :url => _autopopulate_device&.fetch(:url, nil), - :site => _autopopulate_site&.fetch(:name, nil), - :role => _autopopulate_role&.fetch(:name, nil), - :device_type => _autopopulate_dtype&.fetch(:name, nil), - :manufacturer => _autopopulate_manuf&.fetch(:name, nil), - :details => _verbose ? _autopopulate_device : nil } - end # _autopopulate_device was not nil (i.e., we autocreated a device) - - end # _autopopulate turned on and no results found - - _devices = collect_values(crush(_devices)) - _devices.fetch(:service, [])&.flatten!&.uniq! - _result = _devices - end # _lookup_type == :ip_device - - # this || is because we are going to need to do the VRF lookup if we're autopopulating - # as well as if we're specifically requested to do that enrichment - - if (_lookup_type == :ip_vrf) || !_autopopulate_device.nil? - ################################################################################# - # retrieve the list VRFs containing IP address prefixes containing the search key - _vrfs = Array.new - _query = { :contains => _key, - :offset => 0, - :limit => _page_size } - _query[:site_n] = _lookup_site unless _lookup_site.nil? || _lookup_site.empty? - begin - while true do - if (_prefixes_response = @netbox_conn.get('ipam/prefixes/', _query).body) && - _prefixes_response.is_a?(Hash) - then - _tmp_prefixes = _prefixes_response.fetch(:results, []) - _tmp_prefixes.each do |p| - if (_vrf = p.fetch(:vrf, nil)) - # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } - # if _verbose, include entire object as :details - _vrfs << { :name => _vrf.fetch(:name, _vrf.fetch(:display, nil)), - :id => _vrf.fetch(:id, nil), - :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? 
_site[:name] : _site&.fetch(:display, nil), - :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), - :url => p.fetch(:url, _vrf.fetch(:url, nil)), - :details => _verbose ? _vrf.merge({:prefix => p.tap { |h| h.delete(:vrf) }}) : nil } - end - end - _query[:offset] += _tmp_prefixes.length() - break unless (_tmp_prefixes.length() >= _page_size) - else - break - end - end - rescue Faraday::Error - # give up aka do nothing - _exception_error_general = true - end - _vrfs = collect_values(crush(_vrfs)) - _result = _vrfs unless (_lookup_type != :ip_vrf) - end # _lookup_type == :ip_vrf - - if !_autopopulate_device.nil? && _autopopulate_device.fetch(:id, nil)&.nonzero? - # device has been created, we need to create an interface for it - _interface_data = { _autopopulate_manuf[:vm] ? :virtual_machine : :device => _autopopulate_device[:id], - :name => "e0", - :type => "other" } - if !_autopopulate_mac.nil? && !_autopopulate_mac.empty? - _interface_data[:mac_address] = _autopopulate_mac.is_a?(Array) ? _autopopulate_mac.first : _autopopulate_mac - end - if !_vrfs.nil? && !_vrfs.empty? - _interface_data[:vrf] = _vrfs.fetch(:id, []).first - end - if (_interface_create_reponse = @netbox_conn.post(_autopopulate_manuf[:vm] ? 'virtualization/interfaces/' : 'dcim/interfaces/', _interface_data.to_json, @netbox_headers).body) && - _interface_create_reponse.is_a?(Hash) && - _interface_create_reponse.has_key?(:id) - then - _autopopulate_interface = _interface_create_reponse - end - - if !_autopopulate_interface.nil? && _autopopulate_interface.fetch(:id, nil)&.nonzero? - # interface has been created, we need to create an IP address for it - _ip_data = { :address => "#{_key}/#{_key_ip&.prefix()}", - :assigned_object_type => _autopopulate_manuf[:vm] ? 
"virtualization.vminterface" : "dcim.interface", - :assigned_object_id => _autopopulate_interface[:id], - :status => "active" } - if (_vrf = _autopopulate_interface.fetch(:vrf, nil)) && - (_vrf.has_key?(:id)) - then - _ip_data[:vrf] = _vrf[:id] - end - if (_ip_create_reponse = @netbox_conn.post('ipam/ip-addresses/', _ip_data.to_json, @netbox_headers).body) && - _ip_create_reponse.is_a?(Hash) && - _ip_create_reponse.has_key?(:id) - then - _autopopulate_ip = _ip_create_reponse - end - end # check if interface was created and has ID - - if !_autopopulate_ip.nil? && _autopopulate_ip.fetch(:id, nil)&.nonzero? - # IP address was created, need to associate it as the primary IP for the device - _primary_ip_data = { _key_ip&.ipv6? ? :primary_ip6 : :primary_ip4 => _autopopulate_ip[:id] } - if (_ip_primary_reponse = @netbox_conn.patch("#{_autopopulate_manuf[:vm] ? 'virtualization/virtual-machines' : 'dcim/devices'}/#{_autopopulate_device[:id]}/", _primary_ip_data.to_json, @netbox_headers).body) && - _ip_primary_reponse.is_a?(Hash) && - _ip_primary_reponse.has_key?(:id) - then - _autopopulate_device = _ip_create_reponse - end - end # check if the IP address was created and has an ID - - end # check if device was created and has ID - - if _exception_error_connection && !@netbox_conn_resetting - @netbox_conn_lock.release_read_lock - @netbox_conn_lock.acquire_write_lock - begin - if !@netbox_conn_resetting - @netbox_conn_needs_reset = true - end - ensure - @netbox_conn_lock.release_write_lock - @netbox_conn_lock.acquire_read_lock - end - end - ensure - @netbox_conn_lock.release_read_lock - end + end # check if device was created and has ID + + # yield return value for cache_hash getset + _lookup_result + } if !_result.nil? && _result.has_key?(:url) && !_result[:url]&.empty? _result[:url].map! 
{ |u| u.delete_prefix(@netbox_url_base).gsub('/api/', '/') } From 12b4f57bef273bc132b993f8d56a5c8aa5af6fc1 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Fri, 27 Oct 2023 10:26:34 -0600 Subject: [PATCH 11/82] bump Werkzeug to v3.0.1 as patch for DoS 'High resource usage when parsing multipart/form-data containing a large part with CR/LF character at the beginning' --- sensor-iso/interface/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sensor-iso/interface/requirements.txt b/sensor-iso/interface/requirements.txt index d1b71390d..da47d54c9 100644 --- a/sensor-iso/interface/requirements.txt +++ b/sensor-iso/interface/requirements.txt @@ -13,4 +13,4 @@ python-dotenv==1.0.0 requests==2.31.0 six==1.16.0 urllib3==1.26.18 -Werkzeug==2.3.3 +Werkzeug==3.0.1 From 86a41d05e022e8244870a60e0c4a8578df7fef7f Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Fri, 27 Oct 2023 11:23:56 -0600 Subject: [PATCH 12/82] allow specifying capture parameters when orchmode is kubernetes --- scripts/install.py | 63 ++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/scripts/install.py b/scripts/install.py index ad3b6b022..890225a1f 100755 --- a/scripts/install.py +++ b/scripts/install.py @@ -1288,41 +1288,38 @@ def tweak_malcolm_runtime(self, malcolm_install_path): else 'unset' ) - if self.orchMode is OrchestrationFramework.DOCKER_COMPOSE: - captureOptions = ('no', 'yes', 'customize') - loopBreaker = CountUntilException(MaxAskForValueCount) - while captureSelection not in [x[0] for x in captureOptions] and loopBreaker.increment(): - captureSelection = InstallerChooseOne( - 'Should Malcolm capture live network traffic?', - choices=[(x, '', x == captureOptions[0]) for x in captureOptions], - )[0] - if captureSelection == 'y': - pcapNetSniff = True - liveSuricata = True - liveZeek = True - elif captureSelection == 'c': - if InstallerYesOrNo( - 'Should Malcolm capture live network traffic to PCAP files 
for analysis with Arkime?', - default=args.pcapNetSniff or args.pcapTcpDump or (malcolmProfile == PROFILE_HEDGEHOG), - ): - pcapNetSniff = InstallerYesOrNo('Capture packets using netsniff-ng?', default=args.pcapNetSniff) - if not pcapNetSniff: - pcapTcpDump = InstallerYesOrNo('Capture packets using tcpdump?', default=args.pcapTcpDump) - liveSuricata = InstallerYesOrNo( - 'Should Malcolm analyze live network traffic with Suricata?', default=args.liveSuricata + captureOptions = ('no', 'yes', 'customize') + loopBreaker = CountUntilException(MaxAskForValueCount) + while captureSelection not in [x[0] for x in captureOptions] and loopBreaker.increment(): + captureSelection = InstallerChooseOne( + 'Should Malcolm capture live network traffic?', + choices=[(x, '', x == captureOptions[0]) for x in captureOptions], + )[0] + if captureSelection == 'y': + pcapNetSniff = True + liveSuricata = True + liveZeek = True + elif captureSelection == 'c': + if InstallerYesOrNo( + 'Should Malcolm capture live network traffic to PCAP files for analysis with Arkime?', + default=args.pcapNetSniff or args.pcapTcpDump or (malcolmProfile == PROFILE_HEDGEHOG), + ): + pcapNetSniff = InstallerYesOrNo('Capture packets using netsniff-ng?', default=args.pcapNetSniff) + if not pcapNetSniff: + pcapTcpDump = InstallerYesOrNo('Capture packets using tcpdump?', default=args.pcapTcpDump) + liveSuricata = InstallerYesOrNo( + 'Should Malcolm analyze live network traffic with Suricata?', default=args.liveSuricata + ) + liveZeek = InstallerYesOrNo('Should Malcolm analyze live network traffic with Zeek?', default=args.liveZeek) + if pcapNetSniff or pcapTcpDump or liveZeek or liveSuricata: + pcapFilter = InstallerAskForString( + 'Capture filter (tcpdump-like filter expression; leave blank to capture all traffic)', + default=args.pcapFilter, ) - liveZeek = InstallerYesOrNo( - 'Should Malcolm analyze live network traffic with Zeek?', default=args.liveZeek + tweakIface = InstallerYesOrNo( + 'Disable capture 
interface hardware offloading and adjust ring buffer sizes?', + default=args.tweakIface, ) - if pcapNetSniff or pcapTcpDump or liveZeek or liveSuricata: - pcapFilter = InstallerAskForString( - 'Capture filter (tcpdump-like filter expression; leave blank to capture all traffic)', - default=args.pcapFilter, - ) - tweakIface = InstallerYesOrNo( - 'Disable capture interface hardware offloading and adjust ring buffer sizes?', - default=args.tweakIface, - ) if pcapNetSniff or pcapTcpDump or liveZeek or liveSuricata: pcapIface = '' From b4f8d4436171ea51940e4b8af0cd61e6ba67f886 Mon Sep 17 00:00:00 2001 From: SG Date: Mon, 30 Oct 2023 13:54:22 -0600 Subject: [PATCH 13/82] Update Zeek to v6.1.0 --- Dockerfiles/zeek.Dockerfile | 10 ++++++---- config/zeek.env.example | 1 + .../includes.chroot/usr/local/etc/zeek/local.zeek | 7 ++++++- sensor-iso/interface/sensor_ctl/control_vars.conf | 1 + sensor-iso/zeek/build-zeek-deb.sh | 2 +- shared/bin/zeek_install_plugins.sh | 7 +++---- zeek/config/local.zeek | 7 ++++++- 7 files changed, 24 insertions(+), 11 deletions(-) diff --git a/Dockerfiles/zeek.Dockerfile b/Dockerfiles/zeek.Dockerfile index 0dbad75e9..b9e43d863 100644 --- a/Dockerfiles/zeek.Dockerfile +++ b/Dockerfiles/zeek.Dockerfile @@ -4,7 +4,7 @@ ENV DEBIAN_FRONTEND noninteractive ENV TERM xterm # for build -ARG ZEEK_VERSION=6.0.1 +ARG ZEEK_VERSION=6.1.0 ENV ZEEK_VERSION $ZEEK_VERSION ARG ZEEK_DBG=0 ENV ZEEK_DBG $ZEEK_DBG @@ -101,7 +101,7 @@ ENV SUPERCRONIC_SHA1SUM "7dadd4ac827e7bd60b386414dfefc898ae5b6c63" ENV SUPERCRONIC_CRONTAB "/etc/crontab" # for download and install -ARG ZEEK_VERSION=6.0.0 +ARG ZEEK_VERSION=6.1.0 ENV ZEEK_VERSION $ZEEK_VERSION # put Zeek and Spicy in PATH @@ -226,8 +226,8 @@ ADD shared/bin/nic-capture-setup.sh /usr/local/bin/ # sanity checks to make sure the plugins installed and copied over correctly # these ENVs should match the number of third party scripts/plugins installed by zeek_install_plugins.sh -ENV ZEEK_THIRD_PARTY_PLUGINS_COUNT 23 -ENV 
ZEEK_THIRD_PARTY_PLUGINS_GREP "(Zeek::Spicy|ANALYZER_SPICY_DHCP|ANALYZER_SPICY_DNS|ANALYZER_SPICY_HTTP|ANALYZER_SPICY_OSPF|ANALYZER_SPICY_OPENVPN_UDP\b|ANALYZER_SPICY_IPSEC_UDP\b|ANALYZER_SPICY_TFTP|ANALYZER_SPICY_WIREGUARD|ANALYZER_SPICY_LDAP_TCP|ANALYZER_SPICY_SYNCHROPHASOR_TCP|ANALYZER_SPICY_GENISYS_TCP|ANALYZER_S7COMM_TCP|Corelight::CommunityID|Corelight::PE_XOR|ICSNPP::BACnet|ICSNPP::BSAP|ICSNPP::ENIP|ICSNPP::ETHERCAT|ICSNPP::OPCUA_Binary|Salesforce::GQUIC|Zeek::PROFINET|Zeek::TDS)" +ENV ZEEK_THIRD_PARTY_PLUGINS_COUNT 22 +ENV ZEEK_THIRD_PARTY_PLUGINS_GREP "(Zeek::Spicy|ANALYZER_SPICY_DHCP|ANALYZER_SPICY_DNS|ANALYZER_SPICY_HTTP|ANALYZER_SPICY_OSPF|ANALYZER_SPICY_OPENVPN_UDP\b|ANALYZER_SPICY_IPSEC_UDP\b|ANALYZER_SPICY_TFTP|ANALYZER_SPICY_WIREGUARD|ANALYZER_SPICY_SYNCHROPHASOR_TCP|ANALYZER_SPICY_GENISYS_TCP|ANALYZER_S7COMM_TCP|Corelight::CommunityID|Corelight::PE_XOR|ICSNPP::BACnet|ICSNPP::BSAP|ICSNPP::ENIP|ICSNPP::ETHERCAT|ICSNPP::OPCUA_Binary|Salesforce::GQUIC|Zeek::PROFINET|Zeek::TDS)" ENV ZEEK_THIRD_PARTY_SCRIPTS_COUNT 25 ENV ZEEK_THIRD_PARTY_SCRIPTS_GREP "(bro-is-darknet/main|bro-simple-scan/scan|bzar/main|callstranger-detector/callstranger|cve-2020-0601/cve-2020-0601|cve-2020-13777/cve-2020-13777|CVE-2020-16898/CVE-2020-16898|CVE-2021-38647/omigod|CVE-2021-31166/detect|CVE-2021-41773/CVE_2021_41773|CVE-2021-42292/main|cve-2021-44228/CVE_2021_44228|cve-2022-22954/main|cve-2022-26809/main|CVE-2022-3602/__load__|hassh/hassh|http-more-files-names/main|ja3/ja3|pingback/detect|ripple20/ripple20|SIGRed/CVE-2020-1350|zeek-EternalSafety/main|zeek-httpattacks/main|zeek-sniffpass/__load__|zerologon/main)\.(zeek|bro)" @@ -309,6 +309,7 @@ ARG ZEEK_DISABLE_SPICY_HTTP=true ARG ZEEK_DISABLE_SPICY_IPSEC= ARG ZEEK_DISABLE_SPICY_LDAP= ARG ZEEK_DISABLE_SPICY_OPENVPN= +ARG ZEEK_DISABLE_SPICY_QUIC=true ARG ZEEK_DISABLE_SPICY_STUN= ARG ZEEK_DISABLE_SPICY_TAILSCALE= ARG ZEEK_DISABLE_SPICY_TFTP= @@ -327,6 +328,7 @@ ENV ZEEK_DISABLE_SPICY_HTTP $ZEEK_DISABLE_SPICY_HTTP ENV 
ZEEK_DISABLE_SPICY_IPSEC $ZEEK_DISABLE_SPICY_IPSEC ENV ZEEK_DISABLE_SPICY_LDAP $ZEEK_DISABLE_SPICY_LDAP ENV ZEEK_DISABLE_SPICY_OPENVPN $ZEEK_DISABLE_SPICY_OPENVPN +ENV ZEEK_DISABLE_SPICY_QUIC $ZEEK_DISABLE_SPICY_QUIC ENV ZEEK_DISABLE_SPICY_STUN $ZEEK_DISABLE_SPICY_STUN ENV ZEEK_DISABLE_SPICY_TAILSCALE $ZEEK_DISABLE_SPICY_TAILSCALE ENV ZEEK_DISABLE_SPICY_TFTP $ZEEK_DISABLE_SPICY_TFTP diff --git a/config/zeek.env.example b/config/zeek.env.example index e676366df..795c37583 100644 --- a/config/zeek.env.example +++ b/config/zeek.env.example @@ -56,6 +56,7 @@ ZEEK_DISABLE_SPICY_HTTP=true ZEEK_DISABLE_SPICY_IPSEC= ZEEK_DISABLE_SPICY_LDAP= ZEEK_DISABLE_SPICY_OPENVPN= +ZEEK_DISABLE_SPICY_QUIC=true ZEEK_DISABLE_SPICY_STUN= ZEEK_DISABLE_SPICY_TAILSCALE= ZEEK_DISABLE_SPICY_TFTP= diff --git a/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek b/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek index cc8674860..500612e7b 100644 --- a/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek +++ b/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek @@ -20,6 +20,7 @@ global disable_spicy_http = (getenv("ZEEK_DISABLE_SPICY_HTTP") == "") ? F : T; global disable_spicy_ipsec = (getenv("ZEEK_DISABLE_SPICY_IPSEC") == "") ? F : T; global disable_spicy_ldap = (getenv("ZEEK_DISABLE_SPICY_LDAP") == "") ? F : T; global disable_spicy_openvpn = (getenv("ZEEK_DISABLE_SPICY_OPENVPN") == "") ? F : T; +global disable_spicy_quic = (getenv("ZEEK_DISABLE_SPICY_QUIC") == "") ? F : T; global disable_spicy_stun = (getenv("ZEEK_DISABLE_SPICY_STUN") == "") ? F : T; global disable_spicy_tailscale = (getenv("ZEEK_DISABLE_SPICY_TAILSCALE") == "") ? F : T; global disable_spicy_tftp = (getenv("ZEEK_DISABLE_SPICY_TFTP") == "") ? 
F : T; @@ -141,7 +142,8 @@ event zeek_init() &priority=-5 { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_IPSEC_IKE_UDP); } if (disable_spicy_ldap) { - Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_LDAP_TCP); + Analyzer::disable_analyzer(Analyzer::ANALYZER_LDAP_TCP); + Analyzer::disable_analyzer(Analyzer::ANALYZER_LDAP_UDP); } if (disable_spicy_openvpn) { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_OPENVPN_TCP); @@ -155,6 +157,9 @@ event zeek_init() &priority=-5 { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_OPENVPN_UDP_HMAC_SHA256); Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_OPENVPN_UDP_HMAC_SHA512); } + if (disable_spicy_quic) { + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_QUIC); + } if (disable_spicy_stun) { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_STUN); Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_STUN_TCP); diff --git a/sensor-iso/interface/sensor_ctl/control_vars.conf b/sensor-iso/interface/sensor_ctl/control_vars.conf index e0773a666..255a2d22d 100644 --- a/sensor-iso/interface/sensor_ctl/control_vars.conf +++ b/sensor-iso/interface/sensor_ctl/control_vars.conf @@ -62,6 +62,7 @@ export ZEEK_DISABLE_SPICY_HTTP=true export ZEEK_DISABLE_SPICY_IPSEC= export ZEEK_DISABLE_SPICY_LDAP= export ZEEK_DISABLE_SPICY_OPENVPN= +export ZEEK_DISABLE_SPICY_QUIC=true export ZEEK_DISABLE_SPICY_STUN= export ZEEK_DISABLE_SPICY_TAILSCALE= export ZEEK_DISABLE_SPICY_TFTP= diff --git a/sensor-iso/zeek/build-zeek-deb.sh b/sensor-iso/zeek/build-zeek-deb.sh index be76ad437..4be53b1b8 100755 --- a/sensor-iso/zeek/build-zeek-deb.sh +++ b/sensor-iso/zeek/build-zeek-deb.sh @@ -13,7 +13,7 @@ export PYTHONDONTWRITEBYTECODE=1 export PYTHONUNBUFFERED=1 ZEEK_URL=https://github.com/zeek/zeek.git -ZEEK_VERSION=6.0.1 +ZEEK_VERSION=6.1.0 ZEEK_DIR=/opt/zeek BUILD_JOBS=0 OUTPUT_DIR=/tmp diff --git a/shared/bin/zeek_install_plugins.sh b/shared/bin/zeek_install_plugins.sh index 4931d874b..682b41be5 
100755 --- a/shared/bin/zeek_install_plugins.sh +++ b/shared/bin/zeek_install_plugins.sh @@ -98,10 +98,10 @@ ZKG_GITHUB_URLS=( "https://github.com/corelight/zeek-spicy-ospf" "https://github.com/corelight/zeek-spicy-stun" "https://github.com/corelight/zeek-spicy-wireguard" - "https://github.com/corelight/zeek-xor-exe-plugin" + "https://github.com/mmguero-dev/zeek-xor-exe-plugin" "https://github.com/corelight/zerologon" - "https://github.com/cybera/zeek-sniffpass" - "https://github.com/mitre-attack/bzar" + "https://github.com/mmguero-dev/zeek-sniffpass" + "https://github.com/mmguero-dev/bzar" "https://github.com/ncsa/bro-is-darknet" "https://github.com/ncsa/bro-simple-scan" "https://github.com/precurse/zeek-httpattacks" @@ -111,7 +111,6 @@ ZKG_GITHUB_URLS=( "https://github.com/zeek/spicy-dhcp" "https://github.com/zeek/spicy-dns" "https://github.com/zeek/spicy-http" - "https://github.com/zeek/spicy-ldap" "https://github.com/zeek/spicy-pe" "https://github.com/zeek/spicy-tftp" "https://github.com/zeek/spicy-zip" diff --git a/zeek/config/local.zeek b/zeek/config/local.zeek index 4450fe3ad..af8208318 100644 --- a/zeek/config/local.zeek +++ b/zeek/config/local.zeek @@ -20,6 +20,7 @@ global disable_spicy_http = (getenv("ZEEK_DISABLE_SPICY_HTTP") == "") ? F : T; global disable_spicy_ipsec = (getenv("ZEEK_DISABLE_SPICY_IPSEC") == "") ? F : T; global disable_spicy_ldap = (getenv("ZEEK_DISABLE_SPICY_LDAP") == "") ? F : T; global disable_spicy_openvpn = (getenv("ZEEK_DISABLE_SPICY_OPENVPN") == "") ? F : T; +global disable_spicy_quic = (getenv("ZEEK_DISABLE_SPICY_QUIC") == "") ? F : T; global disable_spicy_stun = (getenv("ZEEK_DISABLE_SPICY_STUN") == "") ? F : T; global disable_spicy_tailscale = (getenv("ZEEK_DISABLE_SPICY_TAILSCALE") == "") ? F : T; global disable_spicy_tftp = (getenv("ZEEK_DISABLE_SPICY_TFTP") == "") ? 
F : T; @@ -141,7 +142,8 @@ event zeek_init() &priority=-5 { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_IPSEC_IKE_UDP); } if (disable_spicy_ldap) { - Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_LDAP_TCP); + Analyzer::disable_analyzer(Analyzer::ANALYZER_LDAP_TCP); + Analyzer::disable_analyzer(Analyzer::ANALYZER_LDAP_UDP); } if (disable_spicy_openvpn) { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_OPENVPN_TCP); @@ -155,6 +157,9 @@ event zeek_init() &priority=-5 { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_OPENVPN_UDP_HMAC_SHA256); Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_OPENVPN_UDP_HMAC_SHA512); } + if (disable_spicy_quic) { + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_QUIC); + } if (disable_spicy_stun) { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_STUN); Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_STUN_TCP); From ed99af51fb1cd4dc1717d024f0ed5a673fc3a410 Mon Sep 17 00:00:00 2001 From: SG Date: Mon, 30 Oct 2023 15:05:40 -0600 Subject: [PATCH 14/82] point downstream to mmguero-dev/icsnpp-modbus until https://github.com/cisagov/icsnpp-modbus/pull/8 is pulled --- shared/bin/zeek_install_plugins.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/bin/zeek_install_plugins.sh b/shared/bin/zeek_install_plugins.sh index 682b41be5..4d56ba8b3 100755 --- a/shared/bin/zeek_install_plugins.sh +++ b/shared/bin/zeek_install_plugins.sh @@ -74,7 +74,7 @@ ZKG_GITHUB_URLS=( "https://github.com/cisagov/icsnpp-enip" "https://github.com/cisagov/icsnpp-ethercat" "https://github.com/cisagov/icsnpp-genisys" - "https://github.com/cisagov/icsnpp-modbus" + "https://github.com/mmguero-dev/icsnpp-modbus" "https://github.com/cisagov/icsnpp-opcua-binary" "https://github.com/cisagov/icsnpp-s7comm" "https://github.com/cisagov/icsnpp-synchrophasor" From da45848d7c1c89a3a1b272b6d976a07cf52884b6 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Mon, 30 Oct 2023 21:41:08 
-0600 Subject: [PATCH 15/82] Working on zeek v6.1.0 build --- Dockerfiles/zeek.Dockerfile | 2 +- .../config/includes.chroot/usr/local/etc/zeek/local.zeek | 6 +++--- shared/bin/zeek_install_plugins.sh | 4 ++-- zeek/config/local.zeek | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Dockerfiles/zeek.Dockerfile b/Dockerfiles/zeek.Dockerfile index b9e43d863..248ae4789 100644 --- a/Dockerfiles/zeek.Dockerfile +++ b/Dockerfiles/zeek.Dockerfile @@ -227,7 +227,7 @@ ADD shared/bin/nic-capture-setup.sh /usr/local/bin/ # sanity checks to make sure the plugins installed and copied over correctly # these ENVs should match the number of third party scripts/plugins installed by zeek_install_plugins.sh ENV ZEEK_THIRD_PARTY_PLUGINS_COUNT 22 -ENV ZEEK_THIRD_PARTY_PLUGINS_GREP "(Zeek::Spicy|ANALYZER_SPICY_DHCP|ANALYZER_SPICY_DNS|ANALYZER_SPICY_HTTP|ANALYZER_SPICY_OSPF|ANALYZER_SPICY_OPENVPN_UDP\b|ANALYZER_SPICY_IPSEC_UDP\b|ANALYZER_SPICY_TFTP|ANALYZER_SPICY_WIREGUARD|ANALYZER_SPICY_SYNCHROPHASOR_TCP|ANALYZER_SPICY_GENISYS_TCP|ANALYZER_S7COMM_TCP|Corelight::CommunityID|Corelight::PE_XOR|ICSNPP::BACnet|ICSNPP::BSAP|ICSNPP::ENIP|ICSNPP::ETHERCAT|ICSNPP::OPCUA_Binary|Salesforce::GQUIC|Zeek::PROFINET|Zeek::TDS)" +ENV ZEEK_THIRD_PARTY_PLUGINS_GREP "(Zeek::Spicy|ANALYZER_SPICY_DHCP|ANALYZER_SPICY_DNS|ANALYZER_SPICY_HTTP|ANALYZER_SPICY_OSPF|ANALYZER_SPICY_OPENVPN_UDP\b|ANALYZER_SPICY_IPSEC_UDP\b|ANALYZER_SPICY_TFTP|ANALYZER_SPICY_WIREGUARD|ANALYZER_SYNCHROPHASOR_TCP|ANALYZER_GENISYS_TCP|ANALYZER_S7COMM_TCP|Corelight::CommunityID|Corelight::PE_XOR|ICSNPP::BACnet|ICSNPP::BSAP|ICSNPP::ENIP|ICSNPP::ETHERCAT|ICSNPP::OPCUA_Binary|Salesforce::GQUIC|Zeek::PROFINET|Zeek::TDS)" ENV ZEEK_THIRD_PARTY_SCRIPTS_COUNT 25 ENV ZEEK_THIRD_PARTY_SCRIPTS_GREP 
"(bro-is-darknet/main|bro-simple-scan/scan|bzar/main|callstranger-detector/callstranger|cve-2020-0601/cve-2020-0601|cve-2020-13777/cve-2020-13777|CVE-2020-16898/CVE-2020-16898|CVE-2021-38647/omigod|CVE-2021-31166/detect|CVE-2021-41773/CVE_2021_41773|CVE-2021-42292/main|cve-2021-44228/CVE_2021_44228|cve-2022-22954/main|cve-2022-26809/main|CVE-2022-3602/__load__|hassh/hassh|http-more-files-names/main|ja3/ja3|pingback/detect|ripple20/ripple20|SIGRed/CVE-2020-1350|zeek-EternalSafety/main|zeek-httpattacks/main|zeek-sniffpass/__load__|zerologon/main)\.(zeek|bro)" diff --git a/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek b/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek index 500612e7b..71728bf7a 100644 --- a/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek +++ b/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek @@ -109,7 +109,7 @@ event zeek_init() &priority=-5 { PacketAnalyzer::__disable_analyzer(PacketAnalyzer::ANALYZER_ETHERCAT); } if (disable_ics_all || disable_ics_genisys) { - Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_GENISYS_TCP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_GENISYS_TCP); } if (disable_ics_all || disable_ics_opcua_binary) { Analyzer::disable_analyzer(Analyzer::ANALYZER_ICSNPP_OPCUA_BINARY); @@ -124,8 +124,8 @@ event zeek_init() &priority=-5 { Analyzer::disable_analyzer(Analyzer::ANALYZER_S7COMM_TCP); } if (disable_ics_all || disable_ics_synchrophasor) { - Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_SYNCHROPHASOR_TCP); - Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_SYNCHROPHASOR_UDP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SYNCHROPHASOR_TCP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SYNCHROPHASOR_UDP); } if (disable_spicy_dhcp) { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_DHCP); diff --git a/shared/bin/zeek_install_plugins.sh b/shared/bin/zeek_install_plugins.sh index 4d56ba8b3..47ac71e7d 
100755 --- a/shared/bin/zeek_install_plugins.sh +++ b/shared/bin/zeek_install_plugins.sh @@ -73,11 +73,11 @@ ZKG_GITHUB_URLS=( "https://github.com/cisagov/icsnpp-dnp3" "https://github.com/cisagov/icsnpp-enip" "https://github.com/cisagov/icsnpp-ethercat" - "https://github.com/cisagov/icsnpp-genisys" + "https://github.com/mmguero-dev/icsnpp-genisys" "https://github.com/mmguero-dev/icsnpp-modbus" "https://github.com/cisagov/icsnpp-opcua-binary" "https://github.com/cisagov/icsnpp-s7comm" - "https://github.com/cisagov/icsnpp-synchrophasor" + "https://github.com/mmguero-dev/icsnpp-synchrophasor" "https://github.com/corelight/callstranger-detector" "https://github.com/corelight/CVE-2020-16898" "https://github.com/corelight/CVE-2021-31166" diff --git a/zeek/config/local.zeek b/zeek/config/local.zeek index af8208318..deadc9ace 100644 --- a/zeek/config/local.zeek +++ b/zeek/config/local.zeek @@ -109,7 +109,7 @@ event zeek_init() &priority=-5 { PacketAnalyzer::__disable_analyzer(PacketAnalyzer::ANALYZER_ETHERCAT); } if (disable_ics_all || disable_ics_genisys) { - Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_GENISYS_TCP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_GENISYS_TCP); } if (disable_ics_all || disable_ics_opcua_binary) { Analyzer::disable_analyzer(Analyzer::ANALYZER_ICSNPP_OPCUA_BINARY); @@ -124,8 +124,8 @@ event zeek_init() &priority=-5 { Analyzer::disable_analyzer(Analyzer::ANALYZER_S7COMM_TCP); } if (disable_ics_all || disable_ics_synchrophasor) { - Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_SYNCHROPHASOR_TCP); - Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_SYNCHROPHASOR_UDP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SYNCHROPHASOR_TCP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SYNCHROPHASOR_UDP); } if (disable_spicy_dhcp) { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_DHCP); From 70c77737b0b95d3c8afd195226b0d55c3736de00 Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 31 Oct 2023 
08:36:25 -0600 Subject: [PATCH 16/82] update some of the documentation for docker compose (v2) as a plugin rather than a standalone script --- docs/authsetup.md | 4 ++-- docs/contributing-dashboards.md | 2 +- docs/development.md | 2 +- docs/host-config-linux.md | 4 ++-- docs/host-config-macos.md | 15 ++++++++++++--- docs/kubernetes.md | 2 +- docs/malcolm-iso.md | 4 ++-- docs/malcolm-upgrade.md | 6 +++--- docs/quickstart.md | 4 ++-- docs/running.md | 4 ++-- docs/ubuntu-install-example.md | 8 ++++---- docs/zeek-intel.md | 2 +- 12 files changed, 33 insertions(+), 24 deletions(-) diff --git a/docs/authsetup.md b/docs/authsetup.md index 4fb27eba8..49b707643 100644 --- a/docs/authsetup.md +++ b/docs/authsetup.md @@ -71,7 +71,7 @@ The contents of `nginx_ldap.conf` will vary depending on how the LDAP server is * **`group_attribute_is_dn`** - whether or not to search for the user's full distinguished name as the value in the group's member attribute * **`require`** and **`satisfy`** - `require user`, `require group` and `require valid_user` can be used in conjunction with `satisfy any` or `satisfy all` to limit the users that are allowed to access the Malcolm instance -Before starting Malcolm, edit `nginx/nginx_ldap.conf` according to the specifics of your LDAP server and directory tree structure. Using a LDAP search tool such as [`ldapsearch`](https://www.openldap.org/software/man.cgi?query=ldapsearch) in Linux or [`dsquery`](https://social.technet.microsoft.com/wiki/contents/articles/2195.active-directory-dsquery-commands.aspx) in Windows may be of help as you formulate the configuration. Your changes should be made within the curly braces of the `ldap_server ad_server { … }` section. You can troubleshoot configuration file syntax errors and LDAP connection or credentials issues by running `./scripts/logs` (or `docker-compose logs nginx`) and examining the output of the `nginx` container. 
+Before starting Malcolm, edit `nginx/nginx_ldap.conf` according to the specifics of your LDAP server and directory tree structure. Using a LDAP search tool such as [`ldapsearch`](https://www.openldap.org/software/man.cgi?query=ldapsearch) in Linux or [`dsquery`](https://social.technet.microsoft.com/wiki/contents/articles/2195.active-directory-dsquery-commands.aspx) in Windows may be of help as you formulate the configuration. Your changes should be made within the curly braces of the `ldap_server ad_server { … }` section. You can troubleshoot configuration file syntax errors and LDAP connection or credentials issues by running `./scripts/logs` (or `docker compose logs nginx`) and examining the output of the `nginx` container. The **Malcolm User Management** page described above is not available when using LDAP authentication. @@ -119,7 +119,7 @@ options: -v [DEBUG], --verbose [DEBUG] Verbose output -f , --file - docker-compose or kubeconfig YML file + Docker compose or kubeconfig YML file -e , --environment-dir Directory containing Malcolm's .env files diff --git a/docs/contributing-dashboards.md b/docs/contributing-dashboards.md index 5cb37c561..25baa0e4a 100644 --- a/docs/contributing-dashboards.md +++ b/docs/contributing-dashboards.md @@ -10,7 +10,7 @@ Visualizations and dashboards can be [easily created](dashboards.md#BuildDashboa 1. 
Export the dashboard with that ID and save it in the `./dashboards./dashboards/` directory with the following command: ``` export DASHID=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx && \ - docker-compose exec dashboards curl -XGET \ + docker compose exec dashboards curl -XGET \ "http://localhost:5601/dashboards/api/opensearch-dashboards/dashboards/export?dashboard=$DASHID" > \ ./dashboards/dashboards/$DASHID.json ``` diff --git a/docs/development.md b/docs/development.md index 37da486d6..8207e3582 100644 --- a/docs/development.md +++ b/docs/development.md @@ -38,7 +38,7 @@ Checking out the [Malcolm source code]({{ site.github.repository_url }}/tree/{{ and the following files of special note: -* `docker-compose.yml` - the configuration file used by `docker-compose` to build, start, and stop an instance of the Malcolm appliance +* `docker-compose.yml` - the configuration file used by `docker compose` to build, start, and stop an instance of the Malcolm appliance * `docker-compose-standalone.yml` - similar to `docker-compose.yml`, only used for the ["packaged"](#Packager) installation of Malcolm ## Building from source diff --git a/docs/host-config-linux.md b/docs/host-config-linux.md index 7bea01eb6..57350c335 100644 --- a/docs/host-config-linux.md +++ b/docs/host-config-linux.md @@ -21,9 +21,9 @@ Docker starts automatically on DEB-based distributions. On RPM-based distributio You can test Docker by running `docker info`, or (assuming you have internet access), `docker run --rm hello-world`. -## Installing docker-compose +## Installing docker compose -Please follow [this link](https://docs.docker.com/compose/install/) on docker.com for instructions on installing `docker-compose`. +Please follow [this link](https://docs.docker.com/compose/install/) on docker.com for instructions on installing the Docker Compose plugin. 
## Operating system configuration diff --git a/docs/host-config-macos.md b/docs/host-config-macos.md index 9ab66480e..e3b8d0f72 100644 --- a/docs/host-config-macos.md +++ b/docs/host-config-macos.md @@ -25,16 +25,25 @@ $ brew upgrade --cask --no-quarantine docker-edge ``` You can now run Docker from the Applications folder. -## Install docker-compose +## Install docker compose ``` $ brew install docker-compose ``` -This will install the latest version of the `docker-compose` plugin. It can be upgraded later using `brew` as well: + +This will install the latest version of the `docker-compose` plugin. It can be upgraded later using [`brew`] as well: + ``` $ brew upgrade --no-quarantine docker-compose ``` -You can now run `docker-compose` (at `/usr/local/opt/docker-compose/bin/docker-compose`) from the command-line + +The [brew formula for docker-compose notes](https://formulae.brew.sh/formula/docker-compose) has the following note about needing to symlink for Docker to find the compose plugin: + +``` +Compose is now a Docker plugin. For Docker to find this plugin, symlink it: + mkdir -p ~/.docker/cli-plugins + ln -sfn $HOMEBREW_PREFIX/opt/docker-compose/bin/docker-compose ~/.docker/cli-plugins/docker-compose +``` ## Configure docker daemon option diff --git a/docs/kubernetes.md b/docs/kubernetes.md index 91294d6c1..28b631d65 100644 --- a/docs/kubernetes.md +++ b/docs/kubernetes.md @@ -219,7 +219,7 @@ Settings that likely need to be changed in the underlying host running Kubernete The steps to configure and tune Malcolm for a Kubernetes deployment are [very similar](malcolm-config.md#ConfigAndTuning) to those for a Docker-based deployment. Both methods use [environment variable files](malcolm-config.md#MalcolmConfigEnvVars) for Malcolm's runtime configuration. -Malcolm's configuration and runtime scripts (e.g., `./scripts/configure`, `./scripts/auth_setup`, `./scripts/start`, etc.) are used for both Docker- and Kubernetes-based deployments. 
In order to indicate to these scripts that Kubernetes is being used rather than `docker-compose`, users can provide the script with the [kubeconfig file](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) used to communicate with the API server of the Kubernetes cluster (e.g., `./scripts/configure -f k3s.yaml` or `./scripts/start -f kubeconfig.yaml`, etc.). The scripts will detect whether the YAML file specified is a kubeconfig file or a Docker compose file and act accordingly. +Malcolm's configuration and runtime scripts (e.g., `./scripts/configure`, `./scripts/auth_setup`, `./scripts/start`, etc.) are used for both Docker- and Kubernetes-based deployments. In order to indicate to these scripts that Kubernetes is being used rather than `docker compose`, users can provide the script with the [kubeconfig file](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) used to communicate with the API server of the Kubernetes cluster (e.g., `./scripts/configure -f k3s.yaml` or `./scripts/start -f kubeconfig.yaml`, etc.). The scripts will detect whether the YAML file specified is a kubeconfig file or a Docker compose file and act accordingly. Run `./scripts/configure` and answer the questions to configure Malcolm. For an in-depth treatment of these configuration questions, see the **Configuration** section in **[End-to-end Malcolm and Hedgehog Linux ISO Installation](malcolm-hedgehog-e2e-iso-install.md#MalcolmConfig)**. Users will need to run [`./scripts/auth_setup`](authsetup.md#AuthSetup) to configure authentication. diff --git a/docs/malcolm-iso.md b/docs/malcolm-iso.md index 858930ab3..aaad018ad 100644 --- a/docs/malcolm-iso.md +++ b/docs/malcolm-iso.md @@ -45,7 +45,7 @@ Finished, created "/malcolm-build/malcolm-iso/malcolm-23.11.0.iso" … ``` -By default, Malcolm's Docker images are not packaged with the installer ISO. 
Malcolm assumes instead that users will pull the [latest images](https://github.com/orgs/idaholab/packages?repo_name=Malcolm) with a `docker-compose pull` command as described in the [Quick start](quickstart.md#QuickStart) section. To build an ISO with the latest Malcolm images included, follow the directions to create [pre-packaged installation files](development.md#Packager), which include a tarball with a name such as `malcolm_YYYYMMDD_HHNNSS_xxxxxxx_images.tar.xz`. Then, pass that images tarball to the ISO build script with a `-d`, like this: +By default, Malcolm's Docker images are not packaged with the installer ISO. Malcolm assumes instead that users will pull the [latest images](https://github.com/orgs/idaholab/packages?repo_name=Malcolm) with a `docker compose --profile malcolm pull` command as described in the [Quick start](quickstart.md#QuickStart) section. To build an ISO with the latest Malcolm images included, follow the directions to create [pre-packaged installation files](development.md#Packager), which include a tarball with a name such as `malcolm_YYYYMMDD_HHNNSS_xxxxxxx_images.tar.xz`. Then, pass that images tarball to the ISO build script with a `-d`, like this: ``` $ ./malcolm-iso/build_via_vagrant.sh -f -d malcolm_YYYYMMDD_HHNNSS_xxxxxxx_images.tar.xz @@ -83,6 +83,6 @@ Following these prompts, the installer will reboot and the Malcolm base operatin When the system boots for the first time, the Malcolm Docker images will load if the installer was built with pre-packaged installation files as described above. Wait for this operation to continue (the progress dialog will disappear when they have finished loading) before continuing the setup. -Open a terminal (click the red terminal 🗔 icon next to the Debian swirl logo 🍥 menu button in the menu bar). At this point, setup is similar to the steps described in the [Quick start](quickstart.md#QuickStart) section. 
Navigate to the Malcolm directory (`cd ~/Malcolm`) and run [`auth_setup`](authsetup.md#AuthSetup) to configure authentication. If the ISO does not include pre-packaged Malcolm images, or to retrieve the latest updates, run `docker-compose pull`. Finalize the configuration by running `scripts/configure` and follow the prompts as illustrated in the [installation example](malcolm-hedgehog-e2e-iso-install.md#MalcolmConfig). +Open a terminal (click the red terminal 🗔 icon next to the Debian swirl logo 🍥 menu button in the menu bar). At this point, setup is similar to the steps described in the [Quick start](quickstart.md#QuickStart) section. Navigate to the Malcolm directory (`cd ~/Malcolm`) and run [`auth_setup`](authsetup.md#AuthSetup) to configure authentication. If the ISO does not include pre-packaged Malcolm images, or to retrieve the latest updates, run `docker compose --profile malcolm pull`. Finalize the configuration by running `scripts/configure` and follow the prompts as illustrated in the [installation example](malcolm-hedgehog-e2e-iso-install.md#MalcolmConfig). Once Malcolm is configured, users can [start Malcolm](running.md#Starting) via the command line or by clicking the circular yellow Malcolm icon in the menu bar. \ No newline at end of file diff --git a/docs/malcolm-upgrade.md b/docs/malcolm-upgrade.md index db4bcda1b..284ad1359 100644 --- a/docs/malcolm-upgrade.md +++ b/docs/malcolm-upgrade.md @@ -20,7 +20,7 @@ Here are the basic steps to perform an upgrade if Malcolm was checked with a `gi 1. pull changes from GitHub repository * `git pull --rebase` 1. pull new Docker images (this will take a while) - * `docker-compose pull` + * `docker compose --profile malcolm pull` 1. apply saved configuration change stashed earlier * `git stash pop` 1. 
if `Merge conflict` messages appear, resolve the [conflicts](https://git-scm.com/book/en/v2/Git-Branching-Basic-Branching-and-Merging#_basic_merge_conflicts) with a text editor @@ -51,7 +51,7 @@ If Malcolm was installed from [pre-packaged installation files]({{ site.github.r + using a file comparison tool (e.g., `diff`, `meld`, `Beyond Compare`, etc.), compare `docker-compose.yml` and the `docker-compare.yml` file backed up in Step 3, and manually migrate over any customizations in file + compare the contents of each `.env` file Malcolm's `./config/` directory with its corresponding `.env.example` file 1. pull the new docker images (this will take a while) - * `docker-compose pull` to pull them from [GitHub](https://github.com/orgs/idaholab/packages?repo_name=Malcolm) or `docker-compose load -i malcolm_YYYYMMDD_HHNNSS_xxxxxxx_images.tar.xz` if an offline tarball of the Malcolm docker images is available + * `docker compose --profile malcolm pull` to pull them from [GitHub](https://github.com/orgs/idaholab/packages?repo_name=Malcolm) or `docker compose load -i malcolm_YYYYMMDD_HHNNSS_xxxxxxx_images.tar.xz` if an offline tarball of the Malcolm docker images is available 1. start Malcolm * `./scripts/start` 1. users may be prompted to [configure authentication](authsetup.md#AuthSetup) if there are new authentication-related files that need to be generated @@ -63,7 +63,7 @@ If Malcolm was installed from [pre-packaged installation files]({{ site.github.r Technically minded users may wish to follow the debug output provided by `./scripts/start` (use `./scripts/logs` to re-open the log stream after it's been closed), although there is a lot there and it may be hard to distinguish whether or not something is okay. -Running `docker-compose ps -a` should provide a good indication that all Malcolm's Docker containers started up and, in some cases, may be able to indicate if the containers are "healthy" or not. 
+Running `docker compose ps -a` should provide a good indication that all Malcolm's Docker containers started up and, in some cases, may be able to indicate if the containers are "healthy" or not. After upgrading following one of the previous outlines, give Malcolm several minutes to get started. Once things are up and running, open one of Malcolm's [web interfaces](quickstart.md#UserInterfaceURLs) to verify that things are working. diff --git a/docs/quickstart.md b/docs/quickstart.md index 7262ab8d4..fa180672e 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -26,9 +26,9 @@ You must run [`auth_setup`](authsetup.md#AuthSetup) prior to pulling Malcolm's D ### Pull Malcolm's Docker images -Malcolm's Docker images are periodically built and hosted on [GitHub](https://github.com/orgs/idaholab/packages?repo_name=Malcolm). If you already have [Docker](https://www.docker.com/) and [Docker Compose](https://docs.docker.com/compose/), these prebuilt images can be pulled by navigating into the Malcolm directory (containing the `docker-compose.yml` file) and running `docker-compose pull` like this: +Malcolm's Docker images are periodically built and hosted on [GitHub](https://github.com/orgs/idaholab/packages?repo_name=Malcolm). If you already have [Docker](https://www.docker.com/) and [Docker Compose](https://docs.docker.com/compose/), these prebuilt images can be pulled by navigating into the Malcolm directory (containing the `docker-compose.yml` file) and running `docker compose --profile malcolm pull` like this: ``` -$ docker-compose pull +$ docker compose --profile malcolm pull Pulling api ... done Pulling arkime ... done Pulling dashboards ... 
done diff --git a/docs/running.md b/docs/running.md index 48b633446..832be809f 100644 --- a/docs/running.md +++ b/docs/running.md @@ -37,13 +37,13 @@ To temporarily set the Malcolm user interfaces into read-only configuration, run First, to configure [Nginx](https://nginx.org/) to disable access to the upload and other interfaces for changing Malcolm settings, and to deny HTTP methods other than `GET` and `POST`: ``` -docker-compose exec nginx-proxy bash -c "cp /etc/nginx/nginx_readonly.conf /etc/nginx/nginx.conf && nginx -s reload" +docker compose exec nginx-proxy bash -c "cp /etc/nginx/nginx_readonly.conf /etc/nginx/nginx.conf && nginx -s reload" ``` Second, to set the existing OpenSearch data store to read-only: ``` -docker-compose exec dashboards-helper /data/opensearch_read_only.py -i _cluster +docker compose exec dashboards-helper /data/opensearch_read_only.py -i _cluster ``` These commands must be re-run every time Malcolm is restarted. diff --git a/docs/ubuntu-install-example.md b/docs/ubuntu-install-example.md index c08ddc0d4..c23a67b91 100644 --- a/docs/ubuntu-install-example.md +++ b/docs/ubuntu-install-example.md @@ -47,10 +47,10 @@ Enter user account: user Add another non-root user to the "docker" group?: n -"docker-compose version" failed, attempt to install docker-compose? (Y / n): y +"docker compose version" failed, attempt to install docker compose? (Y / n): y -Install docker-compose directly from docker github? (Y / n): y -Download and installation of docker-compose apparently succeeded +Install docker compose directly from docker github? (Y / n): y +Download and installation of docker compose apparently succeeded fs.file-max increases allowed maximum for file handles fs.file-max= appears to be missing from /etc/sysctl.conf, append it? 
(Y / n): y @@ -227,7 +227,7 @@ As an alternative to manually copying the files to the sensor, Malcolm can facil In this example, rather than [building Malcolm from scratch](development.md#Build), images may be pulled from [GitHub](https://github.com/orgs/idaholab/packages?repo_name=Malcolm): ``` -user@host:~/Malcolm$ docker-compose pull +user@host:~/Malcolm$ docker compose pull Pulling api ... done Pulling arkime ... done Pulling dashboards ... done diff --git a/docs/zeek-intel.md b/docs/zeek-intel.md index 065b7e701..20cd52427 100644 --- a/docs/zeek-intel.md +++ b/docs/zeek-intel.md @@ -13,7 +13,7 @@ Note that Malcolm does not manage updates for these intelligence files. You shou Adding and deleting intelligence files under this directory will take effect upon [restarting Malcolm](running.md#StopAndRestart). Alternately, you can use the `ZEEK_INTEL_REFRESH_CRON_EXPRESSION` environment variable containing a [cron expression](https://en.wikipedia.org/wiki/Cron#CRON_expression) to specify the interval at which the intel files should be refreshed. This can also be done manually without restarting Malcolm by running the following command from the Malcolm installation directory: ``` -docker-compose exec --user $(id -u) zeek /usr/local/bin/entrypoint.sh true +docker compose exec --user $(id -u) zeek /usr/local/bin/entrypoint.sh true ``` For a public example of Zeek intelligence files, see Critical Path Security's [repository](https://github.com/CriticalPathSecurity/Zeek-Intelligence-Feeds), which aggregates data from various other threat feeds into Zeek's format. 
From c9742bba335942feac28c913303d6688c0d6cbe1 Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 31 Oct 2023 08:40:07 -0600 Subject: [PATCH 17/82] update some of the documentation for docker compose (v2) as a plugin rather than a standalone script --- docs/host-config-macos.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/host-config-macos.md b/docs/host-config-macos.md index e3b8d0f72..0f86696e2 100644 --- a/docs/host-config-macos.md +++ b/docs/host-config-macos.md @@ -31,7 +31,7 @@ You can now run Docker from the Applications folder. $ brew install docker-compose ``` -This will install the latest version of the `docker-compose` plugin. It can be upgraded later using [`brew`] as well: +This will install the latest version of the Docker Compose plugin. It can be upgraded later using [`brew`] as well: ``` $ brew upgrade --no-quarantine docker-compose From be6a9dfd72025067b5a0bb0557816b6e79f4f871 Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 31 Oct 2023 09:11:13 -0600 Subject: [PATCH 18/82] fix zeek build --- .../config/includes.chroot/usr/local/etc/zeek/local.zeek | 6 +++--- zeek/config/local.zeek | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek b/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek index 71728bf7a..76dd9b86b 100644 --- a/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek +++ b/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek @@ -190,10 +190,10 @@ event zeek_init() &priority=-5 { } } if (|synch_ports_tcp| > 0) { - Analyzer::register_for_ports(Analyzer::ANALYZER_SPICY_SYNCHROPHASOR_TCP, synch_ports_tcp); + Analyzer::register_for_ports(Analyzer::ANALYZER_SYNCHROPHASOR_TCP, synch_ports_tcp); } if (|synch_ports_udp| > 0) { - Analyzer::register_for_ports(Analyzer::ANALYZER_SPICY_SYNCHROPHASOR_UDP, synch_ports_udp); + Analyzer::register_for_ports(Analyzer::ANALYZER_SYNCHROPHASOR_UDP, synch_ports_udp); } } } @@ 
-209,7 +209,7 @@ event zeek_init() &priority=-5 { } } if (|gen_ports_tcp| > 0) { - Analyzer::register_for_ports(Analyzer::ANALYZER_SPICY_GENISYS_TCP, gen_ports_tcp); + Analyzer::register_for_ports(Analyzer::ANALYZER_GENISYS_TCP, gen_ports_tcp); } } } diff --git a/zeek/config/local.zeek b/zeek/config/local.zeek index deadc9ace..1a7842447 100644 --- a/zeek/config/local.zeek +++ b/zeek/config/local.zeek @@ -190,10 +190,10 @@ event zeek_init() &priority=-5 { } } if (|synch_ports_tcp| > 0) { - Analyzer::register_for_ports(Analyzer::ANALYZER_SPICY_SYNCHROPHASOR_TCP, synch_ports_tcp); + Analyzer::register_for_ports(Analyzer::ANALYZER_SYNCHROPHASOR_TCP, synch_ports_tcp); } if (|synch_ports_udp| > 0) { - Analyzer::register_for_ports(Analyzer::ANALYZER_SPICY_SYNCHROPHASOR_UDP, synch_ports_udp); + Analyzer::register_for_ports(Analyzer::ANALYZER_SYNCHROPHASOR_UDP, synch_ports_udp); } } } @@ -209,7 +209,7 @@ event zeek_init() &priority=-5 { } } if (|gen_ports_tcp| > 0) { - Analyzer::register_for_ports(Analyzer::ANALYZER_SPICY_GENISYS_TCP, gen_ports_tcp); + Analyzer::register_for_ports(Analyzer::ANALYZER_GENISYS_TCP, gen_ports_tcp); } } } From 3aa72fc42167f9fae31f606d12f71a42449024c6 Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 31 Oct 2023 12:18:51 -0600 Subject: [PATCH 19/82] use spicy disable protocol analyzer for LDAP --- .../config/includes.chroot/usr/local/etc/zeek/local.zeek | 4 ++-- zeek/config/local.zeek | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek b/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek index 76dd9b86b..f34b2a398 100644 --- a/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek +++ b/sensor-iso/config/includes.chroot/usr/local/etc/zeek/local.zeek @@ -142,8 +142,8 @@ event zeek_init() &priority=-5 { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_IPSEC_IKE_UDP); } if (disable_spicy_ldap) { - 
Analyzer::disable_analyzer(Analyzer::ANALYZER_LDAP_TCP); - Analyzer::disable_analyzer(Analyzer::ANALYZER_LDAP_UDP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_LDAP_TCP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_LDAP_UDP); } if (disable_spicy_openvpn) { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_OPENVPN_TCP); diff --git a/zeek/config/local.zeek b/zeek/config/local.zeek index 1a7842447..284a8f905 100644 --- a/zeek/config/local.zeek +++ b/zeek/config/local.zeek @@ -142,8 +142,8 @@ event zeek_init() &priority=-5 { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_IPSEC_IKE_UDP); } if (disable_spicy_ldap) { - Analyzer::disable_analyzer(Analyzer::ANALYZER_LDAP_TCP); - Analyzer::disable_analyzer(Analyzer::ANALYZER_LDAP_UDP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_LDAP_TCP); + Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_LDAP_UDP); } if (disable_spicy_openvpn) { Spicy::disable_protocol_analyzer(Analyzer::ANALYZER_SPICY_OPENVPN_TCP); From 30a05d90a5787a08bc81e67e9fc4194e4ee82b53 Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 31 Oct 2023 12:37:54 -0600 Subject: [PATCH 20/82] point back upstream for a few of the plugins --- shared/bin/zeek_install_plugins.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/shared/bin/zeek_install_plugins.sh b/shared/bin/zeek_install_plugins.sh index 47ac71e7d..0458dbc62 100755 --- a/shared/bin/zeek_install_plugins.sh +++ b/shared/bin/zeek_install_plugins.sh @@ -73,11 +73,11 @@ ZKG_GITHUB_URLS=( "https://github.com/cisagov/icsnpp-dnp3" "https://github.com/cisagov/icsnpp-enip" "https://github.com/cisagov/icsnpp-ethercat" - "https://github.com/mmguero-dev/icsnpp-genisys" + "https://github.com/cisagov/icsnpp-genisys" "https://github.com/mmguero-dev/icsnpp-modbus" "https://github.com/cisagov/icsnpp-opcua-binary" "https://github.com/cisagov/icsnpp-s7comm" - "https://github.com/mmguero-dev/icsnpp-synchrophasor" + 
"https://github.com/cisagov/icsnpp-synchrophasor" "https://github.com/corelight/callstranger-detector" "https://github.com/corelight/CVE-2020-16898" "https://github.com/corelight/CVE-2021-31166" @@ -100,7 +100,7 @@ ZKG_GITHUB_URLS=( "https://github.com/corelight/zeek-spicy-wireguard" "https://github.com/mmguero-dev/zeek-xor-exe-plugin" "https://github.com/corelight/zerologon" - "https://github.com/mmguero-dev/zeek-sniffpass" + "https://github.com/cybera/zeek-sniffpass" "https://github.com/mmguero-dev/bzar" "https://github.com/ncsa/bro-is-darknet" "https://github.com/ncsa/bro-simple-scan" From 2a804c62a83ac02c20ab382ac7b95cf06169af4b Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 31 Oct 2023 14:13:31 -0600 Subject: [PATCH 21/82] use prefix.description instead of VRF for identifying subnets in NetBox (idaholab/Malcolm#280); needs testing --- config/netbox-common.env.example | 2 +- docs/asset-interaction-analysis.md | 9 ++- logstash/pipelines/enrichment/21_netbox.conf | 4 +- logstash/ruby/netbox_enrich.rb | 46 ++++++---------- netbox/preload/prefixes_defaults.yml | 3 - netbox/preload/vrfs_defaults.yml | 6 -- netbox/scripts/netbox_init.py | 58 +++----------------- scripts/install.py | 2 +- 8 files changed, 33 insertions(+), 97 deletions(-) delete mode 100644 netbox/preload/vrfs_defaults.yml diff --git a/config/netbox-common.env.example b/config/netbox-common.env.example index 0caba9062..000500b0c 100644 --- a/config/netbox-common.env.example +++ b/config/netbox-common.env.example @@ -3,7 +3,7 @@ # The name of the default "site" to be created upon NetBox initialization, and to be queried # for enrichment (see LOGSTASH_NETBOX_ENRICHMENT) NETBOX_DEFAULT_SITE=Malcolm -# Whether or not to create catch-all VRFs/IP Prefixes for private IP space +# Whether or not to create catch-all IP Prefixes for private IP space NETBOX_PRELOAD_PREFIXES=false # Whether to disable Malcolm's NetBox instance ('true') or not ('false') NETBOX_DISABLED=true diff --git 
a/docs/asset-interaction-analysis.md b/docs/asset-interaction-analysis.md index dced1e2ce..228541cde 100644 --- a/docs/asset-interaction-analysis.md +++ b/docs/asset-interaction-analysis.md @@ -31,11 +31,11 @@ As Zeek logs and Suricata alerts are parsed and enriched (if the `LOGSTASH_NETBO - `destination.device.site` (`/dcim/sites/`) - `destination.device.url` (`/dcim/devices/`) - `destination.device.details` (full JSON object, [only with `LOGSTASH_NETBOX_ENRICHMENT_VERBOSE: 'true'`](malcolm-config.md#MalcolmConfigEnvVars)) - - `destination.segment.id` (`/ipam/vrfs/{id}`) - - `destination.segment.name` (`/ipam/vrfs/`) + - `destination.segment.id` (`/ipam/prefixes/{id}`) + - `destination.segment.name` (`/ipam/prefixes/{description}`) - `destination.segment.site` (`/dcim/sites/`) - `destination.segment.tenant` (`/tenancy/tenants/`) - - `destination.segment.url` (`/ipam/vrfs/`) + - `destination.segment.url` (`/ipam/prefixes/`) - `destination.segment.details` (full JSON object, [only with `LOGSTASH_NETBOX_ENRICHMENT_VERBOSE: 'true'`](malcolm-config.md#MalcolmConfigEnvVars)) * `source.…` same as `destination.…` * collected as `related` fields (the [same approach](https://www.elastic.co/guide/en/ecs/current/ecs-related.html) used in ECS) @@ -78,7 +78,6 @@ The [Populating Data](https://docs.netbox.dev/en/stable/getting-started/populati The following elements of the NetBox data model are used by Malcolm for Asset Interaction Analysis. * Network segments - - [Virtual Routing and Forwarding (VRF)](https://docs.netbox.dev/en/stable/models/ipam/vrf/) - [Prefixes](https://docs.netbox.dev/en/stable/models/ipam/prefix/) * Network Hosts - [Devices](https://docs.netbox.dev/en/stable/models/dcim/device/) @@ -99,7 +98,7 @@ However, careful consideration should be made before enabling this feature: the Devices created using this autopopulate method will have their `status` field set to `staged`. 
It is recommended that users periodically review automatically-created devices for correctness and to fill in known details that couldn't be determined from network traffic. For example, the `manufacturer` field for automatically-created devices will be set based on the organizational unique identifier (OUI) determined from the first three bytes of the observed MAC address, which may not be accurate if the device's traffic was observed across a router. If possible, observed hostnames will be used in the naming of the automatically-created devices, falling back to the device manufacturer otherwise (e.g., `MYHOSTNAME @ 10.10.0.123` vs. `Schweitzer Engineering @ 10.10.0.123`). -Since device autocreation is based on IP address, information about network segments (including [virtual routing and forwarding (VRF)](https://docs.netbox.dev/en/stable/models/ipam/vrf/) and [prefixes](https://docs.netbox.dev/en/stable/models/ipam/prefix/)) must be first [manually specified](#NetBoxPopManual) in NetBox in order for devices to be automatically populated. +Since device autocreation is based on IP address, information about network segments (IP [prefixes](https://docs.netbox.dev/en/stable/models/ipam/prefix/)) must be first [manually specified](#NetBoxPopManual) in NetBox in order for devices to be automatically populated. Users should populate the `description` field in the NetBox IPAM Prefixes data model to specify a name to be used for NetBox network segment autopopulation and enrichment, otherwise the IP prefix itself will be used. Although network devices can be automatically created using this method, [services](https://demo.netbox.dev/static/docs/core-functionality/services/#service-templates) should inventoried manually. The **Uninventoried Observed Services** visualization in the [**Zeek Known Summary** dashboard](dashboards.md#DashboardsVisualizations) can help users review network services to be created in NetBox. 
diff --git a/logstash/pipelines/enrichment/21_netbox.conf b/logstash/pipelines/enrichment/21_netbox.conf index 66c0f34db..a4370ab8b 100644 --- a/logstash/pipelines/enrichment/21_netbox.conf +++ b/logstash/pipelines/enrichment/21_netbox.conf @@ -27,7 +27,7 @@ filter { script_params => { "source" => "[source][ip]" "target" => "[source][segment]" - "lookup_type" => "ip_vrf" + "lookup_type" => "ip_prefix" "lookup_site_env" => "NETBOX_DEFAULT_SITE" "verbose_env" => "LOGSTASH_NETBOX_ENRICHMENT_VERBOSE" "netbox_token_env" => "SUPERUSER_API_TOKEN" @@ -66,7 +66,7 @@ filter { script_params => { "source" => "[destination][ip]" "target" => "[destination][segment]" - "lookup_type" => "ip_vrf" + "lookup_type" => "ip_prefix" "lookup_site_env" => "NETBOX_DEFAULT_SITE" "verbose_env" => "LOGSTASH_NETBOX_ENRICHMENT_VERBOSE" "netbox_token_env" => "SUPERUSER_API_TOKEN" diff --git a/logstash/ruby/netbox_enrich.rb b/logstash/ruby/netbox_enrich.rb index fedc370dd..7ca8d0e13 100644 --- a/logstash/ruby/netbox_enrich.rb +++ b/logstash/ruby/netbox_enrich.rb @@ -20,7 +20,7 @@ def register(params) @source = params["source"] # lookup type - # valid values are: ip_device, ip_vrf + # valid values are: ip_device, ip_prefix @lookup_type = params.fetch("lookup_type", "").to_sym # site value to include in queries for enrichment lookups, either specified directly or read from ENV @@ -257,12 +257,12 @@ def filter(event) _autopopulate_ip = nil _autopopulate_manuf = nil _autopopulate_site = nil - _vrfs = nil + _prefixes = nil _devices = nil _exception_error = false # handle :ip_device first, because if we're doing autopopulate we're also going to use - # some of the logic from :ip_vrf + # some of the logic from :ip_prefix if (_lookup_type == :ip_device) ################################################################################# @@ -630,13 +630,13 @@ def filter(event) _lookup_result = _devices end # _lookup_type == :ip_device - # this || is because we are going to need to do the VRF lookup if we're 
autopopulating + # this || is because we are going to need to do the prefix lookup if we're autopopulating # as well as if we're specifically requested to do that enrichment - if (_lookup_type == :ip_vrf) || !_autopopulate_device.nil? + if (_lookup_type == :ip_prefix) || !_autopopulate_device.nil? ################################################################################# - # retrieve the list VRFs containing IP address prefixes containing the search key - _vrfs = Array.new + # retrieve the list of IP address prefixes containing the search key + _prefixes = Array.new _query = { :contains => _key, :offset => 0, :limit => _page_size } @@ -648,16 +648,14 @@ def filter(event) then _tmp_prefixes = _prefixes_response.fetch(:results, []) _tmp_prefixes.each do |p| - if (_vrf = p.fetch(:vrf, nil)) - # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } - # if _verbose, include entire object as :details - _vrfs << { :name => _vrf.fetch(:name, _vrf.fetch(:display, nil)), - :id => _vrf.fetch(:id, nil), - :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), - :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), - :url => p.fetch(:url, _vrf.fetch(:url, nil)), - :details => _verbose ? _vrf.merge({:prefix => p.tap { |h| h.delete(:vrf) }}) : nil } - end + # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } + # if _verbose, include entire object as :details + _prefixes << { :name => p.fetch(:description, p.fetch(:display, nil)), + :id => p.fetch(:id, nil), + :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), + :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? _tenant[:name] : _tenant&.fetch(:display, nil), + :url => p.fetch(:url, p.fetch(:url, nil)), + :details => _verbose ? 
p : nil } end _query[:offset] += _tmp_prefixes.length() break unless (_tmp_prefixes.length() >= _page_size) @@ -669,9 +667,9 @@ def filter(event) # give up aka do nothing _exception_error = true end - _vrfs = collect_values(crush(_vrfs)) - _lookup_result = _vrfs unless (_lookup_type != :ip_vrf) - end # _lookup_type == :ip_vrf + _prefixes = collect_values(crush(_prefixes)) + _lookup_result = _prefixes unless (_lookup_type != :ip_prefix) + end # _lookup_type == :ip_prefix if !_autopopulate_device.nil? && _autopopulate_device.fetch(:id, nil)&.nonzero? # device has been created, we need to create an interface for it @@ -681,9 +679,6 @@ def filter(event) if !_autopopulate_mac.nil? && !_autopopulate_mac.empty? _interface_data[:mac_address] = _autopopulate_mac.is_a?(Array) ? _autopopulate_mac.first : _autopopulate_mac end - if !_vrfs.nil? && !_vrfs.empty? - _interface_data[:vrf] = _vrfs.fetch(:id, []).first - end if (_interface_create_reponse = _nb.post(_autopopulate_manuf[:vm] ? 'virtualization/interfaces/' : 'dcim/interfaces/', _interface_data.to_json, _nb_headers).body) && _interface_create_reponse.is_a?(Hash) && _interface_create_reponse.has_key?(:id) @@ -697,11 +692,6 @@ def filter(event) :assigned_object_type => _autopopulate_manuf[:vm] ? 
"virtualization.vminterface" : "dcim.interface", :assigned_object_id => _autopopulate_interface[:id], :status => "active" } - if (_vrf = _autopopulate_interface.fetch(:vrf, nil)) && - (_vrf.has_key?(:id)) - then - _ip_data[:vrf] = _vrf[:id] - end if (_ip_create_reponse = _nb.post('ipam/ip-addresses/', _ip_data.to_json, _nb_headers).body) && _ip_create_reponse.is_a?(Hash) && _ip_create_reponse.has_key?(:id) diff --git a/netbox/preload/prefixes_defaults.yml b/netbox/preload/prefixes_defaults.yml index 6e9fe981f..0fdb935f1 100644 --- a/netbox/preload/prefixes_defaults.yml +++ b/netbox/preload/prefixes_defaults.yml @@ -1,6 +1,3 @@ - prefix: 10.0.0.0/8 - vrf: 10.0.0.0/8 - prefix: 172.16.0.0/12 - vrf: 172.16.0.0/12 - prefix: 192.168.0.0/16 - vrf: 192.168.0.0/16 diff --git a/netbox/preload/vrfs_defaults.yml b/netbox/preload/vrfs_defaults.yml deleted file mode 100644 index d83018c72..000000000 --- a/netbox/preload/vrfs_defaults.yml +++ /dev/null @@ -1,6 +0,0 @@ -- enforce_unique: true - name: 10.0.0.0/8 -- enforce_unique: true - name: 172.16.0.0/12 -- enforce_unique: true - name: 192.168.0.0/16 diff --git a/netbox/scripts/netbox_init.py b/netbox/scripts/netbox_init.py index a41063f9b..ab1aba957 100755 --- a/netbox/scripts/netbox_init.py +++ b/netbox/scripts/netbox_init.py @@ -249,7 +249,7 @@ def main(): nargs='?', const=True, default=malcolm_utils.str2bool(os.getenv('NETBOX_PRELOAD_PREFIXES', default='False')), - help="Preload IPAM VRFs/IP Prefixes for private IP space", + help="Preload IPAM IP Prefixes for private IP space", ) try: parser.error = parser.exit @@ -277,7 +277,6 @@ def main(): sites = {} groups = {} permissions = {} - vrfs = {} prefixes = {} devices = {} interfaces = {} @@ -481,33 +480,7 @@ def main(): with open(args.netMapFileName) as f: netMapJson = json.load(f) if netMapJson is not None: - # create new VRFs - vrfPreExisting = {x.name: x for x in nb.ipam.vrfs.all()} - logging.debug(f"VRFs (before): { {k:v.id for k, v in vrfPreExisting.items()} }") - - for 
segment in [ - x - for x in get_iterable(netMapJson) - if isinstance(x, dict) - and (x.get('type', '') == "segment") - and x.get('name', None) - and is_ip_network(x.get('address', None)) - and x['name'] not in vrfPreExisting - ]: - try: - nb.ipam.vrfs.create( - { - "name": segment['name'], - "enforce_unique": True, - }, - ) - except pynetbox.RequestError as nbe: - logging.warning(f"{type(nbe).__name__} processing VRF \"{segment['name']}\": {nbe}") - - vrfs = {x.name: x for x in nb.ipam.vrfs.all()} - logging.debug(f"VRFs (after): { {k:v.id for k, v in vrfs.items()} }") - - # create prefixes in VRFs + # create IP prefixes prefixesPreExisting = {x.prefix: x for x in nb.ipam.prefixes.all()} logging.debug(f"prefixes (before): { {k:v.id for k, v in prefixesPreExisting.items()} }") @@ -519,7 +492,6 @@ def main(): and (x.get('type', '') == "segment") and x.get('name', None) and is_ip_network(x.get('address', None)) - and x['name'] in vrfs ]: try: site = min_hash_value_by_value(sites) @@ -527,7 +499,7 @@ def main(): { "prefix": segment['address'], "site": site.id if site else None, - "vrf": vrfs[segment['name']].id, + "description": segment['name'], }, ) except pynetbox.RequestError as nbe: @@ -566,19 +538,11 @@ def main(): if deviceCreated is not None: # create interface for the device if is_ip_address(host['address']): - hostVrf = max_hash_value_by_key( - { - ipaddress.ip_network(k): v - for k, v in prefixes.items() - if ipaddress.ip_address(host['address']) in ipaddress.ip_network(k) - } - ) nb.dcim.interfaces.create( { "device": deviceCreated.id, "name": "default", "type": "other", - "vrf": hostVrf.id if hostVrf else None, }, ) elif re.match(r'^([0-9a-f]{2}[:-]){5}([0-9a-f]{2})$', host['address'].lower()): @@ -600,7 +564,7 @@ def main(): logging.debug(f"interfaces (after): { {k:v.id for k, v in interfaces.items()} }") # and associate IP addresses with them - ipAddressesPreExisting = {f"{x.address}:{x.vrf.id if x.vrf else ''}": x for x in nb.ipam.ip_addresses.all()} + 
ipAddressesPreExisting = {x.address: x for x in nb.ipam.ip_addresses.all()} logging.debug(f"IP addresses (before): { {k:v.id for k, v in ipAddressesPreExisting.items()} }") for host in [ @@ -613,19 +577,11 @@ def main(): and x['name'] in devices ]: try: - hostVrf = max_hash_value_by_key( - { - ipaddress.ip_network(k): v - for k, v in prefixes.items() - if ipaddress.ip_address(host['address']) in ipaddress.ip_network(k) - } - ) - hostKey = f"{host['address']}/{'32' if is_ip_v4_address(host['address']) else '128'}:{hostVrf.id if hostVrf else ''}" + hostKey = f"{host['address']}/{'32' if is_ip_v4_address(host['address']) else '128'}" if hostKey not in ipAddressesPreExisting: ipCreated = nb.ipam.ip_addresses.create( { "address": host['address'], - "vrf": hostVrf.id if hostVrf else None, "assigned_object_type": "dcim.interface", "assigned_object_id": interfaces[devices[host['name']].id].id, }, @@ -643,7 +599,7 @@ def main(): except pynetbox.RequestError as nbe: logging.warning(f"{type(nbe).__name__} processing address \"{host['address']}\": {nbe}") - ipAddresses = {f"{x.address}:{x.vrf}": x for x in nb.ipam.ip_addresses.all()} + ipAddresses = {x.address: x for x in nb.ipam.ip_addresses.all()} logging.debug(f"IP addresses (after): { {k:v.id for k, v in ipAddresses.items()} }") except Exception as e: @@ -659,7 +615,7 @@ def main(): with tempfile.TemporaryDirectory() as tmpPreloadDir: copy_tree(args.preloadDir, tmpPreloadDir) - # only preload catch-all VRFs and IP Prefixes if explicitly specified and they don't already exist + # only preload catch-all IP Prefixes if explicitly specified and they don't already exist if args.preloadPrefixes: for loadType in ('vrfs', 'prefixes'): defaultFileName = os.path.join(tmpPreloadDir, f'{loadType}_defaults.yml') diff --git a/scripts/install.py b/scripts/install.py index 890225a1f..dbf96fc7f 100755 --- a/scripts/install.py +++ b/scripts/install.py @@ -3685,7 +3685,7 @@ def main(): nargs='?', const=True, default=False, - help="Preload 
NetBox IPAM VRFs/IP Prefixes for private IP space", + help="Preload NetBox IPAM IP Prefixes for private IP space", ) netboxArgGroup.add_argument( '--netbox-site-name', From 69ce77ca783ddb61415f4223cf5d53dd2698d8f5 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Wed, 1 Nov 2023 08:36:15 -0600 Subject: [PATCH 22/82] use docker compose as a plugin rather than docker-compose when possible --- docs/host-config-macos.md | 6 +-- logstash/pipelines/zeek/11_zeek_parse.conf | 4 +- .../includes.chroot/etc/bash.bash_functions | 6 +-- scripts/build.sh | 31 +++++++----- scripts/control.py | 40 +++++++++------ scripts/demo/reset_and_auto_populate.sh | 45 ++++++++++++----- scripts/install.py | 49 ++++++++++--------- scripts/malcolm_common.py | 2 + scripts/malcolm_utils.py | 20 ++++++-- .../aws/ami/scripts/Malcolm_AMI_Setup.sh | 4 +- 10 files changed, 132 insertions(+), 75 deletions(-) diff --git a/docs/host-config-macos.md b/docs/host-config-macos.md index 0f86696e2..200b7c485 100644 --- a/docs/host-config-macos.md +++ b/docs/host-config-macos.md @@ -14,14 +14,14 @@ $ brew install cask $ brew tap homebrew/cask-versions ``` -## Install docker-edge +## Install docker ``` -$ brew install --cask docker-edge +$ brew install --cask docker ``` This will install the latest version of `docker`. It can be upgraded later using `brew` as well: ``` -$ brew upgrade --cask --no-quarantine docker-edge +$ brew upgrade --cask --no-quarantine docker ``` You can now run Docker from the Applications folder. diff --git a/logstash/pipelines/zeek/11_zeek_parse.conf b/logstash/pipelines/zeek/11_zeek_parse.conf index 2853a8485..0c9716b53 100644 --- a/logstash/pipelines/zeek/11_zeek_parse.conf +++ b/logstash/pipelines/zeek/11_zeek_parse.conf @@ -6,9 +6,9 @@ # # to profile, debug: # - get filters sorted by execution time (where in > 0) -# $ docker-compose exec logstash curl -XGET http://localhost:9600/_node/stats/pipelines | jq -r '.. | .filters? 
// empty | .[] | objects | select (.events.in > 0) | [.id, .events.in, .events.out, .events.duration_in_millis] | join (";")' | sort -n -t ';' -k4 +# $ docker compose exec logstash curl -XGET http://localhost:9600/_node/stats/pipelines | jq -r '.. | .filters? // empty | .[] | objects | select (.events.in > 0) | [.id, .events.in, .events.out, .events.duration_in_millis] | join (";")' | sort -n -t ';' -k4 # - get filters where in != out -# $ docker-compose exec logstash curl -XGET http://localhost:9600/_node/stats/pipelines | jq -r '.. | .filters? // empty | .[] | objects | select (.events.in != .events.out) | [.id, .events.in, .events.out, .events.duration_in_millis] | join (";")' +# $ docker compose exec logstash curl -XGET http://localhost:9600/_node/stats/pipelines | jq -r '.. | .filters? // empty | .[] | objects | select (.events.in != .events.out) | [.id, .events.in, .events.out, .events.duration_in_millis] | join (";")' # # Copyright (c) 2023 Battelle Energy Alliance, LLC. All rights reserved. 
####################### diff --git a/malcolm-iso/config/includes.chroot/etc/bash.bash_functions b/malcolm-iso/config/includes.chroot/etc/bash.bash_functions index bfc6b2487..6dba56c0c 100644 --- a/malcolm-iso/config/includes.chroot/etc/bash.bash_functions +++ b/malcolm-iso/config/includes.chroot/etc/bash.bash_functions @@ -477,7 +477,7 @@ function drun() { } # docker compose -alias dc="docker-compose" +alias dc="docker compose" # Get latest container ID alias dl="docker ps -l -q" @@ -562,9 +562,9 @@ function malcolmmonitor () { select-pane -t 5 \; \ send-keys 'while true; do clear; free -m | grep ^Mem: | cut -d" " -f2- | sed "s/[[:space:]]\+/,/g" | sed "s/^,//" ; sleep 60; done' C-m \; \ select-pane -t 6 \; \ - send-keys "while true; do clear; pushd ~/Malcolm >/dev/null 2>&1; docker-compose exec -u $(id -u) api curl -sSL 'http://localhost:5000/mapi/agg/event.dataset?from=1970' | python3 -m json.tool | grep -P '\b(doc_count|key)\b' | tr -d '\", ' | cut -d: -f2 | paste - - -d'\t\t' | head -n $(( (MAX_HEIGHT / 2) - 1 )) ; popd >/dev/null 2>&1; sleep 60; done" C-m \; \ + send-keys "while true; do clear; pushd ~/Malcolm >/dev/null 2>&1; docker compose exec -u $(id -u) api curl -sSL 'http://localhost:5000/mapi/agg/event.dataset?from=1970' | python3 -m json.tool | grep -P '\b(doc_count|key)\b' | tr -d '\", ' | cut -d: -f2 | paste - - -d'\t\t' | head -n $(( (MAX_HEIGHT / 2) - 1 )) ; popd >/dev/null 2>&1; sleep 60; done" C-m \; \ select-pane -t 7 \; \ - send-keys "while true; do clear; pushd ~/Malcolm >/dev/null 2>&1; docker-compose exec -u $(id -u) api curl -sSL 'http://localhost:5000/mapi/agg?from=1970' | python3 -m json.tool | grep -P '\b(doc_count|key)\b' | tr -d '\", ' | cut -d: -f2 | paste - - -d'\t\t' ; popd >/dev/null 2>&1; sleep 60; done" C-m \; \ + send-keys "while true; do clear; pushd ~/Malcolm >/dev/null 2>&1; docker compose exec -u $(id -u) api curl -sSL 'http://localhost:5000/mapi/agg?from=1970' | python3 -m json.tool | grep -P '\b(doc_count|key)\b' | tr -d 
'\", ' | cut -d: -f2 | paste - - -d'\t\t' ; popd >/dev/null 2>&1; sleep 60; done" C-m \; \ split-window -v \; \ select-pane -t 8 \; \ send-keys "while true; do clear; find ~/Malcolm/zeek-logs/extract_files -type f | sed 's@.*/\(.*\)/.*@\1@' | sort | uniq -c | sort -nr; sleep 60; done" C-m \; \ diff --git a/scripts/build.sh b/scripts/build.sh index 1fd2251e1..a4ff97c27 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -15,21 +15,30 @@ if ! (type "$REALPATH" && type "$DIRNAME" && type "$GREP") > /dev/null; then exit 1 fi -if docker-compose version >/dev/null 2>&1; then - DOCKER_COMPOSE_BIN=docker-compose +DOCKER_COMPOSE_BIN=() +if docker compose version >/dev/null 2>&1; then + DOCKER_COMPOSE_BIN=(docker compose) DOCKER_BIN=docker -elif $GREP -q Microsoft /proc/version && docker-compose.exe version >/dev/null 2>&1; then - DOCKER_COMPOSE_BIN=docker-compose.exe - DOCKER_BIN=docker.exe +elif docker-compose version >/dev/null 2>&1; then + DOCKER_COMPOSE_BIN=(docker-compose) + DOCKER_BIN=docker +elif $GREP -q Microsoft /proc/version; then + if docker.exe compose version >/dev/null 2>&1; then + DOCKER_COMPOSE_BIN=(docker.exe compose) + DOCKER_BIN=docker.exe + elif docker-compose.exe version >/dev/null 2>&1; then + DOCKER_COMPOSE_BIN=(docker-compose.exe) + DOCKER_BIN=docker.exe + fi fi if [[ -f "$1" ]]; then CONFIG_FILE="$1" - DOCKER_COMPOSE_COMMAND="$DOCKER_COMPOSE_BIN --profile malcolm -f "$CONFIG_FILE"" + DOCKER_COMPOSE_COMMAND="${DOCKER_COMPOSE_BIN[@]} --profile malcolm -f "$CONFIG_FILE"" shift # use remainder of arguments for services else CONFIG_FILE="docker-compose.yml" - DOCKER_COMPOSE_COMMAND="$DOCKER_COMPOSE_BIN --profile malcolm" + DOCKER_COMPOSE_COMMAND="${DOCKER_COMPOSE_BIN[@]} --profile malcolm" fi function filesize_in_image() { @@ -52,14 +61,14 @@ pushd "$SCRIPT_PATH/.." >/dev/null 2>&1 # make sure docker is installed, at this point it's required if ! $DOCKER_BIN info >/dev/null 2>&1 ; then echo "Docker is not installed, or not runable as this user." 
- echo "Install docker (install.py may help with that) and try again later." + echo "Install Docker (install.py may help with that) and try again." exit 1 fi -# make sure docker-compose is installed, at this point it's required -if ! $DOCKER_COMPOSE_BIN version >/dev/null 2>&1 ; then +# make sure docker compose is installed, at this point it's required +if (( ${#DOCKER_COMPOSE_BIN[@]} == 0 )); then echo "Docker Compose is not installed, or not runable as this user." - echo "Install docker-compose (install.py may help with that) and try again later." + echo "Install Docker Compose (install.py may help with that) and try again." exit 1 fi diff --git a/scripts/control.py b/scripts/control.py index 798ab9da1..d72119094 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -114,6 +114,8 @@ def __exit__(self, *args): args = None dockerBin = None +# dockerComposeBin might be e.g., ('docker', 'compose') or 'docker-compose', +# it will be flattened in run_process dockerComposeBin = None dockerComposeYaml = None kubeImported = None @@ -2267,30 +2269,38 @@ def main(): osEnv['TMPDIR'] = MalcolmTmpPath if orchMode is OrchestrationFramework.DOCKER_COMPOSE: - # make sure docker/docker-compose is available + # make sure docker and docker compose are available dockerBin = 'docker.exe' if ((pyPlatform == PLATFORM_WINDOWS) and which('docker.exe')) else 'docker' - if (pyPlatform == PLATFORM_WINDOWS) and which('docker-compose.exe'): - dockerComposeBin = 'docker-compose.exe' - elif which('docker-compose'): - dockerComposeBin = 'docker-compose' - elif os.path.isfile('/usr/libexec/docker/cli-plugins/docker-compose'): - dockerComposeBin = '/usr/libexec/docker/cli-plugins/docker-compose' - elif os.path.isfile('/usr/local/opt/docker-compose/bin/docker-compose'): - dockerComposeBin = '/usr/local/opt/docker-compose/bin/docker-compose' - elif os.path.isfile('/usr/local/bin/docker-compose'): - dockerComposeBin = '/usr/local/bin/docker-compose' - elif 
os.path.isfile('/usr/bin/docker-compose'): - dockerComposeBin = '/usr/bin/docker-compose' - else: - dockerComposeBin = 'docker-compose' err, out = run_process([dockerBin, 'info'], debug=args.debug) if err != 0: raise Exception(f'{ScriptName} requires docker, please run install.py') + # first check if compose is available as a docker plugin + dockerComposeBin = (dockerBin, 'compose') err, out = run_process( [dockerComposeBin, '--profile', PROFILE_MALCOLM, '-f', args.composeFile, 'version'], env=osEnv, debug=args.debug, ) + if err != 0: + if (pyPlatform == PLATFORM_WINDOWS) and which('docker-compose.exe'): + dockerComposeBin = 'docker-compose.exe' + elif which('docker-compose'): + dockerComposeBin = 'docker-compose' + elif os.path.isfile('/usr/libexec/docker/cli-plugins/docker-compose'): + dockerComposeBin = '/usr/libexec/docker/cli-plugins/docker-compose' + elif os.path.isfile('/usr/local/opt/docker-compose/bin/docker-compose'): + dockerComposeBin = '/usr/local/opt/docker-compose/bin/docker-compose' + elif os.path.isfile('/usr/local/bin/docker-compose'): + dockerComposeBin = '/usr/local/bin/docker-compose' + elif os.path.isfile('/usr/bin/docker-compose'): + dockerComposeBin = '/usr/bin/docker-compose' + else: + dockerComposeBin = 'docker-compose' + err, out = run_process( + [dockerComposeBin, '--profile', PROFILE_MALCOLM, '-f', args.composeFile, 'version'], + env=osEnv, + debug=args.debug, + ) if err != 0: raise Exception(f'{ScriptName} requires docker-compose, please run install.py') diff --git a/scripts/demo/reset_and_auto_populate.sh b/scripts/demo/reset_and_auto_populate.sh index d07634217..38dea9057 100755 --- a/scripts/demo/reset_and_auto_populate.sh +++ b/scripts/demo/reset_and_auto_populate.sh @@ -57,6 +57,7 @@ NUMERIC_REGEX='^[0-9]+$' # get directory script is executing from [[ -n $MACOS ]] && REALPATH=grealpath || REALPATH=realpath [[ -n $MACOS ]] && DIRNAME=gdirname || DIRNAME=dirname +[[ -n $MACOS ]] && GREP=ggrep || GREP=grep if ! 
(type "$REALPATH" && type "$DIRNAME") > /dev/null; then echo "$(basename "${BASH_SOURCE[0]}") requires $REALPATH and $DIRNAME" >&2 exit 1 @@ -77,6 +78,7 @@ RESTART="false" READ_ONLY="false" NGINX_DISABLE="false" MALCOLM_DOCKER_COMPOSE="$FULL_PWD"/docker-compose.yml +MALCOLM_PROFILE=malcolm NETBOX_BACKUP_FILE="" PCAP_FILES=() PCAP_ADJUST_SCRIPT="" @@ -86,7 +88,7 @@ PCAP_PROCESS_PRE_WAIT=120 PCAP_PROCESS_IDLE_SECONDS=180 PCAP_PROCESS_IDLE_MAX_SECONDS=3600 NETBOX_INIT_MAX_SECONDS=300 -while getopts 'vwronlb:m:i:x:s:d:' OPTION; do +while getopts 'vwronlb:m:i:x:s:d:p:' OPTION; do case "$OPTION" in v) VERBOSE_FLAG="-v" @@ -120,6 +122,10 @@ while getopts 'vwronlb:m:i:x:s:d:' OPTION; do MALCOLM_DOCKER_COMPOSE="$OPTARG" ;; + p) + MALCOLM_PROFILE="$OPTARG" + ;; + s) if [[ $OPTARG =~ $NUMERIC_REGEX ]] ; then PCAP_PROCESS_IDLE_SECONDS=$OPTARG @@ -161,6 +167,19 @@ else PCAP_ADJUST_SCRIPT="" fi +DOCKER_COMPOSE_BIN=() +if docker compose version >/dev/null 2>&1; then + DOCKER_COMPOSE_BIN=(docker compose) +elif docker-compose version >/dev/null 2>&1; then + DOCKER_COMPOSE_BIN=(docker-compose) +elif [[ -n $WINDOWS ]]; then + if docker.exe compose version >/dev/null 2>&1; then + DOCKER_COMPOSE_BIN=(docker.exe compose) + elif docker-compose.exe version >/dev/null 2>&1; then + DOCKER_COMPOSE_BIN=(docker-compose.exe) + fi +fi + [[ -n $VERBOSE_FLAG ]] && echo "$(basename "${BASH_SOURCE[0]}") in \"${SCRIPT_PATH}\" called from \"${FULL_PWD}\"" >&2 && set -x ############################################################################### @@ -199,7 +218,7 @@ function urlencode() { trap clean_up EXIT if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ - which docker-compose >/dev/null 2>&1 && \ + (( ${#DOCKER_COMPOSE_BIN[@]} > 0 )) && \ which jq >/dev/null 2>&1; then mkdir -p "$WORKDIR" @@ -267,11 +286,11 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ fi if [[ "$NGINX_DISABLE" == "true" ]]; then - docker-compose -f "$MALCOLM_FILE" pause nginx-proxy + ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" 
-f "$MALCOLM_FILE" pause nginx-proxy fi # wait for logstash to be ready for Zeek logs to be ingested - until docker-compose -f "$MALCOLM_FILE" logs logstash 2>/dev/null | grep -q "Pipelines running"; do + until ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" -f "$MALCOLM_FILE" logs logstash 2>/dev/null | $GREP -q "Pipelines running"; do [[ -n $VERBOSE_FLAG ]] && echo "waiting for Malcolm to become ready for PCAP data..." >&2 sleep 10 done @@ -283,7 +302,7 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ # wait for NetBox to be ready with the initial startup before we go mucking around CURRENT_TIME=$(date -u +%s) FIRST_NETBOX_INIT_CHECK_TIME=$CURRENT_TIME - until docker-compose -f "$MALCOLM_FILE" logs netbox 2>/dev/null | grep -q "Unit configuration loaded successfully"; do + until ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" -f "$MALCOLM_FILE" logs netbox 2>/dev/null | $GREP -q "Unit configuration loaded successfully"; do [[ -n $VERBOSE_FLAG ]] && echo "waiting for NetBox initialization to complete..." >&2 sleep 10 # if it's been more than the maximum wait time, bail @@ -318,7 +337,7 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ fi # get the total number of session records in the database - NEW_LOG_COUNT=$(( docker-compose -f "$MALCOLM_FILE" exec -u $(id -u) -T api \ + NEW_LOG_COUNT=$(( ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" -f "$MALCOLM_FILE" exec -u $(id -u) -T api \ curl -sSL "http://localhost:5000/mapi/agg/event.provider?from=1970" | \ jq -r '.. | .buckets? 
// empty | .[] | objects | [.doc_count|tostring] | join ("")' | \ awk '{s+=$1} END {print s}') 2>/dev/null ) @@ -349,7 +368,7 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ fi if [[ "$NGINX_DISABLE" == "true" ]]; then - docker-compose -f "$MALCOLM_FILE" unpause nginx-proxy + ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" -f "$MALCOLM_FILE" unpause nginx-proxy sleep 10 fi @@ -357,8 +376,8 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ [[ -n $VERBOSE_FLAG ]] && echo "Ensuring creation of user accounts prior to setting to read-only" >&2 for USER in \ $(cat nginx/htpasswd | cut -d: -f1) \ - $(grep -q -P "NGINX_BASIC_AUTH\s*=s*no_authentication" "$MALCOLM_PATH"/config/auth-common.env && echo guest); do - docker-compose -f "$MALCOLM_FILE" exec -T arkime curl -ksSL -XGET \ + $($GREP -q -P "NGINX_BASIC_AUTH\s*=s*no_authentication" "$MALCOLM_PATH"/config/auth-common.env && echo guest); do + ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" -f "$MALCOLM_FILE" exec -T arkime curl -ksSL -XGET \ --header 'Content-type:application/json' \ --header "http_auth_http_user:$USER" \ --header "Authorization:" \ @@ -366,12 +385,12 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ done sleep 5 [[ -n $VERBOSE_FLAG ]] && echo "Setting cluster to read-only" >&2 - docker-compose -f "$MALCOLM_FILE" exec -T nginx-proxy bash -c "cp /etc/nginx/nginx_readonly.conf /etc/nginx/nginx.conf && nginx -s reload" + ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" -f "$MALCOLM_FILE" exec -T nginx-proxy bash -c "cp /etc/nginx/nginx_readonly.conf /etc/nginx/nginx.conf && nginx -s reload" sleep 5 - docker-compose -f "$MALCOLM_FILE" exec -T dashboards-helper /data/opensearch_read_only.py -i _cluster + ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" -f "$MALCOLM_FILE" exec -T dashboards-helper /data/opensearch_read_only.py -i _cluster sleep 5 for CONTAINER in htadmin filebeat logstash upload pcap-monitor zeek zeek-live suricata suricata-live pcap-capture freq; do - docker-compose -f 
"$MALCOLM_FILE" pause "$CONTAINER" || true + ${DOCKER_COMPOSE_BIN[@]} --profile "$MALCOLM_PROFILE" -f "$MALCOLM_FILE" pause "$CONTAINER" || true done sleep 5 fi @@ -381,6 +400,6 @@ if [[ -f "$MALCOLM_DOCKER_COMPOSE" ]] && \ [[ -n $VERBOSE_FLAG ]] && echo "Finished" >&2 else echo "must specify docker-compose.yml file with -m and PCAP file(s)" >&2 - echo "also, pcap_time_shift.py, docker-compose and jq must be available" + echo "also, pcap_time_shift.py, docker compose and jq must be available" exit 1 fi \ No newline at end of file diff --git a/scripts/install.py b/scripts/install.py index dbf96fc7f..6a6479923 100755 --- a/scripts/install.py +++ b/scripts/install.py @@ -72,6 +72,7 @@ DATABASE_MODE_ENUMS, deep_get, eprint, + flatten, run_process, same_file_or_dir, str2bool, @@ -80,11 +81,12 @@ ) ################################################################################################### -DOCKER_COMPOSE_INSTALL_VERSION = "2.20.3" +DOCKER_COMPOSE_INSTALL_VERSION = "2.23.0" DEB_GPG_KEY_FINGERPRINT = '0EBFCD88' # used to verify GPG key for Docker Debian repository -MAC_BREW_DOCKER_PACKAGE = 'docker-edge' +MAC_BREW_DOCKER_PACKAGE = 'docker' +MAC_BREW_DOCKER_COMPOSE_PACKAGE = 'docker-compose' MAC_BREW_DOCKER_SETTINGS = '/Users/{}/Library/Group Containers/group.com.docker/settings.json' LOGSTASH_JAVA_OPTS_DEFAULT = '-server -Xms2500m -Xmx2500m -Xss1536k -XX:-HeapDumpOnOutOfMemoryError -Djava.security.egd=file:/dev/./urandom -Dlog4j.formatMsgNoLookups=true' @@ -2570,22 +2572,27 @@ def install_docker_compose(self): result = False if self.orchMode is OrchestrationFramework.DOCKER_COMPOSE: - dockerComposeCmd = 'docker-compose' - if not which(dockerComposeCmd, debug=self.debug): - if os.path.isfile('/usr/libexec/docker/cli-plugins/docker-compose'): - dockerComposeCmd = '/usr/libexec/docker/cli-plugins/docker-compose' - elif os.path.isfile('/usr/local/bin/docker-compose'): - dockerComposeCmd = '/usr/local/bin/docker-compose' - - # first see if docker-compose is already 
installed and runnable (try non-root and root) + # first see if docker compose/docker-compose is already installed and runnable + # (try non-root and root) + dockerComposeCmd = ('docker', 'compose') err, out = self.run_process([dockerComposeCmd, 'version'], privileged=False) if err != 0: err, out = self.run_process([dockerComposeCmd, 'version'], privileged=True) + if err != 0: + dockerComposeCmd = 'docker-compose' + if not which(dockerComposeCmd, debug=self.debug): + if os.path.isfile('/usr/libexec/docker/cli-plugins/docker-compose'): + dockerComposeCmd = '/usr/libexec/docker/cli-plugins/docker-compose' + elif os.path.isfile('/usr/local/bin/docker-compose'): + dockerComposeCmd = '/usr/local/bin/docker-compose' + err, out = self.run_process([dockerComposeCmd, 'version'], privileged=False) + if err != 0: + err, out = self.run_process([dockerComposeCmd, 'version'], privileged=True) if (err != 0) and InstallerYesOrNo( - '"docker-compose version" failed, attempt to install docker-compose?', default=True + 'docker compose failed, attempt to install docker compose?', default=True ): - if InstallerYesOrNo('Install docker-compose directly from docker github?', default=True): + if InstallerYesOrNo('Install docker compose directly from docker github?', default=True): # download docker-compose from github and put it in /usr/local/bin # need to know some linux platform info @@ -2617,7 +2624,7 @@ def install_docker_compose(self): elif InstallerYesOrNo('Install docker-compose via pip (privileged)?', default=False): # install docker-compose via pip (as root) - err, out = self.run_process([self.pipCmd, 'install', dockerComposeCmd], privileged=True) + err, out = self.run_process([self.pipCmd, 'install', 'docker-compose'], privileged=True) if err == 0: eprint("Installation of docker-compose apparently succeeded") else: @@ -2625,7 +2632,7 @@ def install_docker_compose(self): elif InstallerYesOrNo('Install docker-compose via pip (user)?', default=True): # install docker-compose via pip 
(regular user) - err, out = self.run_process([self.pipCmd, 'install', dockerComposeCmd], privileged=False) + err, out = self.run_process([self.pipCmd, 'install', 'docker-compose'], privileged=False) if err == 0: eprint("Installation of docker-compose apparently succeeded") else: @@ -2639,11 +2646,11 @@ def install_docker_compose(self): if err == 0: result = True if self.debug: - eprint('"docker-compose version" succeeded') + eprint('docker compose succeeded') else: raise Exception( - f'{ScriptName} requires docker-compose, please see {DOCKER_COMPOSE_INSTALL_URLS[self.platform]}' + f'{ScriptName} requires docker compose, please see {DOCKER_COMPOSE_INSTALL_URLS[self.platform]}' ) return result @@ -2894,7 +2901,7 @@ def install_docker(self): elif InstallerYesOrNo('"docker info" failed, attempt to install Docker?', default=True): if self.useBrew: # install docker via brew cask (requires user interaction) - dockerPackages = [MAC_BREW_DOCKER_PACKAGE, "docker-compose"] + dockerPackages = [MAC_BREW_DOCKER_PACKAGE, MAC_BREW_DOCKER_COMPOSE_PACKAGE] eprint(f"Installing docker packages: {dockerPackages}") if self.install_package(dockerPackages): eprint("Installation of docker packages apparently succeeded") @@ -2915,7 +2922,7 @@ def install_docker(self): else: tempFileName = os.path.join(self.tempDirName, 'Docker.dmg') if DownloadToFile( - 'https://download.docker.com/mac/edge/Docker.dmg', tempFileName, debug=self.debug + 'https://desktop.docker.com/mac/main/amd64/Docker.dmg', tempFileName, debug=self.debug ): while True: response = InstallerAskForString( @@ -2932,12 +2939,10 @@ def install_docker(self): eprint('"docker info" succeeded') elif err != 0: - raise Exception( - f'{ScriptName} requires docker edge, please see {DOCKER_INSTALL_URLS[self.platform]}' - ) + raise Exception(f'{ScriptName} requires docker, please see {DOCKER_INSTALL_URLS[self.platform]}') elif err != 0: - raise Exception(f'{ScriptName} requires docker edge, please see {DOCKER_INSTALL_URLS[self.platform]}') 
+ raise Exception(f'{ScriptName} requires docker, please see {DOCKER_INSTALL_URLS[self.platform]}') # tweak CPU/RAM usage for Docker in Mac settingsFile = MAC_BREW_DOCKER_SETTINGS.format(self.scriptUser) diff --git a/scripts/malcolm_common.py b/scripts/malcolm_common.py index 552557eaf..843af368a 100644 --- a/scripts/malcolm_common.py +++ b/scripts/malcolm_common.py @@ -99,6 +99,8 @@ class UserInterfaceMode(IntFlag): DOCKER_INSTALL_URLS[PLATFORM_MAC] = [ 'https://www.code2bits.com/how-to-install-docker-on-macos-using-homebrew/', 'https://docs.docker.com/docker-for-mac/install/', + 'https://formulae.brew.sh/formula/docker', + 'https://formulae.brew.sh/formula/docker-compose', ] DOCKER_COMPOSE_INSTALL_URLS = defaultdict(lambda: 'https://docs.docker.com/compose/install/') HOMEBREW_INSTALL_URLS = defaultdict(lambda: 'https://brew.sh/') diff --git a/scripts/malcolm_utils.py b/scripts/malcolm_utils.py index f7c80e85a..a89849165 100644 --- a/scripts/malcolm_utils.py +++ b/scripts/malcolm_utils.py @@ -297,6 +297,17 @@ def EVP_BytesToKey(key_length: int, iv_length: int, md, salt: bytes, data: bytes return key, iv +################################################################################################### +# flatten a collection, but don't split strings +def flatten(coll): + for i in coll: + if isinstance(i, Iterable) and not isinstance(i, str): + for subc in flatten(i): + yield subc + else: + yield i + + ################################################################################################### # if the object is an iterable, return it, otherwise return a tuple with it as a single element. # useful if you want to user either a scalar or an array in a loop, etc. 
@@ -634,11 +645,12 @@ def run_process( ): retcode = -1 output = [] + flat_command = list(flatten(get_iterable(command))) try: # run the command retcode, cmdout, cmderr = check_output_input( - command, + flat_command, input=stdin.encode() if stdin else None, cwd=cwd, env=env, @@ -652,11 +664,11 @@ def run_process( except (FileNotFoundError, OSError, IOError): if stderr: - output.append("Command {} not found or unable to execute".format(command)) + output.append("Command {} not found or unable to execute".format(flat_command)) if debug: dbgStr = "{}{} returned {}: {}".format( - command, "({})".format(stdin[:80] + bool(stdin[80:]) * '...' if stdin else ""), retcode, output + flat_command, "({})".format(stdin[:80] + bool(stdin[80:]) * '...' if stdin else ""), retcode, output ) if logger is not None: logger.debug(dbgStr) @@ -666,7 +678,7 @@ def run_process( if (retcode != 0) and retry and (retry > 0): # sleep then retry time.sleep(retrySleepSec) - return run_process(command, stdout, stderr, stdin, retry - 1, retrySleepSec, cwd, env, debug, logger) + return run_process(flat_command, stdout, stderr, stdin, retry - 1, retrySleepSec, cwd, env, debug, logger) else: return retcode, output diff --git a/scripts/third-party-environments/aws/ami/scripts/Malcolm_AMI_Setup.sh b/scripts/third-party-environments/aws/ami/scripts/Malcolm_AMI_Setup.sh index 44113adf2..a1c4d1f80 100755 --- a/scripts/third-party-environments/aws/ami/scripts/Malcolm_AMI_Setup.sh +++ b/scripts/third-party-environments/aws/ami/scripts/Malcolm_AMI_Setup.sh @@ -32,7 +32,7 @@ fi # -u UID (user UID, e.g., 1000) VERBOSE_FLAG= MALCOLM_REPO=${MALCOLM_REPO:-idaholab/Malcolm} -MALCOLM_TAG=${MALCOLM_TAG:-v23.05.1} +MALCOLM_TAG=${MALCOLM_TAG:-v23.10.0} [[ -z "$MALCOLM_UID" ]] && ( [[ $EUID -eq 0 ]] && MALCOLM_UID=1000 || MALCOLM_UID="$(id -u)" ) while getopts 'vr:t:u:' OPTION; do case "$OPTION" in @@ -217,7 +217,7 @@ function InstallMalcolm { mv docker-compose-standalone.yml docker-compose.yml for ENVEXAMPLE in 
./config/*.example; do ENVFILE="${ENVEXAMPLE%.*}"; cp "$ENVEXAMPLE" "$ENVFILE"; done echo "Pulling Docker images..." >&2 - docker-compose pull >/dev/null 2>&1 + docker-compose --profile malcolm pull >/dev/null 2>&1 rm -f ./config/*.env docker images popd >/dev/null 2>&1 From 4be0d17cc2e1f4798fc6d13e9b61a2c520621741 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Wed, 1 Nov 2023 09:55:53 -0600 Subject: [PATCH 23/82] fix issue with logs --- scripts/control.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/control.py b/scripts/control.py index d72119094..0ab1f8e50 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -60,6 +60,7 @@ deep_get, dictsearch, eprint, + flatten, EscapeAnsi, EscapeForCurl, get_iterable, @@ -784,7 +785,7 @@ def logs(): if cmd: process = Popen( - cmd, + flatten(cmd), env=osEnv, stdout=PIPE, stderr=None if args.debug else DEVNULL, From fd69cad435c0562ac06a3442a8f11e864065d215 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Wed, 1 Nov 2023 10:48:51 -0600 Subject: [PATCH 24/82] fix issue parsing dns.ip --- logstash/pipelines/zeek/12_zeek_mutate.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logstash/pipelines/zeek/12_zeek_mutate.conf b/logstash/pipelines/zeek/12_zeek_mutate.conf index fd9fdf627..6d3b61636 100644 --- a/logstash/pipelines/zeek/12_zeek_mutate.conf +++ b/logstash/pipelines/zeek/12_zeek_mutate.conf @@ -410,7 +410,7 @@ filter { ruby { id => "ruby_zeek_dns_answers_ip_extract" # todo: adjust this regex so it at least sort of catches IPv6 as well - code => "event.set('[@metadata][answers_ip]', event.get('[zeek][dns][answers]').scan(/\d+\.\d+\.\d+\.\d+/).join(','))" + code => "event.set('[@metadata][answers_ip]', event.get('[zeek][dns][answers]').scan(/\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b/).join(','))" } mutate { id => "mutate_split_zeek_dns_answers" split => { "[zeek][dns][answers]" => "," } } From e5188807dd1dc2aa3de8ab8b462aa08e2e7edd9d Mon Sep 17 00:00:00 2001 From: Seth 
Grover Date: Wed, 1 Nov 2023 15:28:37 -0600 Subject: [PATCH 25/82] make sure preloaded prefixes get populated with default site name (idaholab/Malcolm#279) --- netbox/preload/prefixes_defaults.yml | 3 +++ netbox/scripts/netbox_init.py | 6 +++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/netbox/preload/prefixes_defaults.yml b/netbox/preload/prefixes_defaults.yml index 0fdb935f1..1788bb584 100644 --- a/netbox/preload/prefixes_defaults.yml +++ b/netbox/preload/prefixes_defaults.yml @@ -1,3 +1,6 @@ - prefix: 10.0.0.0/8 + site: NETBOX_DEFAULT_SITE - prefix: 172.16.0.0/12 + site: NETBOX_DEFAULT_SITE - prefix: 192.168.0.0/16 + site: NETBOX_DEFAULT_SITE diff --git a/netbox/scripts/netbox_init.py b/netbox/scripts/netbox_init.py index ab1aba957..65d6746b4 100755 --- a/netbox/scripts/netbox_init.py +++ b/netbox/scripts/netbox_init.py @@ -617,12 +617,16 @@ def main(): # only preload catch-all IP Prefixes if explicitly specified and they don't already exist if args.preloadPrefixes: + defaultSiteName = next(iter([x for x in args.netboxSites]), None) for loadType in ('vrfs', 'prefixes'): defaultFileName = os.path.join(tmpPreloadDir, f'{loadType}_defaults.yml') loadFileName = os.path.join(tmpPreloadDir, f'{loadType}.yml') if os.path.isfile(defaultFileName) and (not os.path.isfile(loadFileName)): try: - shutil.copyfile(defaultFileName, loadFileName) + with open(defaultFileName, 'r') as infile: + with open(loadFileName, 'w') as outfile: + for line in infile: + outfile.write(line.replace("NETBOX_DEFAULT_SITE", defaultSiteName)) except Exception: pass From c2f043e21152be4e089493879f5712b88be21ac7 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Wed, 1 Nov 2023 15:47:36 -0600 Subject: [PATCH 26/82] fix start not stopping log display --- scripts/control.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/control.py b/scripts/control.py index 0ab1f8e50..8eedfc858 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -785,7 +785,7 @@ def 
logs(): if cmd: process = Popen( - flatten(cmd), + list(flatten(cmd)), env=osEnv, stdout=PIPE, stderr=None if args.debug else DEVNULL, @@ -807,6 +807,7 @@ def logs(): and (not args.cmdLogs) and finishedStartingRegEx.match(output) ): + shuttingDown[0] = True process.terminate() try: process.wait(timeout=5.0) From 570fce916028ac57456774ee1170a73c5f51a50c Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Wed, 1 Nov 2023 16:53:31 -0600 Subject: [PATCH 27/82] fix issue with prefix name not being used for segments correctly (idaholab/Malcolm#280) --- logstash/ruby/netbox_enrich.rb | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/logstash/ruby/netbox_enrich.rb b/logstash/ruby/netbox_enrich.rb index 7ca8d0e13..4eabb0bb4 100644 --- a/logstash/ruby/netbox_enrich.rb +++ b/logstash/ruby/netbox_enrich.rb @@ -650,7 +650,11 @@ def filter(event) _tmp_prefixes.each do |p| # non-verbose output is flatter with just names { :name => "name", :id => "id", ... } # if _verbose, include entire object as :details - _prefixes << { :name => p.fetch(:description, p.fetch(:display, nil)), + _prefixName = p.fetch(:description, nil) + if _prefixName.nil? || _prefixName.empty? + _prefixName = p.fetch(:display, p.fetch(:prefix, nil)) + end + _prefixes << { :name => _prefixName, :id => p.fetch(:id, nil), :site => ((_site = p.fetch(:site, nil)) && _site&.has_key?(:name)) ? _site[:name] : _site&.fetch(:display, nil), :tenant => ((_tenant = p.fetch(:tenant, nil)) && _tenant&.has_key?(:name)) ? 
_tenant[:name] : _tenant&.fetch(:display, nil), From 326b8f6febb2909e0059f4a9a7480ac218cbc44b Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Wed, 8 Nov 2023 07:21:17 -0700 Subject: [PATCH 28/82] update logstash (https://www.elastic.co/guide/en/logstash/current/logstash-8-11-0.html) and beats (https://www.elastic.co/guide/en/beats/libbeat/current/release-notes-8.11.0.html) to v8.11.0 --- Dockerfiles/filebeat.Dockerfile | 2 +- Dockerfiles/logstash.Dockerfile | 2 +- sensor-iso/build.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfiles/filebeat.Dockerfile b/Dockerfiles/filebeat.Dockerfile index 38fed5933..8d4e2e163 100644 --- a/Dockerfiles/filebeat.Dockerfile +++ b/Dockerfiles/filebeat.Dockerfile @@ -1,4 +1,4 @@ -FROM docker.elastic.co/beats/filebeat-oss:8.10.4 +FROM docker.elastic.co/beats/filebeat-oss:8.11.0 # Copyright (c) 2023 Battelle Energy Alliance, LLC. All rights reserved. LABEL maintainer="malcolm@inl.gov" diff --git a/Dockerfiles/logstash.Dockerfile b/Dockerfiles/logstash.Dockerfile index 0c2c9258c..fa930f676 100644 --- a/Dockerfiles/logstash.Dockerfile +++ b/Dockerfiles/logstash.Dockerfile @@ -1,4 +1,4 @@ -FROM docker.elastic.co/logstash/logstash-oss:8.10.4 +FROM docker.elastic.co/logstash/logstash-oss:8.11.0 LABEL maintainer="malcolm@inl.gov" LABEL org.opencontainers.image.authors='malcolm@inl.gov' diff --git a/sensor-iso/build.sh b/sensor-iso/build.sh index 36d50c276..a2575756e 100755 --- a/sensor-iso/build.sh +++ b/sensor-iso/build.sh @@ -5,7 +5,7 @@ IMAGE_PUBLISHER=idaholab IMAGE_VERSION=1.0.0 IMAGE_DISTRIBUTION=bookworm -BEATS_VER="8.10.4" +BEATS_VER="8.11.0" BEATS_OSS="-oss" BUILD_ERROR_CODE=1 From 5dbb346d0e28ed616e3da4d74cadbd8418bc24ff Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Thu, 9 Nov 2023 09:41:40 -0700 Subject: [PATCH 29/82] Fluent-bit to v2.2.0 (https://github.com/fluent/fluent-bit/releases/tag/v2.2.0) --- docs/asset-interaction-analysis.md | 2 -- scripts/control.py | 34 +++++++++++++++++++ 
scripts/third-party-logs/fluent-bit-setup.ps1 | 4 +-- 3 files changed, 36 insertions(+), 4 deletions(-) diff --git a/docs/asset-interaction-analysis.md b/docs/asset-interaction-analysis.md index 228541cde..651af3350 100644 --- a/docs/asset-interaction-analysis.md +++ b/docs/asset-interaction-analysis.md @@ -133,5 +133,3 @@ To clear the existing NetBox database and restore a previous backup, run the fol ./scripts/netbox-restore --netbox-restore ./malcolm_netbox_backup_20230110-125756.gz ``` - -Note that some of the data in the NetBox database is cryptographically signed with the value of the `SECRET_KEY` environment variable in the `./netbox/env/netbox-secret.env` environment file. A restored NetBox backup **will not work** if this value is different from when it was created. diff --git a/scripts/control.py b/scripts/control.py index 8eedfc858..dce0e2aab 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -608,6 +608,19 @@ def netboxRestore(backupFileName=None): if err != 0: raise Exception('Error creating new NetBox database') + # make sure permissions are set up right + dockerCmd = dockerCmdBase + [ + 'netbox-postgres', + 'psql', + '-U', + 'netbox', + '-c', + 'GRANT ALL PRIVILEGES ON DATABASE netbox TO netbox', + ] + err, results = run_process(dockerCmd, env=osEnv, debug=args.debug) + if err != 0: + raise Exception('Error setting NetBox database permissions') + # load the backed-up psql dump dockerCmd = dockerCmdBase + ['netbox-postgres', 'psql', '-U', 'netbox'] with gzip.open(backupFileName, 'rt') as f: @@ -673,6 +686,27 @@ def netboxRestore(backupFileName=None): if err != 0: raise Exception(f'Error creating new NetBox database: {results}') + # make sure permissions are set up right + if podsResults := PodExec( + service='netbox-postgres', + namespace=args.namespace, + command=[ + 'netbox-postgres', + 'psql', + '-U', + 'netbox', + '-c', + 'GRANT ALL PRIVILEGES ON DATABASE netbox TO netbox', + ], + ): + err = 0 if all([deep_get(v, ['err'], 1) == 0 for k, v 
in podsResults.items()]) else 1 + results = list(chain(*[deep_get(v, ['output'], '') for k, v in podsResults.items()])) + else: + err = 1 + results = [] + if err != 0: + raise Exception(f'Error setting NetBox database permissions: {results}') + # load the backed-up psql dump with gzip.open(backupFileName, 'rt') as f: if podsResults := PodExec( diff --git a/scripts/third-party-logs/fluent-bit-setup.ps1 b/scripts/third-party-logs/fluent-bit-setup.ps1 index bde65a2d5..2fa6f40dd 100644 --- a/scripts/third-party-logs/fluent-bit-setup.ps1 +++ b/scripts/third-party-logs/fluent-bit-setup.ps1 @@ -8,8 +8,8 @@ # Copyright (c) 2023 Battelle Energy Alliance, LLC. All rights reserved. ############################################################################### -$fluent_bit_version = '2.1' -$fluent_bit_full_version = '2.1.10' +$fluent_bit_version = '2.2' +$fluent_bit_full_version = '2.2.0' ############################################################################### # select an item from a menu provided in an array From 0a6565d18b7a68632a5490cc3f556ce523dfd52f Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Fri, 10 Nov 2023 12:07:26 -0700 Subject: [PATCH 30/82] various updates for v23.11.0 development: * replace master/slave with client/server for modbus (idaholab/Malcolm#291) * modbus updates for icsnpp-modbus (idaholab/Malcolm#289) * point some Zeek plugins back upstream * added new visualizations to modbus dashboard --- Dockerfiles/netbox.Dockerfile | 2 +- arkime/etc/config.ini | 13 +- arkime/wise/source.zeeklogs.js | 7 + .../152f29dc-51a2-4f53-93e9-6e92765567b8.json | 148 ++++++++++++++---- .../composable/component/zeek_ot.json | 7 + logstash/pipelines/zeek/11_zeek_parse.conf | 78 +++++++-- logstash/pipelines/zeek/12_zeek_mutate.conf | 23 ++- .../pipelines/zeek/13_zeek_normalize.conf | 13 +- scripts/zeek_script_to_malcolm_boilerplate.py | 4 + shared/bin/zeek_install_plugins.sh | 4 +- 10 files changed, 246 insertions(+), 53 deletions(-) diff --git 
a/Dockerfiles/netbox.Dockerfile b/Dockerfiles/netbox.Dockerfile index 4d5dbc7fb..49df64e4f 100644 --- a/Dockerfiles/netbox.Dockerfile +++ b/Dockerfiles/netbox.Dockerfile @@ -1,4 +1,4 @@ -FROM netboxcommunity/netbox:v3.6.4 +FROM netboxcommunity/netbox:v3.6.5 # Copyright (c) 2023 Battelle Energy Alliance, LLC. All rights reserved. LABEL maintainer="malcolm@inl.gov" diff --git a/arkime/etc/config.ini b/arkime/etc/config.ini index 1175de8b5..3caf5a270 100644 --- a/arkime/etc/config.ini +++ b/arkime/etc/config.ini @@ -675,6 +675,7 @@ zeek.modbus.exception=db:zeek.modbus.exception;group:zeek_modbus;kind:termfield; zeek.modbus.unit_id=db:zeek.modbus.unit_id;group:zeek_modbus;kind:integer;viewerOnly:true;friendly:Unit/Server ID;help:Unit/Server ID zeek.modbus.trans_id=db:zeek.modbus.trans_id;group:zeek_modbus;kind:integer;viewerOnly:true;friendly:Transaction ID;help:Transaction ID zeek.modbus.network_direction=db:zeek.modbus.network_direction;group:zeek_modbus;kind:termfield;viewerOnly:true;friendly:PDU Type;help:Request or Response +zeek.modbus.mei_type=db:zeek.modbus.mei_type;group:modbus;kind:termfield;friendly:MEI Type;help:MEI Type # modbus_detailed.log # https://github.com/cisagov/ICSNPP @@ -687,6 +688,15 @@ zeek.modbus_detailed.values=db:zeek.modbus_detailed.values;group:zeek_modbus;kin zeek.modbus_mask_write_register.and_mask=db:zeek.modbus_mask_write_register.and_mask;group:zeek_modbus;kind:integer;viewerOnly:true;friendly:Boolean AND mask to apply to target register;help:Boolean AND mask to apply to target register zeek.modbus_mask_write_register.or_mask=db:zeek.modbus_mask_write_register.or_mask;group:zeek_modbus;kind:integer;viewerOnly:true;friendly:Boolean OR mask to apply to target register;help:Boolean OR mask to apply to target register +# modbus_read_device_identification.log +# https://github.com/cisagov/icsnpp-modbus 
+zeek.modbus_read_device_identification.conformity_level_code=db:zeek.modbus_read_device_identification.conformity_level_code;group:zeek_modbus_read_device_identification;kind:termfield;friendly:Conformity Level Code;help:Conformity Level Code +zeek.modbus_read_device_identification.conformity_level=db:zeek.modbus_read_device_identification.conformity_level;group:zeek_modbus_read_device_identification;kind:termfield;friendly:Conformity Level;help:Conformity Level +zeek.modbus_read_device_identification.device_id_code=db:zeek.modbus_read_device_identification.device_id_code;group:zeek_modbus_read_device_identification;kind:integer;friendly:Device ID Code;help:Device ID Code +zeek.modbus_read_device_identification.object_id_code=db:zeek.modbus_read_device_identification.object_id_code;group:zeek_modbus_read_device_identification;kind:termfield;friendly:Object ID Code;help:Object ID Code +zeek.modbus_read_device_identification.object_id=db:zeek.modbus_read_device_identification.object_id;group:zeek_modbus_read_device_identification;kind:termfield;friendly:Object ID;help:Object ID +zeek.modbus_read_device_identification.object_value=db:zeek.modbus_read_device_identification.object_value;group:zeek_modbus_read_device_identification;kind:termfield;friendly:Object Value;help:Object Value + # modbus_read_write_multiple_registers.log # https://github.com/cisagov/ICSNPP zeek.modbus_read_write_multiple_registers.write_start_address=db:zeek.modbus_read_write_multiple_registers.write_start_address;group:zeek_modbus;kind:integer;viewerOnly:true;friendly:Starting address of the registers to write to;help:Starting address of the registers to write to @@ -2600,9 +2610,10 @@ o_zeek_known_modbus=require:zeek.known_modbus;title:Zeek zeek.known_modbus.log;f o_zeek_ldap=require:zeek.ldap;title:Zeek ldap.log;fields:zeek.ldap.message_id,zeek.ldap.version,zeek.ldap.operation,zeek.ldap.result_code,zeek.ldap.result_message,zeek.ldap.object,zeek.ldap.argument 
o_zeek_ldap_search=require:zeek.ldap_search;title:Zeek ldap_search.log;fields:zeek.ldap_search.message_id,zeek.ldap_search.filter,zeek.ldap_search.attributes,zeek.ldap_search.scope,zeek.ldap_search.deref,zeek.ldap_search.base_object,zeek.ldap_search.result_count,zeek.ldap_search.result_code,zeek.ldap_search.result_message o_zeek_login=require:zeek.login;title:Zeek login.log;fields:zeek.login.client_user,zeek.login.confused,zeek.login.success -o_zeek_modbus=require:zeek.modbus;title:Zeek modbus.log;fields:zeek.modbus.trans_id,zeek.modbus.unit_id,zeek.modbus.network_direction,zeek.modbus.func,zeek.modbus.exception +o_zeek_modbus=require:zeek.modbus;title:Zeek modbus.log;fields:zeek.modbus.trans_id,zeek.modbus.unit_id,zeek.modbus.network_direction,zeek.modbus.func,zeek.modbus.exception,zeek.modbus.mei_type, o_zeek_modbus_detailed=require:zeek.modbus_detailed;title:Zeek modbus_detailed.log;fields:zeek.modbus.unit_id,zeek.modbus.func,zeek.modbus.network_direction,zeek.modbus_detailed.address,zeek.modbus_detailed.quantity,zeek.modbus_detailed.values o_zeek_modbus_mask_write_register=require:zeek.modbus_mask_write_register;title:Zeek modbus_mask_write_register.log;fields:zeek.modbus_detailed.unit_id,zeek.modbus.func,zeek.modbus_detailed.network_direction,zeek.modbus_detailed.address,zeek.modbus_mask_write_register.and_mask,zeek.modbus_mask_write_register.or_mask +o_zeek_modbus_read_device_identification=require:zeek.modbus_read_device_identification;title:Zeek modbus_read_device_identification.log;fields:zeek.modbus_read_device_identification.conformity_level_code,zeek.modbus_read_device_identification.conformity_level,zeek.modbus_read_device_identification.device_id_code,zeek.modbus_read_device_identification.object_id_code,zeek.modbus_read_device_identification.object_id,zeek.modbus_read_device_identification.object_value o_zeek_modbus_read_write_multiple_registers=require:zeek.modbus_read_write_multiple_registers;title:Zeek 
modbus_read_write_multiple_registers.log;fields:zeek.modbus_detailed.unit_id,zeek.modbus.func,zeek.modbus_detailed.network_direction,zeek.modbus_read_write_multiple_registers.write_start_address,zeek.modbus_read_write_multiple_registers.write_registers,zeek.modbus_read_write_multiple_registers.read_start_address,zeek.modbus_read_write_multiple_registers.read_quantity,zeek.modbus_read_write_multiple_registers.read_registers o_zeek_mqtt_connect=require:zeek.mqtt_connect;title:Zeek mqtt_connect.log;fields:zeek.mqtt_connect.proto_name,zeek.mqtt_connect.proto_version,zeek.mqtt_connect.client_id,zeek.mqtt_connect.connect_status,zeek.mqtt_connect.will_topic,zeek.mqtt_connect.will_payload o_zeek_mqtt_publish=require:zeek.mqtt_publish;title:Zeek mqtt_publish.log;fields:zeek.mqtt_publish.from_client,zeek.mqtt_publish.retain,zeek.mqtt_publish.qos,zeek.mqtt_publish.status,zeek.mqtt_publish.topic,zeek.mqtt_publish.payload,zeek.mqtt_publish.payload_len,zeek.mqtt_publish.payload_dict.messageType diff --git a/arkime/wise/source.zeeklogs.js b/arkime/wise/source.zeeklogs.js index ecdfd3412..ddd1b0e1c 100644 --- a/arkime/wise/source.zeeklogs.js +++ b/arkime/wise/source.zeeklogs.js @@ -1116,11 +1116,18 @@ class MalcolmSource extends WISESource { "zeek.modbus.network_direction", "zeek.modbus.trans_id", "zeek.modbus.unit_id", + "zeek.modbus.mei_type", "zeek.modbus_detailed.address", "zeek.modbus_detailed.quantity", "zeek.modbus_detailed.values", "zeek.modbus_mask_write_register.and_mask", "zeek.modbus_mask_write_register.or_mask", + "zeek.modbus_read_device_identification.conformity_level_code", + "zeek.modbus_read_device_identification.conformity_level", + "zeek.modbus_read_device_identification.device_id_code", + "zeek.modbus_read_device_identification.object_id_code", + "zeek.modbus_read_device_identification.object_id", + "zeek.modbus_read_device_identification.object_value", "zeek.modbus_read_write_multiple_registers.read_quantity", 
"zeek.modbus_read_write_multiple_registers.read_registers", "zeek.modbus_read_write_multiple_registers.read_start_address", diff --git a/dashboards/dashboards/152f29dc-51a2-4f53-93e9-6e92765567b8.json b/dashboards/dashboards/152f29dc-51a2-4f53-93e9-6e92765567b8.json index adcd93ba0..ce98e6a07 100644 --- a/dashboards/dashboards/152f29dc-51a2-4f53-93e9-6e92765567b8.json +++ b/dashboards/dashboards/152f29dc-51a2-4f53-93e9-6e92765567b8.json @@ -7,13 +7,13 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:38:35.641Z", - "version": "Wzk0OSwxXQ==", + "updated_at": "2023-11-10T19:05:19.809Z", + "version": "Wzk1NywxXQ==", "attributes": { "title": "Modbus", "hits": 0, "description": "Dashboard for the Modbus Protocol", - "panelsJSON": "[{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":0,\"w\":8,\"h\":30,\"i\":\"2\"},\"panelIndex\":\"2\",\"embeddableConfig\":{},\"panelRefName\":\"panel_0\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":84,\"w\":48,\"h\":18,\"i\":\"14\"},\"panelIndex\":\"14\",\"embeddableConfig\":{},\"panelRefName\":\"panel_1\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":32,\"y\":23,\"w\":8,\"h\":18,\"i\":\"15\"},\"panelIndex\":\"15\",\"embeddableConfig\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"asc\"}},\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}},\"panelRefName\":\"panel_2\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":40,\"y\":23,\"w\":8,\"h\":18,\"i\":\"16\"},\"panelIndex\":\"16\",\"embeddableConfig\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"asc\"}},\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}},\"panelRefName\":\"panel_3\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":8,\"y\":23,\"w\":11,\"h\":18,\"i\":\"18\"},\"panelIndex\":\"18\",\"embeddableConfig\":{},\"panelRefName\":\"panel_4\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":30,\"w\":8,\"h\":11,\"i\":\"19\"},\"panelIndex\":\"19\",\"embeddableConfig\":{\"legendOpen\":true,\"table\":null,\"vi
s\":{\"legendOpen\":true}},\"panelRefName\":\"panel_5\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":8,\"y\":0,\"w\":11,\"h\":23,\"i\":\"90799aa8-a1f5-4f22-8ebd-fcc89d16f6de\"},\"panelIndex\":\"90799aa8-a1f5-4f22-8ebd-fcc89d16f6de\",\"embeddableConfig\":{},\"panelRefName\":\"panel_6\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":19,\"y\":0,\"w\":29,\"h\":23,\"i\":\"218010cf-a0d9-4864-815b-f562bb67949d\"},\"panelIndex\":\"218010cf-a0d9-4864-815b-f562bb67949d\",\"embeddableConfig\":{},\"panelRefName\":\"panel_7\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":19,\"y\":23,\"w\":13,\"h\":18,\"i\":\"5fd617f5-e213-4c2b-ae10-7a1643e739a7\"},\"panelIndex\":\"5fd617f5-e213-4c2b-ae10-7a1643e739a7\",\"embeddableConfig\":{},\"panelRefName\":\"panel_8\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":41,\"w\":16,\"h\":26,\"i\":\"f8941a7d-be4b-4782-b72b-808645d02139\"},\"panelIndex\":\"f8941a7d-be4b-4782-b72b-808645d02139\",\"embeddableConfig\":{},\"panelRefName\":\"panel_9\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":16,\"y\":41,\"w\":16,\"h\":43,\"i\":\"c0d7fb2c-a651-4054-b4cd-026d9f34ad44\"},\"panelIndex\":\"c0d7fb2c-a651-4054-b4cd-026d9f34ad44\",\"embeddableConfig\":{\"params\":{\"sort\":{\"columnIndex\":4,\"direction\":\"asc\"}},\"vis\":{\"params\":{\"sort\":{\"columnIndex\":4,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":4,\"direction\":\"desc\"}}},\"panelRefName\":\"panel_10\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":32,\"y\":41,\"w\":16,\"h\":43,\"i\":\"502f22a6-2e5c-44dd-afa8-39309464f3f2\"},\"panelIndex\":\"502f22a6-2e5c-44dd-afa8-39309464f3f2\",\"embeddableConfig\":{\"params\":{\"sort\":{\"columnIndex\":5,\"direction\":\"asc\"}},\"vis\":{\"params\":{\"sort\":{\"columnIndex\":5,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":5,\"direction\":\"desc\"}}},\"panelRefName\":\"panel_11\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":67,\"w\":16,\"h\":17,\"i\":\"a3049ec4-3c48-4a43-9899-99c018670773\"},\"panelIndex\":\"a3049ec4-3c48-4a43-9899-99c018670773
\",\"embeddableConfig\":{},\"panelRefName\":\"panel_12\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":102,\"w\":48,\"h\":23,\"i\":\"1d1b2b12-c510-4b9e-9fbe-b65a2946fe13\"},\"panelIndex\":\"1d1b2b12-c510-4b9e-9fbe-b65a2946fe13\",\"embeddableConfig\":{\"sort\":[[\"firstPacket\",\"asc\"]]},\"panelRefName\":\"panel_13\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":125,\"w\":48,\"h\":15,\"i\":\"99311c07-fbae-4197-ab3f-f8ddf89deefc\"},\"panelIndex\":\"99311c07-fbae-4197-ab3f-f8ddf89deefc\",\"embeddableConfig\":{},\"panelRefName\":\"panel_14\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":140,\"w\":48,\"h\":15,\"i\":\"f50e3c18-31ce-482f-b6a0-c99215b5b5e9\"},\"panelIndex\":\"f50e3c18-31ce-482f-b6a0-c99215b5b5e9\",\"embeddableConfig\":{},\"panelRefName\":\"panel_15\"}]", + "panelsJSON": "[{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":0,\"w\":8,\"h\":30,\"i\":\"2\"},\"panelIndex\":\"2\",\"embeddableConfig\":{},\"panelRefName\":\"panel_0\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":85,\"w\":48,\"h\":18,\"i\":\"14\"},\"panelIndex\":\"14\",\"embeddableConfig\":{},\"panelRefName\":\"panel_1\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":32,\"y\":23,\"w\":8,\"h\":18,\"i\":\"15\"},\"panelIndex\":\"15\",\"embeddableConfig\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"asc\"}},\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}},\"panelRefName\":\"panel_2\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":40,\"y\":23,\"w\":8,\"h\":18,\"i\":\"16\"},\"panelIndex\":\"16\",\"embeddableConfig\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"asc\"}},\"vis\":{\"params\":{\"sort\":{\"columnIndex\":1,\"direction\":\"desc\"}}}},\"panelRefName\":\"panel_3\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":8,\"y\":23,\"w\":11,\"h\":18,\"i\":\"18\"},\"panelIndex\":\"18\",\"embeddableConfig\":{},\"panelRefName\":\"panel_4\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":30,\"w\":8,\"h\":11,\"i\":\"19\"},\"panelIndex\":\"19\",
\"embeddableConfig\":{\"legendOpen\":true,\"table\":null,\"vis\":{\"legendOpen\":true}},\"panelRefName\":\"panel_5\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":8,\"y\":0,\"w\":11,\"h\":23,\"i\":\"90799aa8-a1f5-4f22-8ebd-fcc89d16f6de\"},\"panelIndex\":\"90799aa8-a1f5-4f22-8ebd-fcc89d16f6de\",\"embeddableConfig\":{},\"panelRefName\":\"panel_6\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":19,\"y\":0,\"w\":29,\"h\":23,\"i\":\"218010cf-a0d9-4864-815b-f562bb67949d\"},\"panelIndex\":\"218010cf-a0d9-4864-815b-f562bb67949d\",\"embeddableConfig\":{},\"panelRefName\":\"panel_7\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":19,\"y\":23,\"w\":13,\"h\":18,\"i\":\"5fd617f5-e213-4c2b-ae10-7a1643e739a7\"},\"panelIndex\":\"5fd617f5-e213-4c2b-ae10-7a1643e739a7\",\"embeddableConfig\":{},\"panelRefName\":\"panel_8\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":41,\"w\":16,\"h\":26,\"i\":\"f8941a7d-be4b-4782-b72b-808645d02139\"},\"panelIndex\":\"f8941a7d-be4b-4782-b72b-808645d02139\",\"embeddableConfig\":{},\"panelRefName\":\"panel_9\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":16,\"y\":41,\"w\":16,\"h\":26,\"i\":\"c0d7fb2c-a651-4054-b4cd-026d9f34ad44\"},\"panelIndex\":\"c0d7fb2c-a651-4054-b4cd-026d9f34ad44\",\"embeddableConfig\":{\"params\":{\"sort\":{\"columnIndex\":4,\"direction\":\"asc\"}},\"vis\":{\"params\":{\"sort\":{\"columnIndex\":4,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":4,\"direction\":\"desc\"}}},\"panelRefName\":\"panel_10\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":32,\"y\":41,\"w\":16,\"h\":26,\"i\":\"502f22a6-2e5c-44dd-afa8-39309464f3f2\"},\"panelIndex\":\"502f22a6-2e5c-44dd-afa8-39309464f3f2\",\"embeddableConfig\":{\"params\":{\"sort\":{\"columnIndex\":5,\"direction\":\"asc\"}},\"vis\":{\"params\":{\"sort\":{\"columnIndex\":5,\"direction\":\"desc\"}},\"sortColumn\":{\"colIndex\":5,\"direction\":\"desc\"}}},\"panelRefName\":\"panel_11\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":67,\"w\":16,\"h\":18,\"i\":\"a3049ec4-3c48-4a43-9899-99c01867
0773\"},\"panelIndex\":\"a3049ec4-3c48-4a43-9899-99c018670773\",\"embeddableConfig\":{},\"panelRefName\":\"panel_12\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":16,\"y\":67,\"w\":32,\"h\":18,\"i\":\"7efb9ae4-4913-4ae3-a945-0d83e27377d3\"},\"panelIndex\":\"7efb9ae4-4913-4ae3-a945-0d83e27377d3\",\"embeddableConfig\":{},\"panelRefName\":\"panel_13\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":103,\"w\":48,\"h\":23,\"i\":\"1d1b2b12-c510-4b9e-9fbe-b65a2946fe13\"},\"panelIndex\":\"1d1b2b12-c510-4b9e-9fbe-b65a2946fe13\",\"embeddableConfig\":{\"sort\":[[\"firstPacket\",\"asc\"]]},\"panelRefName\":\"panel_14\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":126,\"w\":48,\"h\":15,\"i\":\"99311c07-fbae-4197-ab3f-f8ddf89deefc\"},\"panelIndex\":\"99311c07-fbae-4197-ab3f-f8ddf89deefc\",\"embeddableConfig\":{},\"panelRefName\":\"panel_15\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":141,\"w\":48,\"h\":15,\"i\":\"f50e3c18-31ce-482f-b6a0-c99215b5b5e9\"},\"panelIndex\":\"f50e3c18-31ce-482f-b6a0-c99215b5b5e9\",\"embeddableConfig\":{},\"panelRefName\":\"panel_16\"},{\"version\":\"2.8.0\",\"gridData\":{\"x\":0,\"y\":156,\"w\":48,\"h\":19,\"i\":\"3711221b-ce64-447a-886b-6ad2c50322f9\"},\"panelIndex\":\"3711221b-ce64-447a-886b-6ad2c50322f9\",\"embeddableConfig\":{},\"panelRefName\":\"panel_17\"}]", "optionsJSON": "{\"useMargins\":true}", "version": 1, "timeRestore": false, @@ -89,18 +89,28 @@ }, { "name": "panel_13", + "type": "visualization", + "id": "f6d09e10-7ffb-11ee-9964-dd538601517e" + }, + { + "name": "panel_14", "type": "search", "id": "1cfb4e10-e0b7-11ea-8a49-0d5868b09681" }, { - "name": "panel_14", + "name": "panel_15", "type": "search", "id": "10e72aa0-0816-11eb-987d-c591a71f172b" }, { - "name": "panel_15", + "name": "panel_16", "type": "search", "id": "3ac0f900-0816-11eb-987d-c591a71f172b" + }, + { + "name": "panel_17", + "type": "search", + "id": "624a1d80-7ffa-11ee-9964-dd538601517e" } ], "migrationVersion": { @@ -113,7 +123,7 @@ "namespaces": [ 
"default" ], - "updated_at": "2023-07-19T23:21:19.884Z", + "updated_at": "2023-11-10T18:35:25.331Z", "version": "Wzg1NywxXQ==", "attributes": { "title": "Network Logs", @@ -136,7 +146,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzEzNCwxXQ==", "attributes": { "title": "Modbus - Logs", @@ -181,7 +191,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzEzNSwxXQ==", "attributes": { "title": "Modbus - Source IP", @@ -211,7 +221,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzEzNiwxXQ==", "attributes": { "title": "Modbus - Destination IP", @@ -241,7 +251,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzEzNywxXQ==", "attributes": { "title": "Modbus - Observed Clients and Servers", @@ -271,7 +281,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzEzOCwxXQ==", "attributes": { "title": "Modbus - Observed Client/Server Ratio", @@ -301,7 +311,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzEzOSwxXQ==", "attributes": { "title": "Modbus - Log Count", @@ -330,7 +340,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzE0MCwxXQ==", "attributes": { "title": "Modbus - Logs Over Time", @@ -359,7 +369,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzE0MSwxXQ==", "attributes": { "title": "Modbus - Functions and Exceptions", @@ -389,8 +399,8 @@ "namespaces": [ "default" ], - "updated_at": 
"2023-07-19T23:20:16.971Z", - "version": "WzE0NSwxXQ==", + "updated_at": "2023-11-10T18:34:22.366Z", + "version": "WzE0MiwxXQ==", "attributes": { "title": "Modbus Detailed - Request and Response", "visState": "{\"title\":\"Modbus Detailed - Request and Response\",\"type\":\"horizontal_bar\",\"params\":{\"type\":\"histogram\",\"grid\":{\"categoryLines\":false},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":200},\"title\":{}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"square root\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":75,\"filter\":true,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":true,\"type\":\"histogram\",\"mode\":\"stacked\",\"data\":{\"label\":\"Count\",\"id\":\"1\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"lineWidth\":2,\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"bottom\",\"times\":[],\"addTimeMarker\":false,\"labels\":{},\"thresholdLine\":{\"show\":false,\"value\":10,\"width\":1,\"style\":\"full\",\"color\":\"#E7664C\"},\"dimensions\":{\"x\":{\"accessor\":0,\"format\":{\"id\":\"terms\",\"params\":{\"id\":\"drilldown\",\"otherBucketLabel\":\"Other\",\"missingBucketLabel\":\"Unknown\"}},\"params\":{},\"label\":\"zeek.modbus.network_direction: 
Descending\",\"aggType\":\"terms\"}}},\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"schema\":\"metric\",\"params\":{}},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"segment\",\"params\":{\"field\":\"event.action\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":10,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":true,\"missingBucketLabel\":\"Unknown\",\"customLabel\":\"Function\"}},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"schema\":\"group\",\"params\":{\"field\":\"zeek.modbus.network_direction\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\"}}]}", @@ -419,11 +429,11 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:36:30.972Z", - "version": "Wzk0NywxXQ==", + "updated_at": "2023-11-10T18:56:50.612Z", + "version": "Wzk1NCwxXQ==", "attributes": { "title": "Modbus - Reads", - "visState": "{\"title\":\"Modbus - 
Reads\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"event.action\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Function\"},\"schema\":\"bucket\"},{\"id\":\"6\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"destination.ip\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"IP\"},\"schema\":\"bucket\"},{\"id\":\"5\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus.unit_id\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Unit ID\"},\"schema\":\"bucket\"},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_detailed.values\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"-\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Values\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":30,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\",\"dimensions\":{\"metrics\":[{\"accessor\":5,\"format\":{\"id\":\"number\"},\"params\":{},\"label\":\"Count\",\"aggType\":\"count\"}],\"buckets\":[{\"accessor\":0,\"format\":{\"id\":\"terms\",\"params\":{\"id\":\"drilldown\",\"otherBucketLabel\":\"Other\",\"missingBucketLabel\":\"Missing\"}},\"params\":{},\"label\":\"Values\",\"aggType\":\"terms\"}]}}}", + "visState": "{\"title\":\"Modbus - 
Reads\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"event.action\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Function\"},\"schema\":\"bucket\"},{\"id\":\"6\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"destination.ip\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"IP\"},\"schema\":\"bucket\"},{\"id\":\"5\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus.unit_id\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Unit ID\"},\"schema\":\"bucket\"},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_detailed.values\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"-\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Values\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":15,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\",\"dimensions\":{\"metrics\":[{\"accessor\":5,\"format\":{\"id\":\"number\"},\"params\":{},\"label\":\"Count\",\"aggType\":\"count\"}],\"buckets\":[{\"accessor\":0,\"format\":{\"id\":\"terms\",\"params\":{\"id\":\"drilldown\",\"otherBucketLabel\":\"Other\",\"missingBucketLabel\":\"Missing\"}},\"params\":{},\"label\":\"Values\",\"aggType\":\"terms\"}]}}}", "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", 
"description": "Modbus read holding registers, input registers, discrete inputs, and coils overview from modbus_detailed.log", "version": 1, @@ -449,11 +459,11 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:37:28.218Z", - "version": "Wzk0OCwxXQ==", + "updated_at": "2023-11-10T19:01:32.686Z", + "version": "Wzk1NSwxXQ==", "attributes": { "title": "Modbus - Writes", - "visState": "{\"title\":\"Modbus - Writes\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"event.action\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Function\"},\"schema\":\"bucket\"},{\"id\":\"6\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"destination.ip\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"IP\"},\"schema\":\"bucket\"},{\"id\":\"5\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus.unit_id\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Unit 
ID\"},\"schema\":\"bucket\"},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_detailed.address\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"-\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Address\"},\"schema\":\"bucket\"},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_detailed.values\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"-\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Values\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":30,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\",\"dimensions\":{\"metrics\":[{\"accessor\":3,\"format\":{\"id\":\"number\"},\"params\":{},\"label\":\"Count\",\"aggType\":\"count\"}],\"buckets\":[{\"accessor\":0,\"format\":{\"id\":\"terms\",\"params\":{\"id\":\"drilldown\",\"otherBucketLabel\":\"Other\",\"missingBucketLabel\":\"Missing\"}},\"params\":{},\"label\":\"Values\",\"aggType\":\"terms\"}]}}}", + "visState": "{\"title\":\"Modbus - 
Writes\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"event.action\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Function\"},\"schema\":\"bucket\"},{\"id\":\"6\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"destination.ip\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"IP\"},\"schema\":\"bucket\"},{\"id\":\"5\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus.unit_id\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Unit ID\"},\"schema\":\"bucket\"},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_detailed.address\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"-\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Address\"},\"schema\":\"bucket\"},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_detailed.values\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":200,\"otherBucket\":false,\"otherBucketLabel\":\"-\",\"missingBucket\":false,\"missingBucketLabel\":\"-\",\"customLabel\":\"Values\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":15,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\",\"dimensions\":{\"metrics\":[{\"accessor\":3,\"format\":{\"id\":\"number\"},\"params\":{},\"label\":\"Count\",\"aggType\":\"count\"}],\"buckets\":[{\"ac
cessor\":0,\"format\":{\"id\":\"terms\",\"params\":{\"id\":\"drilldown\",\"otherBucketLabel\":\"Other\",\"missingBucketLabel\":\"Missing\"}},\"params\":{},\"label\":\"Values\",\"aggType\":\"terms\"}]}}}", "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", "description": "Modbus write register and write coil overview from modbus_detailed.log", "version": 1, @@ -479,8 +489,8 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", - "version": "WzE0MywxXQ==", + "updated_at": "2023-11-10T18:34:22.366Z", + "version": "WzE0NSwxXQ==", "attributes": { "title": "Modbus - Transport", "visState": "{\"title\":\"Modbus - Transport\",\"type\":\"pie\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"network.type\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Type\"},\"schema\":\"segment\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"network.transport\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Transport\"},\"schema\":\"segment\"},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"destination.port\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":10,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Port\"},\"schema\":\"segment\"}],\"params\":{\"type\":\"pie\",\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"bottom\",\"isDonut\":true,\"labels\":{\"show\":true,\"values\":true,\"last_level\":true,\"truncate\":100}}}", @@ -503,13 +513,43 @@ "visualization": "7.10.0" } }, + { + "id": 
"f6d09e10-7ffb-11ee-9964-dd538601517e", + "type": "visualization", + "namespaces": [ + "default" + ], + "updated_at": "2023-11-10T19:04:24.945Z", + "version": "Wzk1NiwxXQ==", + "attributes": { + "title": "Modbus - Device Identification Objects", + "visState": "{\"title\":\"Modbus - Device Identification Objects\",\"type\":\"table\",\"aggs\":[{\"id\":\"1\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"},{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_read_device_identification.device_id_code\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Device ID\"},\"schema\":\"bucket\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_read_device_identification.object_id\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Object ID\"},\"schema\":\"bucket\"},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"zeek.modbus_read_device_identification.object_value\",\"orderBy\":\"1\",\"order\":\"desc\",\"size\":100,\"otherBucket\":true,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"customLabel\":\"Value\"},\"schema\":\"bucket\"}],\"params\":{\"perPage\":10,\"showPartialRows\":false,\"showMetricsAtAllLevels\":false,\"showTotal\":false,\"totalFunc\":\"sum\",\"percentageCol\":\"\"}}", + "uiStateJSON": "{\"vis\":{\"sortColumn\":{\"colIndex\":0,\"direction\":\"asc\"}}}", + "description": "", + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"kuery\"},\"filter\":[]}" + }, + "savedSearchRefName": "search_0" + }, + "references": [ + { + "name": "search_0", + "type": "search", + "id": 
"624a1d80-7ffa-11ee-9964-dd538601517e" + } + ], + "migrationVersion": { + "visualization": "7.10.0" + } + }, { "id": "1cfb4e10-e0b7-11ea-8a49-0d5868b09681", "type": "search", "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzE0NiwxXQ==", "attributes": { "title": "Modbus - Detailed", @@ -553,7 +593,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzE0NywxXQ==", "attributes": { "title": "Modbus - Mask Write", @@ -597,7 +637,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzE0OCwxXQ==", "attributes": { "title": "Modbus - Read Write Multiple", @@ -636,13 +676,61 @@ "search": "7.9.3" } }, + { + "id": "624a1d80-7ffa-11ee-9964-dd538601517e", + "type": "search", + "namespaces": [ + "default" + ], + "updated_at": "2023-11-10T18:55:03.788Z", + "version": "Wzk1MiwxXQ==", + "attributes": { + "title": "Modbus - Read Device Identification", + "description": "", + "hits": 0, + "columns": [ + "source.ip", + "destination.ip", + "zeek.modbus.network_direction", + "event.action", + "event.result", + "zeek.modbus.unit_id", + "zeek.modbus.trans_id", + "zeek.modbus_read_device_identification.device_id_code", + "zeek.modbus_read_device_identification.conformity_level", + "zeek.modbus_read_device_identification.object_id", + "zeek.modbus_read_device_identification.object_value", + "event.id" + ], + "sort": [ + [ + "firstPacket", + "desc" + ] + ], + "version": 1, + "kibanaSavedObjectMeta": { + "searchSourceJSON": "{\"highlightAll\":true,\"version\":true,\"query\":{\"query\":\"event.dataset:modbus_read_device_identification\",\"language\":\"lucene\"},\"filter\":[],\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}" + } + }, + "references": [ + { + "name": "kibanaSavedObjectMeta.searchSourceJSON.index", + "type": 
"index-pattern", + "id": "arkime_sessions3-*" + } + ], + "migrationVersion": { + "search": "7.9.3" + } + }, { "id": "da7d99a0-ef74-11e9-91bd-23d686ac8389", "type": "search", "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzE0OSwxXQ==", "attributes": { "title": "Modbus - Known Clients and Servers Logs", @@ -681,7 +769,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:20:16.971Z", + "updated_at": "2023-11-10T18:34:22.366Z", "version": "WzE1MCwxXQ==", "attributes": { "title": "Modbus - All Logs", @@ -721,7 +809,7 @@ "namespaces": [ "default" ], - "updated_at": "2023-07-19T23:21:16.791Z", + "updated_at": "2023-11-10T18:35:22.307Z", "version": "WzgzMiwxXQ==", "attributes": { "title": "Connections - Logs", diff --git a/dashboards/templates/composable/component/zeek_ot.json b/dashboards/templates/composable/component/zeek_ot.json index 6a1870503..2ed2174ef 100644 --- a/dashboards/templates/composable/component/zeek_ot.json +++ b/dashboards/templates/composable/component/zeek_ot.json @@ -201,12 +201,19 @@ "zeek.modbus.network_direction": { "type": "keyword" }, "zeek.modbus.trans_id": { "type": "integer" }, "zeek.modbus.unit_id": { "type": "integer" }, + "zeek.modbus.mei_type": { "type": "keyword" }, "zeek.modbus_detailed.address": { "type": "integer" }, "zeek.modbus_detailed.quantity": { "type": "integer" }, "zeek.modbus_detailed.values": { "type": "keyword" }, "zeek.modbus_mask_write_register.address": { "type": "integer" }, "zeek.modbus_mask_write_register.and_mask": { "type": "integer" }, "zeek.modbus_mask_write_register.or_mask": { "type": "integer" }, + "zeek.modbus_read_device_identification.conformity_level_code": { "type": "keyword" }, + "zeek.modbus_read_device_identification.conformity_level": { "type": "keyword" }, + "zeek.modbus_read_device_identification.device_id_code": { "type": "long" }, + "zeek.modbus_read_device_identification.object_id_code": { "type": 
"keyword" }, + "zeek.modbus_read_device_identification.object_id": { "type": "keyword" }, + "zeek.modbus_read_device_identification.object_value": { "type": "keyword" }, "zeek.modbus_read_write_multiple_registers.read_quantity": { "type": "integer" }, "zeek.modbus_read_write_multiple_registers.read_registers": { "type": "keyword" }, "zeek.modbus_read_write_multiple_registers.read_start_address": { "type": "integer" }, diff --git a/logstash/pipelines/zeek/11_zeek_parse.conf b/logstash/pipelines/zeek/11_zeek_parse.conf index 0c9716b53..791859b42 100644 --- a/logstash/pipelines/zeek/11_zeek_parse.conf +++ b/logstash/pipelines/zeek/11_zeek_parse.conf @@ -1764,6 +1764,12 @@ filter { mutate { id => "mutate_gsub_zeek_known_modbus_device_type" gsub => [ "[zeek_cols][device_type]", "Known::", "" ] } + mutate { id => "mutate_gsub_zeek_known_modbus_master" + gsub => [ "[zeek_cols][device_type]", "MASTER", "CLIENT" ] } + + mutate { id => "mutate_gsub_zeek_known_modbus_slave" + gsub => [ "[zeek_cols][device_type]", "SLAVE", "SERVER" ] } + mutate { id => "mutate_add_tag_ics_known_modbus_log" add_tag => [ "ics" ] } @@ -1932,15 +1938,16 @@ filter { } else if ([log_source] == "modbus_detailed") { ############################################################################################################################# # modbus_detailed.log - # https://github.com/cisagov/ICSNPP + # main.zeek (https://github.com/cisagov/icsnpp-modbus) dissect { id => "dissect_zeek_modbus_detailed" # zeek's default delimiter is a literal tab, MAKE SURE YOUR EDITOR DOESN'T SCREW IT UP mapping => { - "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][drop_orig_h]} %{[zeek_cols][drop_orig_p]} %{[zeek_cols][drop_resp_h]} %{[zeek_cols][drop_resp_p]} %{[zeek_cols][is_orig]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][unit_id]} %{[zeek_cols][func]} %{[zeek_cols][network_direction]} %{[zeek_cols][address]} 
%{[zeek_cols][quantity]} %{[zeek_cols][values]}" + "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][drop_orig_h]} %{[zeek_cols][drop_orig_p]} %{[zeek_cols][drop_resp_h]} %{[zeek_cols][drop_resp_p]} %{[zeek_cols][is_orig]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][trans_id]} %{[zeek_cols][unit_id]} %{[zeek_cols][func]} %{[zeek_cols][network_direction]} %{[zeek_cols][address]} %{[zeek_cols][quantity]} %{[zeek_cols][values]}" } } + if ("_dissectfailure" in [tags]) { mutate { id => "mutate_split_zeek_modbus_detailed" @@ -1949,29 +1956,32 @@ filter { } ruby { id => "ruby_zip_zeek_modbus_detailed" - init => "$zeek_modbus_detailed_field_names = [ 'ts', 'uid', 'drop_orig_h', 'drop_orig_p', 'drop_resp_h', 'drop_resp_p', 'is_orig', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'unit_id', 'func', 'network_direction', 'address', 'quantity', 'values' ]" + init => "$zeek_modbus_detailed_field_names = [ 'ts', 'uid', 'drop_orig_h', 'drop_orig_p', 'drop_resp_h', 'drop_resp_p', 'is_orig', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'trans_id', 'unit_id', 'func', 'network_direction', 'address', 'quantity', 'values' ]" code => "event.set('[zeek_cols]', $zeek_modbus_detailed_field_names.zip(event.get('[message]')).to_h)" } } mutate { id => "mutate_add_fields_zeek_modbus_detailed" - add_field => { "[zeek_cols][service]" => "modbus" } + add_field => { + "[zeek_cols][service]" => "modbus" + } add_tag => [ "ics" ] } } else if ([log_source] == "modbus_mask_write_register") { ############################################################################################################################# # modbus_mask_write_register.log - # https://github.com/cisagov/ICSNPP + # main.zeek (https://github.com/cisagov/icsnpp-modbus) dissect { id => "dissect_zeek_modbus_mask_write_register" # zeek's default delimiter is a literal tab, MAKE SURE YOUR EDITOR DOESN'T SCREW IT UP mapping => { - "[message]" => "%{[zeek_cols][ts]} 
%{[zeek_cols][uid]} %{[zeek_cols][drop_orig_h]} %{[zeek_cols][drop_orig_p]} %{[zeek_cols][drop_resp_h]} %{[zeek_cols][drop_resp_p]} %{[zeek_cols][is_orig]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][unit_id]} %{[zeek_cols][func]} %{[zeek_cols][network_direction]} %{[zeek_cols][address]} %{[zeek_cols][and_mask]} %{[zeek_cols][or_mask]}" + "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][drop_orig_h]} %{[zeek_cols][drop_orig_p]} %{[zeek_cols][drop_resp_h]} %{[zeek_cols][drop_resp_p]} %{[zeek_cols][is_orig]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][trans_id]} %{[zeek_cols][unit_id]} %{[zeek_cols][func]} %{[zeek_cols][network_direction]} %{[zeek_cols][address]} %{[zeek_cols][and_mask]} %{[zeek_cols][or_mask]}" } } + if ("_dissectfailure" in [tags]) { mutate { id => "mutate_split_zeek_modbus_mask_write_register" @@ -1980,28 +1990,66 @@ filter { } ruby { id => "ruby_zip_zeek_modbus_mask_write_register" - init => "$zeek_modbus_mask_write_register_field_names = [ 'ts', 'uid', 'drop_orig_h', 'drop_orig_p', 'drop_resp_h', 'drop_resp_p', 'is_orig', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'unit_id', 'func', 'network_direction', 'address', 'and_mask', 'or_mask' ]" - code => "event.set('[zeek_cols]', $zeek_modbus_modbus_mask_write_register_field_names.zip(event.get('[message]')).to_h)" + init => "$zeek_modbus_mask_write_register_field_names = [ 'ts', 'uid', 'drop_orig_h', 'drop_orig_p', 'drop_resp_h', 'drop_resp_p', 'is_orig', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'trans_id', 'unit_id', 'func', 'network_direction', 'address', 'and_mask', 'or_mask' ]" + code => "event.set('[zeek_cols]', $zeek_modbus_mask_write_register_field_names.zip(event.get('[message]')).to_h)" } } mutate { - id => "mutate_add_fields_modbus_mask_write_register" - add_field => { "[zeek_cols][service]" => "modbus" } + id => 
"mutate_add_fields_zeek_modbus_mask_write_register" + add_field => { + "[zeek_cols][service]" => "modbus" + } + add_tag => [ "ics" ] + } + + } else if ([log_source] == "modbus_read_device_identification") { + ############################################################################################################################# + # modbus_read_device_identification.log + # main.zeek (https://github.com/cisagov/icsnpp-modbus) + + dissect { + id => "dissect_zeek_modbus_read_device_identification" + # zeek's default delimiter is a literal tab, MAKE SURE YOUR EDITOR DOESN'T SCREW IT UP + mapping => { + "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][drop_orig_h]} %{[zeek_cols][drop_orig_p]} %{[zeek_cols][drop_resp_h]} %{[zeek_cols][drop_resp_p]} %{[zeek_cols][is_orig]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][trans_id]} %{[zeek_cols][unit_id]} %{[zeek_cols][func]} %{[zeek_cols][network_direction]} %{[zeek_cols][mei_type]} %{[zeek_cols][conformity_level_code]} %{[zeek_cols][conformity_level]} %{[zeek_cols][device_id_code]} %{[zeek_cols][object_id_code]} %{[zeek_cols][object_id]} %{[zeek_cols][object_value]}" + } + } + + if ("_dissectfailure" in [tags]) { + mutate { + id => "mutate_split_zeek_modbus_read_device_identification" + # zeek's default delimiter is a literal tab, MAKE SURE YOUR EDITOR DOESN'T SCREW IT UP + split => { "[message]" => " " } + } + ruby { + id => "ruby_zip_zeek_modbus_read_device_identification" + init => "$zeek_modbus_read_device_identification_field_names = [ 'ts', 'uid', 'drop_orig_h', 'drop_orig_p', 'drop_resp_h', 'drop_resp_p', 'is_orig', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'trans_id', 'unit_id', 'func', 'network_direction', 'mei_type', 'conformity_level_code', 'conformity_level', 'device_id_code', 'object_id_code', 'object_id', 'object_value' ]" + code => "event.set('[zeek_cols]', 
$zeek_modbus_read_device_identification_field_names.zip(event.get('[message]')).to_h)" + } + } + + mutate { + id => "mutate_add_fields_zeek_modbus_read_device_identification" + add_field => { + "[zeek_cols][service]" => "modbus" + } add_tag => [ "ics" ] } } else if ([log_source] == "modbus_read_write_multiple_registers") { ############################################################################################################################# # modbus_read_write_multiple_registers.log - # https://github.com/cisagov/ICSNPP + # main.zeek (https://github.com/cisagov/icsnpp-modbus) + dissect { id => "dissect_zeek_modbus_read_write_multiple_registers" # zeek's default delimiter is a literal tab, MAKE SURE YOUR EDITOR DOESN'T SCREW IT UP mapping => { - "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][drop_orig_h]} %{[zeek_cols][drop_orig_p]} %{[zeek_cols][drop_resp_h]} %{[zeek_cols][drop_resp_p]} %{[zeek_cols][is_orig]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][unit_id]} %{[zeek_cols][func]} %{[zeek_cols][network_direction]} %{[zeek_cols][write_start_address]} %{[zeek_cols][write_registers]} %{[zeek_cols][read_start_address]} %{[zeek_cols][read_quantity]} %{[zeek_cols][read_registers]}" + "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][drop_orig_h]} %{[zeek_cols][drop_orig_p]} %{[zeek_cols][drop_resp_h]} %{[zeek_cols][drop_resp_p]} %{[zeek_cols][is_orig]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][trans_id]} %{[zeek_cols][unit_id]} %{[zeek_cols][func]} %{[zeek_cols][network_direction]} %{[zeek_cols][write_start_address]} %{[zeek_cols][write_registers]} %{[zeek_cols][read_start_address]} %{[zeek_cols][read_quantity]} %{[zeek_cols][read_registers]}" } } + if ("_dissectfailure" in [tags]) { mutate { id => "mutate_split_zeek_modbus_read_write_multiple_registers" @@ -2010,14 +2058,16 @@ filter { } ruby { id => 
"ruby_zip_zeek_modbus_read_write_multiple_registers" - init => "$zeek_modbus_read_write_multiple_registers_field_names = [ 'ts', 'uid', 'drop_orig_h', 'drop_orig_p', 'drop_resp_h', 'drop_resp_p', 'is_orig', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'unit_id', 'func', 'network_direction', 'write_start_address', 'write_registers', 'read_start_address', 'read_quantity', 'read_registers' ]" + init => "$zeek_modbus_read_write_multiple_registers_field_names = [ 'ts', 'uid', 'drop_orig_h', 'drop_orig_p', 'drop_resp_h', 'drop_resp_p', 'is_orig', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'trans_id', 'unit_id', 'func', 'network_direction', 'write_start_address', 'write_registers', 'read_start_address', 'read_quantity', 'read_registers' ]" code => "event.set('[zeek_cols]', $zeek_modbus_read_write_multiple_registers_field_names.zip(event.get('[message]')).to_h)" } } mutate { id => "mutate_add_fields_zeek_modbus_read_write_multiple_registers" - add_field => { "[zeek_cols][service]" => "modbus" } + add_field => { + "[zeek_cols][service]" => "modbus" + } add_tag => [ "ics" ] } diff --git a/logstash/pipelines/zeek/12_zeek_mutate.conf b/logstash/pipelines/zeek/12_zeek_mutate.conf index 6d3b61636..83ed11143 100644 --- a/logstash/pipelines/zeek/12_zeek_mutate.conf +++ b/logstash/pipelines/zeek/12_zeek_mutate.conf @@ -1020,11 +1020,12 @@ filter { split => { "[zeek][modbus_detailed][values]" => "," } } } - # rename a to make correlating modbus easier between logs + # rename some fields to make correlating modbus easier between logs mutate { id => "mutate_rename_modbus_detailed_fields" rename => { "[zeek][modbus_detailed][func]" => "[zeek][modbus][func]" } rename => { "[zeek][modbus_detailed][unit_id]" => "[zeek][modbus][unit_id]" } + rename => { "[zeek][modbus_detailed][trans_id]" => "[zeek][modbus][trans_id]" } rename => { "[zeek][modbus_detailed][network_direction]" => "[zeek][modbus][network_direction]" } } @@ -1032,13 +1033,28 @@ filter { 
############################################################################################################################# # modbus_mask_write_register.log specific logic - # rename a to make correlating modbus easier between logs + # rename some fields to make correlating modbus easier between logs mutate { id => "mutate_rename_modbus_mask_write_register_fields" rename => { "[zeek][modbus_mask_write_register][address]" => "[zeek][modbus_detailed][address]" } rename => { "[zeek][modbus_mask_write_register][func]" => "[zeek][modbus][func]" } rename => { "[zeek][modbus_mask_write_register][network_direction]" => "[zeek][modbus][network_direction]" } rename => { "[zeek][modbus_mask_write_register][unit_id]" => "[zeek][modbus][unit_id]" } + rename => { "[zeek][modbus_mask_write_register][trans_id]" => "[zeek][modbus][trans_id]" } + } + + } else if ([log_source] == "modbus_read_device_identification") { + ############################################################################################################################# + # modbus_read_device_identification.log specific logic + + # rename some fields to make correlating modbus easier between logs + mutate { + id => "mutate_rename_modbus_read_device_identification_fields" + rename => { "[zeek][modbus_read_device_identification][network_direction]" => "[zeek][modbus][network_direction]" } + rename => { "[zeek][modbus_read_device_identification][unit_id]" => "[zeek][modbus][unit_id]" } + rename => { "[zeek][modbus_read_device_identification][trans_id]" => "[zeek][modbus][trans_id]" } + rename => { "[zeek][modbus_read_device_identification][func]" => "[zeek][modbus][func]" } + rename => { "[zeek][modbus_read_device_identification][mei_type]" => "[zeek][modbus][mei_type]" } } } else if ([log_source] == "modbus_read_write_multiple_registers") { @@ -1055,11 +1071,12 @@ filter { split => { "[zeek][modbus_read_write_multiple_registers][write_registers]" => "," } } } - # rename a to make correlating modbus easier 
between logs + # rename some fields to make correlating modbus easier between logs mutate { id => "mutate_rename_modbus_read_write_multiple_registers_fields" rename => { "[zeek][modbus_read_write_multiple_registers][network_direction]" => "[zeek][modbus][network_direction]" } rename => { "[zeek][modbus_read_write_multiple_registers][unit_id]" => "[zeek][modbus][unit_id]" } + rename => { "[zeek][modbus_read_write_multiple_registers][trans_id]" => "[zeek][modbus][trans_id]" } rename => { "[zeek][modbus_read_write_multiple_registers][func]" => "[zeek][modbus][func]" } } diff --git a/logstash/pipelines/zeek/13_zeek_normalize.conf b/logstash/pipelines/zeek/13_zeek_normalize.conf index 4e735f962..c4e74423b 100644 --- a/logstash/pipelines/zeek/13_zeek_normalize.conf +++ b/logstash/pipelines/zeek/13_zeek_normalize.conf @@ -323,8 +323,17 @@ filter { merge => { "[event][action]" => "[@metadata][zeek_ldap_search_action]" } } } - if ([zeek][modbus][func]) { mutate { id => "mutate_merge_normalize_zeek_modbus_func" - merge => { "[event][action]" => "[zeek][modbus][func]" } } } + if ([zeek][modbus][func]) { + mutate { id => "mutate_gsub_zeek_modbus_master" + gsub => [ "[zeek][modbus][func]", "MASTER", "CLIENT" ] } + mutate { id => "mutate_gsub_zeek_modbus_slave" + gsub => [ "[zeek][modbus][func]", "SLAVE", "SERVER" ] } + mutate { id => "mutate_merge_normalize_zeek_modbus_func" + merge => { "[event][action]" => "[zeek][modbus][func]" } } + } + + if ([zeek][modbus][mei_type]) { mutate { id => "mutate_merge_normalize_zeek_modbus_mei_type" + merge => { "[event][action]" => "[zeek][modbus][mei_type]" } } } if ([zeek][mqtt_connect][connect_status]) { # this log entry implicitly means "connect" diff --git a/scripts/zeek_script_to_malcolm_boilerplate.py b/scripts/zeek_script_to_malcolm_boilerplate.py index 42b9b366b..11c9857d3 100755 --- a/scripts/zeek_script_to_malcolm_boilerplate.py +++ b/scripts/zeek_script_to_malcolm_boilerplate.py @@ -50,6 +50,10 @@ 'resp_h', 'resp_p', 
'resp_l2_addr', + 'drop_orig_h', + 'drop_orig_p', + 'drop_resp_h', + 'drop_resp_p', 'proto', 'service', 'user', diff --git a/shared/bin/zeek_install_plugins.sh b/shared/bin/zeek_install_plugins.sh index 0458dbc62..d1588bf8b 100755 --- a/shared/bin/zeek_install_plugins.sh +++ b/shared/bin/zeek_install_plugins.sh @@ -74,7 +74,7 @@ ZKG_GITHUB_URLS=( "https://github.com/cisagov/icsnpp-enip" "https://github.com/cisagov/icsnpp-ethercat" "https://github.com/cisagov/icsnpp-genisys" - "https://github.com/mmguero-dev/icsnpp-modbus" + "https://github.com/cisagov/icsnpp-modbus" "https://github.com/cisagov/icsnpp-opcua-binary" "https://github.com/cisagov/icsnpp-s7comm" "https://github.com/cisagov/icsnpp-synchrophasor" @@ -98,7 +98,7 @@ ZKG_GITHUB_URLS=( "https://github.com/corelight/zeek-spicy-ospf" "https://github.com/corelight/zeek-spicy-stun" "https://github.com/corelight/zeek-spicy-wireguard" - "https://github.com/mmguero-dev/zeek-xor-exe-plugin" + "https://github.com/corelight/zeek-xor-exe-plugin|master" "https://github.com/corelight/zerologon" "https://github.com/cybera/zeek-sniffpass" "https://github.com/mmguero-dev/bzar" From 61e3b6ef1f384419b16c065b160a0e1086146b36 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Fri, 10 Nov 2023 13:00:19 -0700 Subject: [PATCH 31/82] Fix logstash parser issues with ldap (idaholab/Malcolm#289) --- logstash/pipelines/zeek/11_zeek_parse.conf | 24 ++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/logstash/pipelines/zeek/11_zeek_parse.conf b/logstash/pipelines/zeek/11_zeek_parse.conf index 791859b42..f6809839d 100644 --- a/logstash/pipelines/zeek/11_zeek_parse.conf +++ b/logstash/pipelines/zeek/11_zeek_parse.conf @@ -1822,15 +1822,16 @@ filter { } else if ([log_source] == "ldap") { ############################################################################################################################# # ldap.log - # https://github.com/zeek/spicy-ldap/blob/main/analyzer/main.zeek + # main.zeek 
(https://docs.zeek.org/en/master/scripts/base/protocols/ldap/main.zeek.html) dissect { id => "dissect_zeek_ldap" # zeek's default delimiter is a literal tab, MAKE SURE YOUR EDITOR DOESN'T SCREW IT UP mapping => { - "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][proto]} %{[zeek_cols][message_id]} %{[zeek_cols][version]} %{[zeek_cols][operation]} %{[zeek_cols][result_code]} %{[zeek_cols][result_message]} %{[zeek_cols][object]} %{[zeek_cols][argument]}" + "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][message_id]} %{[zeek_cols][version]} %{[zeek_cols][operation]} %{[zeek_cols][result_code]} %{[zeek_cols][result_message]} %{[zeek_cols][object]} %{[zeek_cols][argument]}" } } + if ("_dissectfailure" in [tags]) { mutate { id => "mutate_split_zeek_ldap" @@ -1839,28 +1840,32 @@ filter { } ruby { id => "ruby_zip_zeek_ldap" - init => "$zeek_ldap_field_names = [ 'ts', 'uid', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'proto', 'message_id', 'version', 'operation', 'result_code', 'result_message', 'object', 'argument' ]" + init => "$zeek_ldap_field_names = [ 'ts', 'uid', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'message_id', 'version', 'operation', 'result_code', 'result_message', 'object', 'argument' ]" code => "event.set('[zeek_cols]', $zeek_ldap_field_names.zip(event.get('[message]')).to_h)" } } mutate { id => "mutate_add_fields_zeek_ldap" - add_field => { "[zeek_cols][service]" => "ldap" } + add_field => { + "[zeek_cols][service]" => "ldap" + } + } } else if ([log_source] == "ldap_search") { ############################################################################################################################# # ldap_search.log - # https://github.com/zeek/spicy-ldap/blob/main/analyzer/main.zeek + # main.zeek 
(https://docs.zeek.org/en/master/scripts/base/protocols/ldap/main.zeek.html) dissect { id => "dissect_zeek_ldap_search" # zeek's default delimiter is a literal tab, MAKE SURE YOUR EDITOR DOESN'T SCREW IT UP mapping => { - "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][proto]} %{[zeek_cols][message_id]} %{[zeek_cols][scope]} %{[zeek_cols][deref]} %{[zeek_cols][base_object]} %{[zeek_cols][result_count]} %{[zeek_cols][result_code]} %{[zeek_cols][result_message]} %{[zeek_cols][filter]} %{[zeek_cols][attributes]}" + "[message]" => "%{[zeek_cols][ts]} %{[zeek_cols][uid]} %{[zeek_cols][orig_h]} %{[zeek_cols][orig_p]} %{[zeek_cols][resp_h]} %{[zeek_cols][resp_p]} %{[zeek_cols][message_id]} %{[zeek_cols][scope]} %{[zeek_cols][deref]} %{[zeek_cols][base_object]} %{[zeek_cols][result_count]} %{[zeek_cols][result_code]} %{[zeek_cols][result_message]} %{[zeek_cols][filter]} %{[zeek_cols][attributes]}" } } + if ("_dissectfailure" in [tags]) { mutate { id => "mutate_split_zeek_ldap_search" @@ -1869,14 +1874,17 @@ filter { } ruby { id => "ruby_zip_zeek_ldap_search" - init => "$zeek_ldap_search_field_names = [ 'ts', 'uid', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'proto', 'message_id', 'scope', 'deref', 'base_object', 'result_count', 'result_code', 'result_message', 'filter', 'attributes' ]" + init => "$zeek_ldap_search_field_names = [ 'ts', 'uid', 'orig_h', 'orig_p', 'resp_h', 'resp_p', 'message_id', 'scope', 'deref', 'base_object', 'result_count', 'result_code', 'result_message', 'filter', 'attributes' ]" code => "event.set('[zeek_cols]', $zeek_ldap_search_field_names.zip(event.get('[message]')).to_h)" } } mutate { id => "mutate_add_fields_zeek_ldap_search" - add_field => { "[zeek_cols][service]" => "ldap" } + add_field => { + "[zeek_cols][service]" => "ldap" + } + } } else if ([log_source] == "login") { From 3934eaee346dd28a5828d249b1de00975bf24ad1 Mon Sep 17 00:00:00 
2001 From: Seth Grover Date: Mon, 13 Nov 2023 07:14:47 -0700 Subject: [PATCH 32/82] elasticsearch python libraries to 8.11.0 --- api/requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/requirements.txt b/api/requirements.txt index a2e4a9eb2..05578b547 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -5,5 +5,5 @@ opensearch-py==2.3.2 requests==2.31.0 regex==2022.3.2 dateparser==1.1.1 -elasticsearch==8.10.1 -elasticsearch-dsl==8.9.0 \ No newline at end of file +elasticsearch==8.11.0 +elasticsearch-dsl==8.11.0 \ No newline at end of file From baebc49e7b671aa826177fb0956ecbfea51bae79 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Mon, 13 Nov 2023 07:45:51 -0700 Subject: [PATCH 33/82] idaholab/Malcolm#275, integrate suricata version of nsacyber ELITEWOLF rules --- Dockerfiles/suricata.Dockerfile | 3 ++ suricata/default-rules/IT/.gitignore | 2 + suricata/default-rules/OT/.gitignore | 2 + .../AllenBradley_RockwellAutomation.rules | 14 +++++++ .../SchweitzerEngineeringLaboratories.rules | 38 +++++++++++++++++++ .../OT/nsacyber/ELITEWOLF/Siemens.rules | 5 +++ 6 files changed, 64 insertions(+) create mode 100644 suricata/default-rules/IT/.gitignore create mode 100644 suricata/default-rules/OT/.gitignore create mode 100644 suricata/default-rules/OT/nsacyber/ELITEWOLF/AllenBradley_RockwellAutomation.rules create mode 100644 suricata/default-rules/OT/nsacyber/ELITEWOLF/SchweitzerEngineeringLaboratories.rules create mode 100644 suricata/default-rules/OT/nsacyber/ELITEWOLF/Siemens.rules diff --git a/Dockerfiles/suricata.Dockerfile b/Dockerfiles/suricata.Dockerfile index 34c846a1e..66fa29c34 100644 --- a/Dockerfiles/suricata.Dockerfile +++ b/Dockerfiles/suricata.Dockerfile @@ -51,6 +51,8 @@ ENV SURICATA_UPDATE_DIR "$SURICATA_MANAGED_DIR/update" ENV SURICATA_UPDATE_SOURCES_DIR "$SURICATA_UPDATE_DIR/sources" ENV SURICATA_UPDATE_CACHE_DIR "$SURICATA_UPDATE_DIR/cache" +COPY --chmod=644 suricata/default-rules/ /tmp/default-rules/ 
+ RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sources && \ apt-get -q update && \ apt-get -y -q --no-install-recommends upgrade && \ @@ -118,6 +120,7 @@ RUN sed -i "s/main$/main contrib non-free/g" /etc/apt/sources.list.d/debian.sour chown -R ${PUSER}:${PGROUP} "$SURICATA_CUSTOM_RULES_DIR" && \ cp "$(dpkg -L suricata-update | grep 'update\.yaml$' | head -n 1)" \ "$SURICATA_UPDATE_CONFIG_FILE" && \ + find /tmp/default-rules/ -not -path '*/.gitignore' -type f -exec cp "{}" "$SURICATA_CONFIG_DIR"/rules/ \; && \ suricata-update update-sources --verbose --data-dir "$SURICATA_MANAGED_DIR" --config "$SURICATA_UPDATE_CONFIG_FILE" --suricata-conf "$SURICATA_CONFIG_FILE" && \ suricata-update update --fail --verbose --etopen --data-dir "$SURICATA_MANAGED_DIR" --config "$SURICATA_UPDATE_CONFIG_FILE" --suricata-conf "$SURICATA_CONFIG_FILE" && \ chown root:${PGROUP} /sbin/ethtool /usr/bin/suricata && \ diff --git a/suricata/default-rules/IT/.gitignore b/suricata/default-rules/IT/.gitignore new file mode 100644 index 000000000..4397c3a32 --- /dev/null +++ b/suricata/default-rules/IT/.gitignore @@ -0,0 +1,2 @@ +!.gitignore + diff --git a/suricata/default-rules/OT/.gitignore b/suricata/default-rules/OT/.gitignore new file mode 100644 index 000000000..4397c3a32 --- /dev/null +++ b/suricata/default-rules/OT/.gitignore @@ -0,0 +1,2 @@ +!.gitignore + diff --git a/suricata/default-rules/OT/nsacyber/ELITEWOLF/AllenBradley_RockwellAutomation.rules b/suricata/default-rules/OT/nsacyber/ELITEWOLF/AllenBradley_RockwellAutomation.rules new file mode 100644 index 000000000..c2159496e --- /dev/null +++ b/suricata/default-rules/OT/nsacyber/ELITEWOLF/AllenBradley_RockwellAutomation.rules @@ -0,0 +1,14 @@ +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-TCP REQUEST"; content:"/rokform/advancedDiags?pageReq=tcp"; sid:1000039; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL 
Path Activity-SYSTEM DATA DETAIL"; content:"/rokform/SysDataDetail?name="; sid:1000040; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-UDP TABLE"; content:"/rokform/advancedDiags?pageReq=udptable"; sid:1000041; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-TCP CONNECT"; content:"rokform/advancedDiags?pageReq=tcpconn"; sid:1000042; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-IP ROUTE"; content:"/rokform/advancedDiags?pageReq=iproute"; sid:1000043; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-GENERAL MEMORY"; content:"/rokform/advancedDiags?pageReq=genmem"; sid:1000044; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-HEAP REQUEST"; content:"/rokform/advancedDiags?pageReq=heap"; sid:1000045; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-ICMP REQUEST"; content:"/rokform/advancedDiags?pageReq=icmp"; sid:1000046; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-ARP REQUEST"; content:"/rokform/advancedDiags?pageReq=arp"; sid:1000047; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-UDP REQUEST"; content:"/rokform/advancedDiags?pageReq=udp"; sid:1000048; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-IF REQUEST"; content:"/rokform/advancedDiags?pageReq=if"; sid:1000049; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-IP REQUEST"; content:"/rokform/advancedDiags?pageReq=ip"; sid:1000050; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell 
Automation URL Path Activity-CSS Path"; content:"/css/radevice.css"; sid:1000051; rev:1;) +alert http any any -> any any (msg: "ELITEWOLF Allen-Bradley/Rockwell Automation URL Path Activity-SYSTEM LIST DATA"; content:"/rokform/SysListDetail?name=";sid:1000052;rev:1;) diff --git a/suricata/default-rules/OT/nsacyber/ELITEWOLF/SchweitzerEngineeringLaboratories.rules b/suricata/default-rules/OT/nsacyber/ELITEWOLF/SchweitzerEngineeringLaboratories.rules new file mode 100644 index 000000000..fe0f4a32d --- /dev/null +++ b/suricata/default-rules/OT/nsacyber/ELITEWOLF/SchweitzerEngineeringLaboratories.rules @@ -0,0 +1,38 @@ +alert tcp any 443 -> any any (msg: "ELITEWOLF SEL-3530-RTAC URL path activity - homepage"; content:"/home.sel"; sid:1000001; rev:1;) +alert tcp any 443 -> any any (msg: "ELITEWOLF SEL-3530-RTAC URL path activity - LoginError"; content:"/errors/err401.sel?username="; sid:1000002; rev:1;) +alert tcp any 443 -> any any (msg: "ELITEWOLF SEL-3530-RTAC URL path activity - default.sel page"; content:"/default.sel"; sid:1000003; rev:1;) +alert tcp any 1024 -> any any (msg: "ELITEWOLF SEL-3530-RTAC Possible SSH Login Activity"; content:"SSH-2.0-dropbear_2016.74"; sid:1000004; rev:1;) +alert tcp any 5432 -> any any (msg: "ELITEWOLF SEL-3530-RTAC Possible AcSELerator Firmware Activity"; content:"SEL-3530 RTAC"; sid:1000005; rev:1;) + +alert tcp any 443 -> any any (msg:"ELITEWOLF_SEL-3620 X509 certificate activity"; content: "http://www.sel-secure.com"; sid:1000006; rev:1;) +alert tcp any 443 -> any any (msg:"ELITEWOLF_SEL-3620 X509 certificate activity"; content: "commonname=http://www.sel-secure.com"; sid:1000007; rev:1;) +alert tcp any 443 -> any any (msg:"ELITEWOLF_SEL-3620 X509 certificate activity"; content: "issuer_CN: http://www.sel-secure.com"; sid:1000008; rev:1;) + +alert tcp any 443 -> any any (msg:"ELITEWOLF_SEL-2488 URL path activity"; content: "/scripts/dScripts.sel"; sid:1000009; rev:1;) +alert tcp any 443 -> any any (msg:"ELITEWOLF_SEL-2488 URL 
path activity"; content: "/css/sel.css?vid="; sid:1000010; rev:1;) +alert tcp any 443 -> any any (msg:"ELITEWOLF_SEL-2488 X509 certificate activity"; content: "commonName=http://www.selinc.com/EthernetCommunications/"; sid:1000011; rev:1;) +alert tcp any 443 -> any any (msg:"ELITEWOLF_SEL-2488 X509 certificate activity"; content: "issuer_CN: http://www.selinc.com/EthernetCommunications/"; sid:1000012; rev:1;) + +alert tcp any 23 -> any any (msg:"ELITEWOLF SEL Telnet Activity"; pcre:"/SEL-[0-9]{3,4}/"; sid:1000013; rev:1;) +alert tcp any 23 -> any any (msg:"ELITEWOLF SEL Access Level 1 Change"; content: "Level 1"; sid:1000014; rev:1;) +alert tcp any 23 -> any any (msg:"ELITEWOLF SEL Access Level 2 Change"; content: "Level 2"; sid:1000015; rev:1;) +alert tcp any 23 -> any any (msg:"ELITEWOLF SEL 2032 Processor"; content:"COMMUNICATIONS PROCESSOR-S/N"; sid:1000016; rev:1;) +alert tcp any 23 -> any any (msg:"ELITEWOLF SEL Callibration Access Level Login Success"; content:"Calibration Access Established"; sid:1000017; rev:1;) + +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - Access Change"; content: "USER 2AC"; sid:1000018; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - Change working directory 2701"; content: "CWD SEL-2701"; sid:1000019; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - Change working directory 2701"; content: "CWD /SEL-2701"; sid:1000020; rev:1;) +alert tcp any 21 -> any any (msg: "ELITEWOLF SEL FTP Activity - Current directory"; content: "/SEL-2701"; sid:1000021; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - RETR DNPMAP.TXT file"; content: "RETR DNPMAP.TXT"; sid:1000022; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - STOR SET_DNP1.TXT file"; content: "STOR SET_DNP1.TXT"; sid:1000023; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - potential file change"; content:"STOR SET_"; pcre:"/STOR 
SET_[0-9A-Z]{1,4}.TXT/"; sid:1000024; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - Access Change ACC"; content: "USER ACC"; sid:1000025; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - Password Login otter"; content: "PASS otter"; sid:1000026; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - STOR DNPMAP.TXT file"; content: "STOR DNPMAP.TXT"; sid:1000027; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - RETR ERR.TXT file"; content: "RETR ERR.TXT"; sid:1000028; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - RETR SET_DNP1.TXT file 2701"; content: "RETR SET_DNP1.TXT"; sid:1000029; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - File Retrieval"; content:"RETR SET_"; pcre:"/RETR SET_[0-9A-Z]{1,4}/"; sid:1000030; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - Default Username"; content:"USER FTPUSER"; sid:1000031; rev:1;) +alert tcp any any -> any 21 (msg: "ELITEWOLF SEL FTP Activity - Default Password"; content:"PASS TAIL"; sid:1000032; rev:1;) +alert tcp any 21 -> any any (msg: "ELITEWOLF SEL-751A FTP SERVER"; content:"SEL-751A"; sid:1000033; rev:1;) + diff --git a/suricata/default-rules/OT/nsacyber/ELITEWOLF/Siemens.rules b/suricata/default-rules/OT/nsacyber/ELITEWOLF/Siemens.rules new file mode 100644 index 000000000..74bd84914 --- /dev/null +++ b/suricata/default-rules/OT/nsacyber/ELITEWOLF/Siemens.rules @@ -0,0 +1,5 @@ +alert tcp any 80 -> any any (msg: "ELITEWOLF S7-1200 Possible Siemens Web Activity"; content:"/CSS/S7Web.css"; sid:1000034; rev:1;) +alert tcp any 80 -> any any (msg: "ELITEWOLF S7-1200 Possible Siemens Web Activity"; content:"/Images/CPU1200/"; sid:1; rev:1000035;) +alert tcp any 443 -> any any (msg: "ELITEWOLF S7-1200 Possible Siemens X509 certificate activity"; content:"S7-1200 Controller Family"; sid:1000036; rev:1;) +alert tcp any 443 -> any any (msg: "ELITEWOLF S7-1200 
Possible Siemens X509 certificate activity"; content:"commonName=S7-1200 Controller Family"; sid:1000037; rev:1;) +alert tcp any 443 -> any any (msg: "ELITEWOLF S7-1200 Possible Siemens X509 certificate activity"; content:"issuer_CN: S7-1200 Controller Family"; sid:1000038; rev:1;) From b0826ddf2c3160704a6a05bd4b21c347181dc287 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Mon, 13 Nov 2023 07:47:04 -0700 Subject: [PATCH 34/82] added https://github.com/reversinglabs/reversinglabs-yara-rules yara rule set --- docs/components.md | 1 + docs/malcolm-config.md | 2 +- shared/bin/yara_rules_setup.sh | 5 +++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/components.md b/docs/components.md index df2895402..5a2b52538 100644 --- a/docs/components.md +++ b/docs/components.md @@ -25,6 +25,7 @@ Malcolm leverages the following excellent open source tools, among others. * [Mark Baggett](https://github.com/MarkBaggett)'s [freq](https://github.com/MarkBaggett/freq) - a tool for calculating entropy of strings * [Florian Roth](https://github.com/Neo23x0)'s [Signature-Base](https://github.com/Neo23x0/signature-base) Yara ruleset * [Bart Blaze](https://github.com/bartblaze)'s [Yara ruleset](https://github.com/bartblaze/Yara-rules) +* [ReversingLabs'](https://github.com/reversinglabs) [Yara ruleset](https://github.com/reversinglabs/reversinglabs-yara-rules) * These Zeek plugins: * some of Amazon.com, Inc.'s [ICS protocol](https://github.com/amzn?q=zeek) analyzers * Andrew Klaus's [Sniffpass](https://github.com/cybera/zeek-sniffpass) plugin for detecting cleartext passwords in HTTP POST requests diff --git a/docs/malcolm-config.md b/docs/malcolm-config.md index 1883172c2..e143fd7b6 100644 --- a/docs/malcolm-config.md +++ b/docs/malcolm-config.md @@ -86,7 +86,7 @@ Although the configuration script automates many of the following configuration - `EXTRACTED_FILE_IGNORE_EXISTING` – if set to `true`, files extant in `./zeek-logs/extract_files/` directory will be ignored 
on startup rather than scanned - `EXTRACTED_FILE_PRESERVATION` – determines behavior for preservation of [Zeek-extracted files](file-scanning.md#ZeekFileExtraction) - `EXTRACTED_FILE_UPDATE_RULES` – if set to `true`, file scanner engines (e.g., ClamAV, Capa, Yara) will periodically update their rule definitions (default `false`) - - `EXTRACTED_FILE_YARA_CUSTOM_ONLY` – if set to `true`, Malcolm will bypass the default Yara rulesets ([Neo23x0/signature-base](https://github.com/Neo23x0/signature-base) and [bartblaze/Yara-rules](https://github.com/bartblaze/Yara-rules)) and use only user-defined rules in `./yara/rules` + - `EXTRACTED_FILE_YARA_CUSTOM_ONLY` – if set to `true`, Malcolm will bypass the default Yara rulesets ([Neo23x0/signature-base](https://github.com/Neo23x0/signature-base), [reversinglabs/reversinglabs-yara-rules](https://github.com/reversinglabs/reversinglabs-yara-rules), and [bartblaze/Yara-rules](https://github.com/bartblaze/Yara-rules)) and use only user-defined rules in `./yara/rules` - `VTOT_API2_KEY` – used to specify a [VirusTotal Public API v.20](https://www.virustotal.com/en/documentation/public-api/) key, which, if specified, will be used to submit hashes of [Zeek-extracted files](file-scanning.md#ZeekFileExtraction) to VirusTotal - `ZEEK_AUTO_ANALYZE_PCAP_FILES` – if set to `true`, all PCAP files imported into Malcolm will automatically be analyzed by Zeek, and the resulting logs will also be imported (default `false`) - `ZEEK_AUTO_ANALYZE_PCAP_THREADS` – the number of threads available to Malcolm for analyzing Zeek logs (default `1`) diff --git a/shared/bin/yara_rules_setup.sh b/shared/bin/yara_rules_setup.sh index 26a936240..a6d0ecadd 100755 --- a/shared/bin/yara_rules_setup.sh +++ b/shared/bin/yara_rules_setup.sh @@ -96,13 +96,14 @@ mkdir -p "$YARA_RULES_SRC_DIR" "$YARA_RULES_DIR" # clone yara rules and create symlinks in destination directory pushd "$YARA_RULES_SRC_DIR" >/dev/null 2>&1 YARA_RULE_GITHUB_URLS=( - 
"https://github.com/Neo23x0/signature-base|master" "https://github.com/bartblaze/Yara-rules|master" + "https://github.com/Neo23x0/signature-base|master" + "https://github.com/reversinglabs/reversinglabs-yara-rules|develop" ) for i in ${YARA_RULE_GITHUB_URLS[@]}; do SRC_DIR="$(clone_github_repo "$i")" if [[ -d "$SRC_DIR" ]]; then - find "$SRC_DIR" -type f -iname "*.yar" -print0 | xargs -0 -r -I XXX ln $VERBOSE_FLAG -s -f "$("$REALPATH" "XXX")" "$YARA_RULES_DIR"/ + find "$SRC_DIR" -type f \( -iname '*.yara' -o -iname '*.yar' \) -print0 | xargs -0 -r -I XXX ln $VERBOSE_FLAG -s -f "$("$REALPATH" "XXX")" "$YARA_RULES_DIR"/ fi done popd >/dev/null 2>&1 From 99afed6373e576d5cdf1b284bb29a5392dd7e5cb Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Mon, 13 Nov 2023 10:29:27 -0700 Subject: [PATCH 35/82] update logstash and beats to 8.11.1 --- Dockerfiles/filebeat.Dockerfile | 2 +- Dockerfiles/logstash.Dockerfile | 2 +- sensor-iso/build.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfiles/filebeat.Dockerfile b/Dockerfiles/filebeat.Dockerfile index 8d4e2e163..f6f069eb2 100644 --- a/Dockerfiles/filebeat.Dockerfile +++ b/Dockerfiles/filebeat.Dockerfile @@ -1,4 +1,4 @@ -FROM docker.elastic.co/beats/filebeat-oss:8.11.0 +FROM docker.elastic.co/beats/filebeat-oss:8.11.1 # Copyright (c) 2023 Battelle Energy Alliance, LLC. All rights reserved. 
LABEL maintainer="malcolm@inl.gov" diff --git a/Dockerfiles/logstash.Dockerfile b/Dockerfiles/logstash.Dockerfile index fa930f676..ef0777b55 100644 --- a/Dockerfiles/logstash.Dockerfile +++ b/Dockerfiles/logstash.Dockerfile @@ -1,4 +1,4 @@ -FROM docker.elastic.co/logstash/logstash-oss:8.11.0 +FROM docker.elastic.co/logstash/logstash-oss:8.11.1 LABEL maintainer="malcolm@inl.gov" LABEL org.opencontainers.image.authors='malcolm@inl.gov' diff --git a/sensor-iso/build.sh b/sensor-iso/build.sh index a2575756e..0bc69ef89 100755 --- a/sensor-iso/build.sh +++ b/sensor-iso/build.sh @@ -5,7 +5,7 @@ IMAGE_PUBLISHER=idaholab IMAGE_VERSION=1.0.0 IMAGE_DISTRIBUTION=bookworm -BEATS_VER="8.11.0" +BEATS_VER="8.11.1" BEATS_OSS="-oss" BUILD_ERROR_CODE=1 From 9b1253a36a3ba2cf8bf62e859fba304f6beee745 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Mon, 13 Nov 2023 10:52:53 -0700 Subject: [PATCH 36/82] Work in progress for idaholab/Malcolm#287 --- scripts/control.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/scripts/control.py b/scripts/control.py index dce0e2aab..fddecdb96 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -585,7 +585,18 @@ def netboxRestore(backupFileName=None): f'{uidGidDict["PUID"]}:{uidGidDict["PGID"]}', ] - # if the netbox_init.py process is happening, interrupt it + # stop the netbox processes + dockerCmd = dockerCmdBase + [ + 'netbox', + 'supervisorctl', + 'stop', + 'netbox:*', + ] + err, results = run_process(dockerCmd, env=osEnv, debug=args.debug) + if (err != 0) and args.debug: + eprint(f'Error stopping netbox:*: {results}') + + # if the netbox_init.py process is still happening, interrupt it dockerCmd = dockerCmdBase + [ 'netbox', 'bash', @@ -628,6 +639,19 @@ def netboxRestore(backupFileName=None): if (err != 0) or (len(results) == 0): raise Exception('Error loading NetBox database') + # start back up the netbox processes + dockerCmd = dockerCmdBase + [ + 'netbox', + 'supervisorctl', + 'start', + 
'netbox:housekeeping', + 'netbox:main', + 'netbox:worker', + ] + err, results = run_process(dockerCmd, env=osEnv, debug=args.debug) + if (err != 0) and args.debug: + eprint(f'Error starting netbox:*: {results}') + # migrations if needed dockerCmd = dockerCmdBase + ['netbox', '/opt/netbox/netbox/manage.py', 'migrate'] err, results = run_process(dockerCmd, env=osEnv, debug=args.debug) From de41962e69425fab78c16b740bd636700add8c48 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Mon, 13 Nov 2023 11:56:32 -0700 Subject: [PATCH 37/82] better startup for netbox restore --- scripts/control.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/scripts/control.py b/scripts/control.py index fddecdb96..b2e578647 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -639,14 +639,12 @@ def netboxRestore(backupFileName=None): if (err != 0) or (len(results) == 0): raise Exception('Error loading NetBox database') - # start back up the netbox processes + # start back up the netbox processes (except initialization) dockerCmd = dockerCmdBase + [ 'netbox', - 'supervisorctl', - 'start', - 'netbox:housekeeping', - 'netbox:main', - 'netbox:worker', + 'bash', + '-c', + "supervisorctl status netbox:* | grep -v :initialization | awk '{ print $1 }' | xargs -r -L 1 -P 4 supervisorctl start", ] err, results = run_process(dockerCmd, env=osEnv, debug=args.debug) if (err != 0) and args.debug: From 48bca91820d675dd90f1275ce90a045b8c6044a4 Mon Sep 17 00:00:00 2001 From: Seth Grover Date: Mon, 13 Nov 2023 16:34:19 -0700 Subject: [PATCH 38/82] fix idaholab/Malcolm#287; fix issue with SUPERUSER_API_TOKEN not being set correctly after netbox-restore --- netbox/scripts/netbox_superuser_create.py | 15 ++++ scripts/control.py | 98 ++++++++++++++++++++++- 2 files changed, 111 insertions(+), 2 deletions(-) create mode 100644 netbox/scripts/netbox_superuser_create.py diff --git a/netbox/scripts/netbox_superuser_create.py b/netbox/scripts/netbox_superuser_create.py new file 
mode 100644 index 000000000..6fe8be2e3 --- /dev/null +++ b/netbox/scripts/netbox_superuser_create.py @@ -0,0 +1,15 @@ +from django.contrib.auth.models import User +from users.models import Token +from os import getenv + +# adapted from +# - https://github.com/netbox-community/netbox-docker/blob/b47e85ab3f2261021adf99ae9de2e9692fd674c3/docker/docker-entrypoint.sh#L74-L80 + +superUserName = getenv('SUPERUSER_NAME', '') +superUserEmail = getenv('SUPERUSER_EMAIL', '') +superUserPassword = getenv('SUPERUSER_PASSWORD', '') +superUserToken = getenv('SUPERUSER_API_TOKEN', '') + +if not User.objects.filter(username=superUserName): + u = User.objects.create_superuser(superUserName, superUserEmail, superUserPassword) + Token.objects.create(user=u, key=superUserToken) diff --git a/scripts/control.py b/scripts/control.py index b2e578647..25852c563 100755 --- a/scripts/control.py +++ b/scripts/control.py @@ -639,6 +639,19 @@ def netboxRestore(backupFileName=None): if (err != 0) or (len(results) == 0): raise Exception('Error loading NetBox database') + # don't restore auth_user, tokens, etc: they're created by Malcolm and may not be the same on this instance + dockerCmd = dockerCmdBase + [ + 'netbox-postgres', + 'psql', + '-U', + 'netbox', + '-c', + 'TRUNCATE auth_user CASCADE', + ] + err, results = run_process(dockerCmd, env=osEnv, debug=args.debug) + if (err != 0) and args.debug: + eprint(f'Error truncating table auth_user table: {results}') + # start back up the netbox processes (except initialization) dockerCmd = dockerCmdBase + [ 'netbox', @@ -656,6 +669,17 @@ def netboxRestore(backupFileName=None): if (err != 0) or (len(results) == 0): raise Exception('Error performing NetBox migration') + # create auth_user for superuser + dockerCmd = dockerCmdBase + [ + 'netbox', + 'bash', + '-c', + "/opt/netbox/netbox/manage.py shell --interface python < /usr/local/bin/netbox_superuser_create.py", + ] + err, results = run_process(dockerCmd, env=osEnv, debug=args.debug) + if (err != 0) or 
(len(results) == 0): + raise Exception('Error setting up superuser') + # restore media directory backupFileParts = os.path.splitext(backupFileName) backupMediaFileName = backupFileParts[0] + ".media.tar.gz" @@ -666,7 +690,21 @@ def netboxRestore(backupFileName=None): t.extractall(mediaPath) elif orchMode is OrchestrationFramework.KUBERNETES: - # if the netbox_init.py process is happening, interrupt it + # stop the netbox processes + if podsResults := PodExec( + service='netbox', + namespace=args.namespace, + command=['supervisorctl', 'stop', 'netbox:*'], + ): + err = 0 if all([deep_get(v, ['err'], 1) == 0 for k, v in podsResults.items()]) else 1 + results = list(chain(*[deep_get(v, ['output'], '') for k, v in podsResults.items()])) + else: + err = 1 + results = [] + if (err != 0) and args.debug: + eprint(f'Error ({err}) stopping NetBox: {results}') + + # if the netbox_init.py process is still happening, interrupt it if podsResults := PodExec( service='netbox', namespace=args.namespace, @@ -713,7 +751,6 @@ def netboxRestore(backupFileName=None): service='netbox-postgres', namespace=args.namespace, command=[ - 'netbox-postgres', 'psql', '-U', 'netbox', @@ -745,6 +782,45 @@ def netboxRestore(backupFileName=None): if (err != 0) or (len(results) == 0): raise Exception(f'Error loading NetBox database: {results}') + # don't restore auth_user, tokens, etc: they're created by Malcolm and may not be the same on this instance + # make sure permissions are set up right + if podsResults := PodExec( + service='netbox-postgres', + namespace=args.namespace, + command=[ + 'psql', + '-U', + 'netbox', + '-c', + 'TRUNCATE auth_user CASCADE', + ], + ): + err = 0 if all([deep_get(v, ['err'], 1) == 0 for k, v in podsResults.items()]) else 1 + results = list(chain(*[deep_get(v, ['output'], '') for k, v in podsResults.items()])) + else: + err = 1 + results = [] + if err != 0: + raise Exception(f'Error truncating table auth_user table: {results}') + + # start the netbox processes + if 
podsResults := PodExec( + service='netbox', + namespace=args.namespace, + command=[ + 'bash', + '-c', + "supervisorctl status netbox:* | grep -v :initialization | awk '{ print $1 }' | xargs -r -L 1 -P 4 supervisorctl start", + ], + ): + err = 0 if all([deep_get(v, ['err'], 1) == 0 for k, v in podsResults.items()]) else 1 + results = list(chain(*[deep_get(v, ['output'], '') for k, v in podsResults.items()])) + else: + err = 1 + results = [] + if (err != 0) and args.debug: + eprint(f'Error ({err}) starting NetBox: {results}') + # migrations if needed if podsResults := PodExec( service='netbox', @@ -759,6 +835,24 @@ def netboxRestore(backupFileName=None): if (err != 0) or (len(results) == 0): raise Exception(f'Error performing NetBox migration: {results}') + # migrations if needed + if podsResults := PodExec( + service='netbox', + namespace=args.namespace, + command=[ + 'bash', + '-c', + "/opt/netbox/netbox/manage.py shell --interface python < /usr/local/bin/netbox_superuser_create.py", + ], + ): + err = 0 if all([deep_get(v, ['err'], 1) == 0 for k, v in podsResults.items()]) else 1 + results = list(chain(*[deep_get(v, ['output'], '') for k, v in podsResults.items()])) + else: + err = 1 + results = [] + if (err != 0) or (len(results) == 0): + raise Exception(f'Error setting up superuser: {results}') + # TODO: can't restore netbox/media directory via kubernetes at the moment else: From fd6d05012bc176e4c0d71d6ffdf46e65094f5757 Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 14 Nov 2023 08:47:35 -0700 Subject: [PATCH 39/82] idaholab/Malcolm#286, strip out broken Arkime and NetBox links from dashboards for Kibana import --- dashboards/scripts/create-arkime-sessions-index.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/dashboards/scripts/create-arkime-sessions-index.sh b/dashboards/scripts/create-arkime-sessions-index.sh index 0355d25b6..fc5905513 100755 --- a/dashboards/scripts/create-arkime-sessions-index.sh +++ 
b/dashboards/scripts/create-arkime-sessions-index.sh @@ -194,9 +194,14 @@ if [[ "$CREATE_OS_ARKIME_SESSION_INDEX" = "true" ]] ; then echo "Importing $DATASTORE_TYPE Dashboards saved objects..." # install default dashboards - for i in /opt/dashboards/*.json; do + DASHBOARDS_IMPORT_DIR="$(mktemp -d -t dashboards-XXXXXX)" + cp /opt/dashboards/*.json "${DASHBOARDS_IMPORT_DIR}"/ + for i in "${DASHBOARDS_IMPORT_DIR}"/*.json; do + # strip out Arkime and NetBox links from dashboards' navigation pane when doing Kibana import (idaholab/Malcolm#286) + [[ "$DATASTORE_TYPE" == "elasticsearch" ]] && sed -i 's/ \\\\n\[↪ NetBox\](\/netbox\/) \\\\n\[↪ Arkime\](\/sessions)//' "$i" curl "${CURL_CONFIG_PARAMS[@]}" -L --silent --output /dev/null --show-error -XPOST "$DASHB_URL/api/$DASHBOARDS_URI_PATH/dashboards/import?force=true" -H "$XSRF_HEADER:true" -H 'Content-type:application/json' -d "@$i" done + rm -rf "${DASHBOARDS_IMPORT_DIR}" # beats will no longer import its dashbaords into OpenSearch # (see opensearch-project/OpenSearch-Dashboards#656 and From b7133e3f6047b20c28c7b97a7b852e36900be843 Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 14 Nov 2023 10:41:48 -0700 Subject: [PATCH 40/82] idaholab/Malcolm#285, allow customizing Arkime's freeSpaceG setting (for PCAP deletion) in an environment variable --- arkime/scripts/docker_entrypoint.sh | 2 ++ config/arkime.env.example | 1 + docs/kubernetes.md | 2 ++ docs/malcolm-hedgehog-e2e-iso-install.md | 2 ++ scripts/install.py | 26 +++++++++++++++++++ .../interface/sensor_ctl/control_vars.conf | 3 ++- .../supervisor.init/arkime_config_populate.sh | 5 ++++ 7 files changed, 40 insertions(+), 1 deletion(-) diff --git a/arkime/scripts/docker_entrypoint.sh b/arkime/scripts/docker_entrypoint.sh index 16a1ca30a..a7b2fe542 100755 --- a/arkime/scripts/docker_entrypoint.sh +++ b/arkime/scripts/docker_entrypoint.sh @@ -10,6 +10,7 @@ function urlencodeall() { ARKIME_DIR=${ARKIME_DIR:-"/opt/arkime"} 
ARKIME_PASSWORD_SECRET=${ARKIME_PASSWORD_SECRET:-"Malcolm"} +ARKIME_FREESPACEG=${ARKIME_FREESPACEG:-"10%"} MALCOLM_PROFILE=${MALCOLM_PROFILE:-"malcolm"} OPENSEARCH_URL_FINAL=${OPENSEARCH_URL:-"http://opensearch:9200"} @@ -48,6 +49,7 @@ if [[ -r "${ARKIME_DIR}"/etc/config.orig.ini ]]; then cp "${ARKIME_DIR}"/etc/config.orig.ini "${ARKIME_DIR}"/etc/config.ini sed -i "s|^\(elasticsearch=\).*|\1"${OPENSEARCH_URL_FINAL}"|" "${ARKIME_DIR}"/etc/config.ini sed -i "s/^\(passwordSecret=\).*/\1"${ARKIME_PASSWORD_SECRET}"/" "${ARKIME_DIR}"/etc/config.ini + sed -i "s/^\(freeSpaceG=\).*/\1"${ARKIME_FREESPACEG}"/" "${ARKIME_DIR}"/etc/config.ini if [[ "$MALCOLM_PROFILE" == "hedgehog" ]]; then sed -i "s/^\(userNameHeader=\)/# \1/" "${ARKIME_DIR}"/etc/config.ini sed -i "s/^\(userAuthIps=\)/# \1/" "${ARKIME_DIR}"/etc/config.ini diff --git a/config/arkime.env.example b/config/arkime.env.example index 183e970e3..8248a636d 100644 --- a/config/arkime.env.example +++ b/config/arkime.env.example @@ -1,6 +1,7 @@ # Whether or not Arkime is allowed to delete uploaded/captured PCAP (see # https://arkime.com/faq#pcap-deletion) MANAGE_PCAP_FILES=false +ARKIME_FREESPACEG=10% # The number of Arkime capture processes allowed to run concurrently ARKIME_ANALYZE_PCAP_THREADS=1 diff --git a/docs/kubernetes.md b/docs/kubernetes.md index 28b631d65..02c62b0d3 100644 --- a/docs/kubernetes.md +++ b/docs/kubernetes.md @@ -379,6 +379,8 @@ Determine oldest indices by name (instead of creation time)? (Y / n): y Should Arkime delete PCAP files based on available storage (see https://arkime.com/faq#pcap-deletion)? (y / N): y +Enter PCAP deletion threshold in gigabytes or as a percentage (e.g., 500, 10%, etc.): 10% + Automatically analyze all PCAP files with Suricata? (Y / n): y Download updated Suricata signatures periodically? 
(y / N): y diff --git a/docs/malcolm-hedgehog-e2e-iso-install.md b/docs/malcolm-hedgehog-e2e-iso-install.md index 280698e90..a537f9f65 100644 --- a/docs/malcolm-hedgehog-e2e-iso-install.md +++ b/docs/malcolm-hedgehog-e2e-iso-install.md @@ -179,6 +179,8 @@ The [configuration and tuning](malcolm-config.md#ConfigAndTuning) wizard's quest - Most of the configuration around OpenSearch [Index State Management](https://opensearch.org/docs/latest/im-plugin/ism/index/) and [Snapshot Management](https://opensearch.org/docs/latest/opensearch/snapshots/sm-dashboards/) can be done in OpenSearch Dashboards. In addition to (or instead of) the OpenSearch index state management operations, Malcolm can also be configured to delete the oldest network session metadata indices when the database exceeds a certain size to prevent filling up all available storage with OpenSearch indices. - **Should Arkime delete PCAP files based on available storage?** - Answering **Y** allows Arkime to prune (delete) old PCAP files based on available disk space (see https://arkime.com/faq#pcap-deletion). + - **Enter PCAP deletion threshold in gigabytes or as a percentage (e.g., 500, 10%, etc.)** + - If [Arkime PCAP-deletion](https://arkime.com/faq#pcap-deletion) is enabled, Arkime will delete PCAP files when **free space** is lower than this value, specified as integer gigabytes (e.g., `500`) or a percentage (e.g., `10%`) * **Automatically analyze all PCAP files with Suricata?** - This option is used to enable [Suricata](https://suricata.io/) (an IDS and threat detection engine) to analyze PCAP files uploaded to Malcolm via its upload web interface. 
* **Download updated Suricata signatures periodically?** diff --git a/scripts/install.py b/scripts/install.py index 6a6479923..8408144ed 100755 --- a/scripts/install.py +++ b/scripts/install.py @@ -993,6 +993,7 @@ def tweak_malcolm_runtime(self, malcolm_install_path): indexPruneSizeLimit = '0' indexPruneNameSort = False arkimeManagePCAP = False + arkimeFreeSpaceG = '10%' if InstallerYesOrNo( 'Should Malcolm delete the oldest database indices and/or PCAP files based on available storage?' @@ -1032,6 +1033,16 @@ def tweak_malcolm_runtime(self, malcolm_install_path): default=args.arkimeManagePCAP, ) ) + if arkimeManagePCAP: + arkimeFreeSpaceGTmp = '' + loopBreaker = CountUntilException(MaxAskForValueCount, 'Invalid PCAP deletion threshold') + while (not re.match(r'^\d+%?$', arkimeFreeSpaceGTmp, flags=re.IGNORECASE)) and loopBreaker.increment(): + arkimeFreeSpaceGTmp = InstallerAskForString( + 'Enter PCAP deletion threshold in gigabytes or as a percentage (e.g., 500, 10%, etc.)', + default=args.arkimeFreeSpaceG, + ) + if arkimeFreeSpaceGTmp: + arkimeFreeSpaceG = arkimeFreeSpaceGTmp autoSuricata = InstallerYesOrNo( 'Automatically analyze all PCAP files with Suricata?', default=args.autoSuricata @@ -1376,6 +1387,12 @@ def tweak_malcolm_runtime(self, malcolm_install_path): 'MANAGE_PCAP_FILES', TrueOrFalseNoQuote(arkimeManagePCAP), ), + # Threshold for Arkime PCAP deletion + EnvValue( + os.path.join(args.configDir, 'arkime.env'), + 'ARKIME_FREESPACEG', + arkimeFreeSpaceG, + ), # authentication method: basic (true), ldap (false) or no_authentication EnvValue( os.path.join(args.configDir, 'auth-common.env'), @@ -3471,6 +3488,15 @@ def main(): default=False, help="Arkime should delete PCAP files based on available storage (see https://arkime.com/faq#pcap-deletion)", ) + storageArgGroup.add_argument( + '--delete-pcap-threshold', + dest='arkimeFreeSpaceG', + required=False, + metavar='', + type=str, + default='', + help=f'Threshold for Arkime PCAP deletion (see 
https://arkime.com/faq#pcap-deletion)', + ) storageArgGroup.add_argument( '--delete-index-threshold', dest='indexPruneSizeLimit', diff --git a/sensor-iso/interface/sensor_ctl/control_vars.conf b/sensor-iso/interface/sensor_ctl/control_vars.conf index 255a2d22d..097c2a186 100644 --- a/sensor-iso/interface/sensor_ctl/control_vars.conf +++ b/sensor-iso/interface/sensor_ctl/control_vars.conf @@ -21,7 +21,8 @@ export ARKIME_COMPRESSION_LEVEL=0 export ARKIME_VIEWER_CERT=viewer.crt export ARKIME_VIEWER_KEY=viewer.key # Password hash secret for Arkime viewer cluster (see https://arkime.com/settings) -ARKIME_PASSWORD_SECRET=Malcolm +export ARKIME_PASSWORD_SECRET=Malcolm +export ARKIME_FREESPACEG=7% export DOCUMENTATION_PORT=8420 export MISCBEAT_PORT=9516 diff --git a/sensor-iso/interface/sensor_ctl/supervisor.init/arkime_config_populate.sh b/sensor-iso/interface/sensor_ctl/supervisor.init/arkime_config_populate.sh index a47b80795..048e2944e 100644 --- a/sensor-iso/interface/sensor_ctl/supervisor.init/arkime_config_populate.sh +++ b/sensor-iso/interface/sensor_ctl/supervisor.init/arkime_config_populate.sh @@ -60,6 +60,11 @@ if [[ -n $SUPERVISOR_PATH ]] && [[ -r "$SUPERVISOR_PATH"/arkime/config.ini ]]; t sed -r -i "s/(maxFileTimeM)\s*=\s*.*/\1=$PCAP_ROTATE_MINUTES/" "$ARKIME_CONFIG_FILE" fi + # pcap deletion threshold + if [[ -n $ARKIME_FREESPACEG ]]; then + sed -r -i "s/(freeSpaceG)\s*=\s*.*/\1=$ARKIME_FREESPACEG/" "$ARKIME_CONFIG_FILE" + fi + # pcap compression COMPRESSION_TYPE="${ARKIME_COMPRESSION_TYPE:-none}" COMPRESSION_LEVEL="${ARKIME_COMPRESSION_LEVEL:-0}" From 684d69f58be57f099f128377e094ad2c063954d7 Mon Sep 17 00:00:00 2001 From: SG Date: Tue, 14 Nov 2023 11:33:04 -0700 Subject: [PATCH 41/82] fix file type validation not working for upload from (some?) 
windows browsers (idaholab/Malcolm#292) by removig front-end validation and just relying on the backend to do it --- file-upload/site/index.html | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/file-upload/site/index.html b/file-upload/site/index.html index fa4377939..a44a049d4 100644 --- a/file-upload/site/index.html +++ b/file-upload/site/index.html @@ -74,7 +74,6 @@

Network Traffic Artifact Upload

-