diff --git a/Makefile b/Makefile index ebcb9c8e..d8eb7404 100644 --- a/Makefile +++ b/Makefile @@ -68,7 +68,7 @@ base-centos-7: docker build ${DOCKER_BUILD_FLAGS} -t base-centos-7:${IMAGE_VERSION} ./base/centos-7 base-redhat-8: - docker build ${DOCKER_BUILD_FLAGS} -t base-redhat-8:${IMAGE_VERSION} ./base/redhat-8 + docker build ${DOCKER_BUILD_FLAGS} --label version=${SPLUNK_VERSION} -t base-redhat-8:${IMAGE_VERSION} ./base/redhat-8 base-windows-2016: docker build ${DOCKER_BUILD_FLAGS} -t base-windows-2016:${IMAGE_VERSION} ./base/windows-2016 diff --git a/base/redhat-8/Dockerfile b/base/redhat-8/Dockerfile index b1ba7a0a..c34b396d 100644 --- a/base/redhat-8/Dockerfile +++ b/base/redhat-8/Dockerfile @@ -16,11 +16,10 @@ # the container catalog moved from registry.access.redhat.com to registry.redhat.io # So at some point before they deprecate the old registry we have to make sure that # we have access to the new registry and change where we pull the ubi image from. -FROM registry.access.redhat.com/ubi8/ubi-minimal +FROM registry.access.redhat.com/ubi8/ubi-minimal:8.0-213 LABEL name="splunk" \ maintainer="support@splunk.com" \ vendor="splunk" \ - version="8.0.0" \ release="1" \ summary="UBI 8 Docker image of Splunk Enterprise" \ description="Splunk Enterprise is a platform for operational intelligence. Our software lets you collect, analyze, and act upon the untapped value of big data that your technology infrastructure, security systems, and business applications generate. It gives you insights to drive operational performance and business results." diff --git a/base/redhat-8/install.sh b/base/redhat-8/install.sh index 73cc73cc..db67be8f 100755 --- a/base/redhat-8/install.sh +++ b/base/redhat-8/install.sh @@ -16,7 +16,6 @@ set -e # reinstalling local en def for now, removed in minimal image https://bugzilla.redhat.com/show_bug.cgi?id=1665251 -microdnf -y update microdnf -y --nodocs install glibc-langpack-en #Currently there is no access to the UTF-8 char map, the following command is commented out until diff --git a/docs/ADVANCED.md b/docs/ADVANCED.md index 81dfd98a..cbb959af 100644 --- a/docs/ADVANCED.md +++ b/docs/ADVANCED.md @@ -242,28 +242,28 @@ To build images directly from this repository, there is a supplied `Makefile` in ``` 3. Run all the tests to verify your environment ``` - $ make splunk-debian-9 - $ make uf-debian-9 + $ make splunk-redhat-8 + $ make uf-redhat-8 ``` -Additionally, there are multiple images and layers that are produced by the previous commands: `base-debian-9`, `splunk-debian-9`, and `uf-debian-9`. +Additionally, there are multiple images and layers that are produced by the previous commands: `base-redhat-8`, `splunk-redhat-8`, and `uf-redhat-8`. -#### base-debian-9 -The directory `base/debian-9` contains a Dockerfile to create a base image on top of which all the other images are built. In order to minimize image size and provide a stable foundation for other images to build on, we elected to use `debian:stretch-slim` (55MB) for our base image. In the future, we plan to add support for additional operating systems. +#### base-redhat-8 +The directory `base/redhat-8` contains a Dockerfile to create a base image on top of which all the other images are built. In order to minimize image size and provide a stable foundation for other images to build on, we elected to use `registry.access.redhat.com/ubi8/ubi-minimal:8.0` (90MB) for our base image. In the future, we plan to add support for additional operating systems. 
``` -$ make base-debian-9 +$ make base-redhat-8 ``` **WARNING:** Modifications made to the "base" image can result in Splunk being unable to start or run correctly. -#### splunk-debian-9 -The directory `splunk/debian-9` contains a Dockerfile that extends the base image by installing Splunk and adding tools for provisioning. Advanced Splunk provisioning capabilities are provided through the utilization of an entrypoint script and playbooks published separately via the [splunk-ansible project](https://github.com/splunk/splunk-ansible). +#### splunk-redhat-8 +The directory `splunk/common-files` contains a Dockerfile that extends the base image by installing Splunk and adding tools for provisioning. Advanced Splunk provisioning capabilities are provided through the utilization of an entrypoint script and playbooks published separately via the [splunk-ansible project](https://github.com/splunk/splunk-ansible). ``` -$ make splunk-debian-9 +$ make splunk-redhat-8 ``` -#### uf-debian-9 -The directory `uf/debian-9` contains a Dockerfile that extends the base image by installing Splunk Universal Forwarder and adding tools for provisioning. This image is similar to the Splunk Enterprise image (`splunk-debian-9`), except the more lightweight Splunk Universal Forwarder package is installed instead. +#### uf-redhat-8 +The directory `uf/common-files` contains a Dockerfile that extends the base image by installing Splunk Universal Forwarder and adding tools for provisioning. This image is similar to the Splunk Enterprise image (`splunk-redhat-8`), except the more lightweight Splunk Universal Forwarder package is installed instead. ``` -$ make uf-debian-9 +$ make uf-redhat-8 ``` diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 227d144e..d9aa2505 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -3,9 +3,11 @@ ## Navigation * [8.0.0](#800) +* [7.3.3](#733) * [7.3.2](#732) * [7.3.1](#731) * [7.3.0](#730) +* [7.2.9](#729) * [7.2.8](#728) * [7.2.7](#727) * [7.2.6](#726) @@ -36,6 +38,26 @@ --- +## 7.3.3 + +#### What's New? +* New Splunk Enterprise release of 7.3.3 + +#### docker-splunk changes: +* Bumping Splunk version. For details, see: https://docs.splunk.com/Documentation/Splunk/7.3.3/ReleaseNotes/Fixedissues +* Better management of deployment server apps +* Support for a variety of Splunk package types +* Bugfixes around app installation + +#### splunk-ansible changes: +* Removing unnecessary apps in distributed ITSI installations +* Partitioning apps in serverclass.conf when using the deployment server +* Adding support for activating Splunk Free license on boot +* Support for cluster labels via environment variables +* Bugfixes around app installation (through default.yml and pathing) + +--- + ## 7.3.2 #### What's New? @@ -95,6 +117,21 @@ --- +## 7.2.9 + +#### What's New? +* Releasing new images to support a Splunk Enterprise maintenance patch. +* Bundling in changes to be consistent with the release of [8.0.0](#800) + +#### docker-splunk changes: +* Bumping Splunk version. For details, see: https://docs.splunk.com/Documentation/Splunk/7.2.9/ReleaseNotes/Fixedissues +* See [8.0.0](#800) changes + +#### splunk-ansible changes: +* See [8.0.0](#800) changes + +--- + ## 7.2.8 #### What's New? 
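The Makefile hunk at the top of this patch moves the image version out of the base Dockerfile and attaches it at build time via `--label version=${SPLUNK_VERSION}`. A quick way to confirm the label made it onto the built image is sketched below; it assumes you run it from the repository root with `SPLUNK_VERSION` and `IMAGE_VERSION` set the same way the Makefile defines them.

```
# Build the RedHat 8 base image with the version label applied at build time
docker build --label version=${SPLUNK_VERSION} -t base-redhat-8:${IMAGE_VERSION} ./base/redhat-8

# Read the label back from the image metadata; this should print the value of SPLUNK_VERSION
docker inspect --format '{{ index .Config.Labels "version" }}' base-redhat-8:${IMAGE_VERSION}
```

Supplying the label on the command line means the same Dockerfile can be reused for any Splunk release, which is why the hard-coded `version="8.0.0"` line is dropped from the Dockerfile above.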
diff --git a/docs/EXAMPLES.md b/docs/EXAMPLES.md index d661bf7e..a998a927 100644 --- a/docs/EXAMPLES.md +++ b/docs/EXAMPLES.md @@ -14,6 +14,7 @@ Note that for more complex scenarios, we will opt to use a [Docker compose file] * [...with any app](#create-standalone-with-app) * [...with a SplunkBase app](#create-standalone-with-splunkbase-app) * [...with SSL enabled](#create-standalone-with-ssl-enabled) + * [...with a Free license](#create-standalone-with-free-license) * [Create standalone and universal forwarder](#create-standalone-and-universal-forwarder) * [Create heavy forwarder](#create-heavy-forwarder) * [Create heavy forwarder and deployment server](#create-heavy-forwarder-and-deployment-server) @@ -188,7 +189,6 @@ $ SPLUNKBASE_PASSWORD= SPLUNK_PASSWORD= docker-co ``` ## Create standalone with SSL enabled - To enable SSL over SplunkWeb, you'll first need to generate your self-signed certificates. Please see the [Splunk docs](https://docs.splunk.com/Documentation/Splunk/latest/Security/Self-signcertificatesforSplunkWeb) on how to go about doing this. For the purposes of local development, you can use: ``` openssl req -x509 -newkey rsa:4096 -passout pass:abcd1234 -keyout /home/key.pem -out /home/cert.pem -days 365 -subj /CN=localhost ``` @@ -207,6 +207,14 @@ $ docker run --name so1 --hostname so1 -p 8000:8000 \ -it splunk/splunk:latest ``` +## Create standalone with Free license +[Splunk Free](https://docs.splunk.com/Documentation/Splunk/latest/Admin/MoreaboutSplunkFree) is the totally free version of Splunk software. The Free license lets you index up to 500 MB per day and will never expire. + +Execute the following to bring up a Splunk Free standalone environment: +``` +$ docker run --name so1 --hostname so1 -p 8000:8000 -e SPLUNK_PASSWORD=<password> -e SPLUNK_START_ARGS=--accept-license -e SPLUNK_LICENSE_URI=Free -it splunk/splunk:latest +``` + ## Create standalone and universal forwarder You can also enable distributed deployments. In this case, we can create a Splunk universal forwarder running in a container to stream logs to a Splunk standalone, also running in a container. diff --git a/docs/advanced/APP_INSTALL.md b/docs/advanced/APP_INSTALL.md index 9232abca..bd149009 100644 --- a/docs/advanced/APP_INSTALL.md +++ b/docs/advanced/APP_INSTALL.md @@ -1,7 +1,7 @@ ## Installing Splunk Apps and Add-ons Splunk's Docker image supports the ability to dynamically install any Splunk-compliant app or add-on. These can be certified apps that are hosted through [SplunkBase](https://splunkbase.splunk.com/) or they might be local apps you have developed yourself. -App installation can be done a variety of ways: either through a file/directory volume-mounted inside the container, or through an external URL for dynamic downloads. Nothing is required for the former, and the enviroment variable `SPLUNK_APPS_URL` supports the later. +App installation can be done in a variety of ways: either through a file/directory volume-mounted inside the container, or through an external URL for dynamic downloads. Nothing is required for the former, and the environment variable `SPLUNK_APPS_URL` supports the latter. **NOTE:** Installation of Splunk Enterprise Security (ES) and Splunk IT Service Intelligence (ITSI) is currently not supported with this image. Please contact Splunk Services for more information on using these applications with Splunk Enterprise in a container. 
@@ -9,7 +9,7 @@ App installation can be done a variety of ways: either through a file/directory * [Volume-mount app directory](#volume-mount-app-directory) * [Download via URL](#download-via-url) -* [Multiple apps](@multiple-apps) +* [Multiple apps](#multiple-apps) * [Apps in distributed environments](#apps-in-distributed-environments) ## Volume-mount app directory @@ -34,7 +34,6 @@ In most cases, you're likely hosting the app as a tar file somewhere accessible #### SplunkBase apps Please refer to this docker-compose.yml file for how to download SplunkBase apps with authentication: -
<details><summary>docker-compose.yml</summary>

``` version: "3.6" @@ -52,11 +51,9 @@ services: ports: - 8000 ``` -
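Because the compose file above is reduced to its hunk context, the SplunkBase-specific settings are not visible here. As a rough single-container sketch (the download URL below is only a placeholder in the usual SplunkBase format, not a tested link), the same idea with `docker run` combines `SPLUNK_APPS_URL` with the `SPLUNKBASE_USERNAME` and `SPLUNKBASE_PASSWORD` environment variables, since SplunkBase downloads require authentication:

```
# Install a SplunkBase app at container start; credentials are needed because
# the SplunkBase download endpoint is authenticated
docker run --name so1 --hostname so1 -p 8000:8000 \
    -e SPLUNK_PASSWORD=<password> \
    -e SPLUNK_START_ARGS=--accept-license \
    -e SPLUNKBASE_USERNAME=<splunkbase_username> \
    -e SPLUNKBASE_PASSWORD=<splunkbase_password> \
    -e SPLUNK_APPS_URL=https://splunkbase.splunk.com/app/<app_id>/release/<version>/download \
    -it splunk/splunk:latest
```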

#### Self-hosted apps Please refer to this docker-compose.yml file for how to download any app hosted at an arbitrary location: -
<details><summary>docker-compose.yml</summary>

``` version: "3.6" @@ -72,13 +69,11 @@ services: ports: - 8000 ``` -

#### Apps on filesystem If you build your own image on top of the `splunk/splunk` or `splunk/universalforwarder` image, it's possible you may embedd a tar file of an app inside. Or, you can go with the bind-mount volume approach and inject a tar file on container run time. In either case, it's still possible to install an app from this file on the container's filesystem with the following. Please refer to this docker-compose.yml file for how to install an app in the container's filesystem: -
<details><summary>docker-compose.yml</summary>

``` version: "3.6" @@ -94,13 +89,11 @@ services: ports: - 8000 ``` -
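The compose body is likewise mostly elided by the hunk above. For a quick single-container equivalent of the filesystem approach (the tarball name and the `/tmp` path are illustrative choices, not paths required by the image), an app archive can be bind-mounted into the container and referenced by `SPLUNK_APPS_URL` as a plain path:

```
# Inject an app tarball into the container and install it from the local filesystem
docker run --name so1 --hostname so1 -p 8000:8000 \
    -v $(pwd)/splunk_app_example.tgz:/tmp/splunk_app_example.tgz \
    -e SPLUNK_APPS_URL=/tmp/splunk_app_example.tgz \
    -e SPLUNK_PASSWORD=<password> \
    -e SPLUNK_START_ARGS=--accept-license \
    -it splunk/splunk:latest
```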

## Multiple apps As one would expect, Splunk can and should support downloading any combination or series of apps. This can be incredibly useful when cross-referencing data from various sources. The `SPLUNK_APPS_URL` supports multiple apps, as long as they are comma-separated. Plase refer to this docker-compose.yml file for how to install multiple apps: -
<details><summary>docker-compose.yml</summary>

``` version: "3.6" @@ -118,15 +111,12 @@ services: ports: - 8000 ``` -
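Since the multiple-apps compose example is also truncated to its hunk context, the comma-separated form is easier to see in a one-liner. A sketch with two hypothetical app archives (replace the example.com URLs with locations you actually host):

```
# Install two apps in one pass by giving SPLUNK_APPS_URL a comma-separated list
docker run --name so1 --hostname so1 -p 8000:8000 \
    -e SPLUNK_APPS_URL=https://example.com/apps/app_one.tgz,https://example.com/apps/app_two.tgz \
    -e SPLUNK_PASSWORD=<password> \
    -e SPLUNK_START_ARGS=--accept-license \
    -it splunk/splunk:latest
```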

## Apps in distributed environments This docker image also deploys apps when running Splunk in distributed environments. There are, however, special cases and instructions for how apps get deployed in these scenarios. In the case of multiple search heads (no clustering) and multiple indexers (no clustering), you will explicitly need to tell each container what apps to install by defining a `SPLUNK_APPS_URL` for each role. See the example below and note the different apps used for search heads and indexers: -
<details><summary>2idx2sh.yml</summary>

- ``` version: "3.6" @@ -212,11 +202,9 @@ services: ports: - 8000 ``` -

In the case of search head clusters, you will explicitly need to tell the `splunk_deployer` what apps to install by defining a `SPLUNK_APPS_URL` for that particular role. The deployer will manage the distribution of apps to each of the search head cluster members (search heads). See the example below and note the different apps used for search heads and indexers: -
<details><summary>1dep3sh2idx.yml</summary>

``` version: "3.6" @@ -342,12 +330,9 @@ services: ports: - 8000 ``` -

In the case of indexer clusters, you will explicitly need to tell the `splunk_cluster_master` what apps to install by defining a `SPLUNK_APPS_URL` for that particular role. The cluster master will manage the distribution of apps to each of the indexer cluster members (indexers). See the example below and note the different apps used for search heads and indexers: -
<details><summary>3idx1sh1cm.yml</summary>

- ``` version: "3.6" @@ -453,4 +438,3 @@ services: ports: - 8000 ``` -

diff --git a/docs/advanced/LICENSE_INSTALL.md b/docs/advanced/LICENSE_INSTALL.md index 5fda9c66..9e4ef32f 100644 --- a/docs/advanced/LICENSE_INSTALL.md +++ b/docs/advanced/LICENSE_INSTALL.md @@ -8,6 +8,7 @@ There are primarily two different ways to apply a license when starting your con * [Path to file](#path-to-file) * [Download via URL](#download-via-url) +* [Free license](#splunk-free-license) * [Using a license master](#using-a-license-master) ## Path to file @@ -94,6 +95,14 @@ You should be able to bring up your deployment with the Splunk license automatic $ SPLUNK_PASSWORD=<password> docker stack deploy --compose-file=docker-compose.yml splunk_deployment ``` +## Splunk Free license +Not to be confused with a free Splunk Enterprise license, [Splunk Free](https://docs.splunk.com/Documentation/Splunk/latest/Admin/MoreaboutSplunkFree) is a product offering that enables the power of Splunk with a never-expiring but ingest-limited license. By default, when you create a Splunk environment using this Docker container, it will enable a Splunk Trial license, which is good for 30 days from the start of your instance. With Splunk Free, you can create a full developer environment of Splunk for any personal, sustained usage. + +To bring up a single instance using Splunk Free, you can run the following command: +``` +$ docker run --name so1 --hostname so1 -p 8000:8000 -e SPLUNK_PASSWORD=<password> -e SPLUNK_START_ARGS=--accept-license -e SPLUNK_LICENSE_URI=Free -it splunk/splunk:latest +``` + ## Using a license master When starting up a distributed Splunk deployment, it may be inefficient for each Splunk instance to apply/fetch the same license. Luckily, there is a dedicated Splunk role for this - `splunk_license_master`. For more information on what this role is, please refer to Splunk documentation on [license masters](https://docs.splunk.com/Documentation/Splunk/latest/Admin/Configurealicensemaster). 
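For either of the `SPLUNK_LICENSE_URI=Free` examples added in this patch, one way to confirm which license group actually ended up active is to query splunkd's management port. This is only a sketch: it assumes the container was also started with port 8089 published (for example `-p 8089:8089`), that `jq` is available on the host, and that the standard `licenser/groups` REST endpoint is reachable with the admin credentials you set.

```
# List license groups and print the one marked active; for a Splunk Free
# instance this should come back as "Free"
curl -sk -u admin:<password> \
    "https://localhost:8089/services/licenser/groups?output_mode=json" \
    | jq -r '.entry[] | select(.content.is_active == true) | .name'
```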
diff --git a/test_scenarios/3idx1cm.yaml b/test_scenarios/3idx1cm.yaml new file mode 100644 index 00000000..02560fe3 --- /dev/null +++ b/test_scenarios/3idx1cm.yaml @@ -0,0 +1,100 @@ +version: "3.6" + +networks: + splunknet: + driver: bridge + attachable: true + +services: + cm1: + networks: + splunknet: + aliases: + - cm1 + image: ${SPLUNK_IMAGE:-splunk/splunk:latest} + command: start + hostname: cm1 + container_name: cm1 + environment: + - SPLUNK_START_ARGS=--accept-license + - SPLUNK_INDEXER_URL=idx1,idx2,idx3 + - SPLUNK_CLUSTER_MASTER_URL=cm1 + - SPLUNK_ROLE=splunk_cluster_master + - SPLUNK_LICENSE_URI + - DEBUG=true + - SPLUNK_PASSWORD + ports: + - 8000 + - 8089 + volumes: + - ./defaults:/tmp/defaults + + idx1: + networks: + splunknet: + aliases: + - idx1 + image: ${SPLUNK_IMAGE:-splunk/splunk:latest} + command: start + hostname: idx1 + container_name: idx1 + environment: + - SPLUNK_START_ARGS=--accept-license + - SPLUNK_INDEXER_URL=idx1,idx2,idx3 + - SPLUNK_CLUSTER_MASTER_URL=cm1 + - SPLUNK_ROLE=splunk_indexer + - SPLUNK_LICENSE_URI + - DEBUG=true + - SPLUNK_PASSWORD + ports: + - 8000 + - 8089 + volumes: + - ./defaults:/tmp/defaults + + idx2: + networks: + splunknet: + aliases: + - idx2 + image: ${SPLUNK_IMAGE:-splunk/splunk:latest} + command: start + hostname: idx2 + container_name: idx2 + environment: + - SPLUNK_START_ARGS=--accept-license + - SPLUNK_INDEXER_URL=idx1,idx2,idx3 + - SPLUNK_CLUSTER_MASTER_URL=cm1 + - SPLUNK_ROLE=splunk_indexer + - SPLUNK_LICENSE_URI + - DEBUG=true + - SPLUNK_PASSWORD + ports: + - 8000 + - 8089 + volumes: + - ./defaults:/tmp/defaults + + idx3: + networks: + splunknet: + aliases: + - idx3 + image: ${SPLUNK_IMAGE:-splunk/splunk:latest} + command: start + hostname: idx3 + container_name: idx3 + environment: + - SPLUNK_START_ARGS=--accept-license + - SPLUNK_INDEXER_URL=idx1,idx2,idx3 + - SPLUNK_CLUSTER_MASTER_URL=cm1 + - SPLUNK_ROLE=splunk_indexer + - SPLUNK_LICENSE_URI + - DEBUG=true + - SPLUNK_PASSWORD + ports: + - 8000 + - 8089 + volumes: + - ./defaults:/tmp/defaults + \ No newline at end of file diff --git a/tests/test_docker_splunk.py b/tests/test_docker_splunk.py index 189268e8..32c4e43b 100644 --- a/tests/test_docker_splunk.py +++ b/tests/test_docker_splunk.py @@ -1647,93 +1647,83 @@ def test_compose_1uf_command_start_service(self): # Check Splunkd on all the containers assert self.check_splunkd("admin", self.password) - @pytest.mark.skip(reason="Oracle is preventing automated downloads") def test_compose_1so_java_oracle(self): - if 'redhat' in platform: - assert 'Not supported' - else: - # Standup deployment - self.compose_file_name = "1so_java_oracle.yaml" - self.project_name = generate_random_string() - container_count, rc = self.compose_up() - assert rc == 0 - # Wait for containers to come up - assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name)) - # Check ansible inventory json - log_json = self.extract_json("so1") - self.check_common_keys(log_json, "so") - try: - assert log_json["all"]["vars"]["java_version"] == "oracle:8" - except KeyError as e: - self.logger.error(e) - raise e - # Check container logs - output = self.get_container_logs("so1") - self.check_ansible(output) - # Check Splunkd on all the containers - assert self.check_splunkd("admin", self.password) - # Check if java is installed - exec_command = self.client.exec_create("so1", "java -version") - std_out = self.client.exec_start(exec_command) - assert "java version \"1.8.0" in std_out + # Standup deployment + 
self.compose_file_name = "1so_java_oracle.yaml" + self.project_name = generate_random_string() + container_count, rc = self.compose_up() + assert rc == 0 + # Wait for containers to come up + assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name)) + # Check ansible inventory json + log_json = self.extract_json("so1") + self.check_common_keys(log_json, "so") + try: + assert log_json["all"]["vars"]["java_version"] == "oracle:8" + except KeyError as e: + self.logger.error(e) + raise e + # Check container logs + output = self.get_container_logs("so1") + self.check_ansible(output) + # Check Splunkd on all the containers + assert self.check_splunkd("admin", self.password) + # Check if java is installed + exec_command = self.client.exec_create("so1", "java -version") + std_out = self.client.exec_start(exec_command) + assert "java version \"1.8.0" in std_out def test_compose_1so_java_openjdk8(self): - if 'redhat' in platform: - assert 'Not supported' - else: - # Standup deployment - self.compose_file_name = "1so_java_openjdk8.yaml" - self.project_name = generate_random_string() - container_count, rc = self.compose_up() - assert rc == 0 - # Wait for containers to come up - assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name)) - # Check ansible inventory json - log_json = self.extract_json("so1") - self.check_common_keys(log_json, "so") - try: - assert log_json["all"]["vars"]["java_version"] == "openjdk:8" - except KeyError as e: - self.logger.error(e) - raise e - # Check container logs - output = self.get_container_logs("so1") - self.check_ansible(output) - # Check Splunkd on all the containers - assert self.check_splunkd("admin", self.password) - # Check if java is installed - exec_command = self.client.exec_create("so1", "java -version") - std_out = self.client.exec_start(exec_command) - assert "openjdk version \"1.8.0" in std_out + # Standup deployment + self.compose_file_name = "1so_java_openjdk8.yaml" + self.project_name = generate_random_string() + container_count, rc = self.compose_up() + assert rc == 0 + # Wait for containers to come up + assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name)) + # Check ansible inventory json + log_json = self.extract_json("so1") + self.check_common_keys(log_json, "so") + try: + assert log_json["all"]["vars"]["java_version"] == "openjdk:8" + except KeyError as e: + self.logger.error(e) + raise e + # Check container logs + output = self.get_container_logs("so1") + self.check_ansible(output) + # Check Splunkd on all the containers + assert self.check_splunkd("admin", self.password) + # Check if java is installed + exec_command = self.client.exec_create("so1", "java -version") + std_out = self.client.exec_start(exec_command) + assert "openjdk version \"1.8.0" in std_out def test_compose_1so_java_openjdk11(self): - if 'redhat' in platform: - assert 'Not supported' - else: - # Standup deployment - self.compose_file_name = "1so_java_openjdk11.yaml" - self.project_name = generate_random_string() - container_count, rc = self.compose_up() - assert rc == 0 - # Wait for containers to come up - assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name)) - # Check ansible inventory json - log_json = self.extract_json("so1") - self.check_common_keys(log_json, "so") - try: - assert log_json["all"]["vars"]["java_version"] == "openjdk:11" - except 
KeyError as e: - self.logger.error(e) - raise e - # Check container logs - output = self.get_container_logs("so1") - self.check_ansible(output) - # Check Splunkd on all the containers - assert self.check_splunkd("admin", self.password) - # Check if java is installed - exec_command = self.client.exec_create("so1", "java -version") - std_out = self.client.exec_start(exec_command) - assert "openjdk version \"11.0.2" in std_out + # Standup deployment + self.compose_file_name = "1so_java_openjdk11.yaml" + self.project_name = generate_random_string() + container_count, rc = self.compose_up() + assert rc == 0 + # Wait for containers to come up + assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name)) + # Check ansible inventory json + log_json = self.extract_json("so1") + self.check_common_keys(log_json, "so") + try: + assert log_json["all"]["vars"]["java_version"] == "openjdk:11" + except KeyError as e: + self.logger.error(e) + raise e + # Check container logs + output = self.get_container_logs("so1") + self.check_ansible(output) + # Check Splunkd on all the containers + assert self.check_splunkd("admin", self.password) + # Check if java is installed + exec_command = self.client.exec_create("so1", "java -version") + std_out = self.client.exec_start(exec_command) + assert "openjdk version \"11.0.2" in std_out def test_compose_1so_hec(self): # Standup deployment @@ -1988,6 +1978,129 @@ def test_compose_1uf1so(self): assert search_providers[0] == "so1" assert distinct_hosts == 2 + def test_compose_3idx1cm_default_repl_factor(self): + # Generate default.yml + cid = self.client.create_container(self.SPLUNK_IMAGE_NAME, tty=True, command="create-defaults") + self.client.start(cid.get("Id")) + output = self.get_container_logs(cid.get("Id")) + self.client.remove_container(cid.get("Id"), v=True, force=True) + # Get the password + password = re.search(" password: (.*)", output).group(1).strip() + assert password + # Write the default.yml to a file + with open(os.path.join(SCENARIOS_DIR, "defaults", "default.yml"), "w") as f: + f.write(output) + # Standup deployment + try: + self.compose_file_name = "3idx1cm.yaml" + self.project_name = generate_random_string() + container_count, rc = self.compose_up() + assert rc == 0 + # Wait for containers to come up + assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name), timeout=600) + # Get container logs + container_mapping = {"cm1": "cm", "idx1": "idx", "idx2": "idx", "idx3": "idx"} + for container in container_mapping: + # Check ansible version & configs + ansible_logs = self.get_container_logs(container) + self.check_ansible(ansible_logs) + # Check values in log output + inventory_json = self.extract_json(container) + self.check_common_keys(inventory_json, container_mapping[container]) + try: + assert inventory_json["splunk_indexer"]["hosts"] == ["idx1", "idx2", "idx3"] + assert inventory_json["splunk_cluster_master"]["hosts"] == ["cm1"] + except KeyError as e: + self.logger.error(e) + raise e + # Check Splunkd on all the containers + assert self.check_splunkd("admin", self.password) + # Make sure apps are installed, and shcluster is setup properly + containers = self.client.containers(filters={"label": "com.docker.compose.project={}".format(self.project_name)}) + assert len(containers) == 4 + for container in containers: + container_name = container["Names"][0].strip("/") + splunkd_port = self.client.port(container["Id"], 8089)[0]["HostPort"] + if 
container_name == "cm1": + # Check the replication factor & search factor + url = "https://localhost:{}/services/cluster/config/config?output_mode=json".format(splunkd_port) + kwargs = {"auth": ("admin", self.password), "verify": False} + status, content = self.handle_request_retry("GET", url, kwargs) + assert status == 200 + assert json.loads(content)["entry"][0]["content"]["replication_factor"] == 3 + assert json.loads(content)["entry"][0]["content"]["search_factor"] == 3 + except Exception as e: + self.logger.error(e) + raise e + finally: + try: + os.remove(os.path.join(SCENARIOS_DIR, "defaults", "default.yml")) + except OSError as e: + pass + + def test_compose_3idx1cm_custom_repl_factor(self): + # Generate default.yml + cid = self.client.create_container(self.SPLUNK_IMAGE_NAME, tty=True, command="create-defaults") + self.client.start(cid.get("Id")) + output = self.get_container_logs(cid.get("Id")) + self.client.remove_container(cid.get("Id"), v=True, force=True) + # Get the password + password = re.search(" password: (.*)", output).group(1).strip() + assert password + # Change repl factor & search factor + output = re.sub(r' replication_factor: 3', r''' replication_factor: 2''', output) + output = re.sub(r' search_factor: 3', r''' search_factor: 1''', output) + # Write the default.yml to a file + with open(os.path.join(SCENARIOS_DIR, "defaults", "default.yml"), "w") as f: + f.write(output) + # Standup deployment + try: + self.compose_file_name = "3idx1cm.yaml" + self.project_name = generate_random_string() + container_count, rc = self.compose_up() + assert rc == 0 + # Wait for containers to come up + assert self.wait_for_containers(container_count, label="com.docker.compose.project={}".format(self.project_name), timeout=600) + # Get container logs + container_mapping = {"cm1": "cm", "idx1": "idx", "idx2": "idx", "idx3": "idx"} + for container in container_mapping: + # Check ansible version & configs + ansible_logs = self.get_container_logs(container) + self.check_ansible(ansible_logs) + # Check values in log output + inventory_json = self.extract_json(container) + self.check_common_keys(inventory_json, container_mapping[container]) + try: + assert inventory_json["splunk_indexer"]["hosts"] == ["idx1", "idx2", "idx3"] + assert inventory_json["splunk_cluster_master"]["hosts"] == ["cm1"] + except KeyError as e: + self.logger.error(e) + raise e + # Check Splunkd on all the containers + assert self.check_splunkd("admin", self.password) + # Make sure apps are installed, and shcluster is setup properly + containers = self.client.containers(filters={"label": "com.docker.compose.project={}".format(self.project_name)}) + assert len(containers) == 4 + for container in containers: + container_name = container["Names"][0].strip("/") + splunkd_port = self.client.port(container["Id"], 8089)[0]["HostPort"] + if container_name == "cm1": + # Check the replication factor & search factor + url = "https://localhost:{}/services/cluster/config/config?output_mode=json".format(splunkd_port) + kwargs = {"auth": ("admin", self.password), "verify": False} + status, content = self.handle_request_retry("GET", url, kwargs) + assert status == 200 + assert json.loads(content)["entry"][0]["content"]["replication_factor"] == 2 + assert json.loads(content)["entry"][0]["content"]["search_factor"] == 1 + except Exception as e: + self.logger.error(e) + raise e + finally: + try: + os.remove(os.path.join(SCENARIOS_DIR, "defaults", "default.yml")) + except OSError as e: + pass + def test_compose_1cm_smartstore(self): # 
Generate default.yml cid = self.client.create_container(self.SPLUNK_IMAGE_NAME, tty=True, command="create-defaults")
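The new `test_scenarios/3idx1cm.yaml` file and the two `3idx1cm` tests above read `replication_factor` and `search_factor` from the cluster master's `/services/cluster/config/config` endpoint. A rough manual equivalent outside the test harness is sketched below; it assumes `docker-compose` and `jq` are installed, that it is run from the repository root, and that the host port mapped to `cm1`'s management port is looked up dynamically because the compose file does not pin it. The automated tests additionally generate a `default.yml` into `test_scenarios/defaults/` before bringing the stack up.

```
# Bring up one cluster master and three clustered indexers
SPLUNK_PASSWORD=<password> docker-compose -f test_scenarios/3idx1cm.yaml up -d

# Find the host port mapped to cm1's management port (8089)
CM_PORT=$(docker port cm1 8089/tcp | head -n1 | awk -F: '{print $NF}')

# Ask the cluster master for its replication and search factors;
# the default-factor test above expects 3 and 3
curl -sk -u admin:<password> \
    "https://localhost:${CM_PORT}/services/cluster/config/config?output_mode=json" \
    | jq '.entry[0].content | {replication_factor, search_factor}'
```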