diff --git a/.circleci/config.yml b/.circleci/config.yml index 190e9892..639de1a8 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -6,13 +6,21 @@ jobs: steps: - checkout - run: - name: Build Container + name: Setup python3 command: | - make all + pyenv global 2.7.12 3.5.2 + python --version + pip --version + python3 --version + pip3 --version - run: name: Setup Tests / Scanner Requirements command: | make test_setup + - run: + name: Build Container + command: | + make all - run: name: Export Build Images for Artifacts command: | @@ -26,7 +34,16 @@ jobs: path: clair-scanner-logs destintation: clair-scanner-logs - run: - name: Running CI Tests + name: Test Python3 installation + command: make test_python3_all + - run: + name: Test Python2 as the default + command: make test_python2_all + - run: + name: Test if image size increase + command: make test_debian9_image_size + - run: + name: Running debian9 CI Tests command: make run_tests_debian9 no_output_timeout: 20m - store_artifacts: diff --git a/Makefile b/Makefile index aa16f2c2..1b7ff053 100644 --- a/Makefile +++ b/Makefile @@ -31,8 +31,8 @@ SCANNER_DATE := `date +%Y-%m-%d` SCANNER_DATE_YEST := `TZ=GMT+24 +%Y:%m:%d` SCANNER_VERSION := v8 SCANNER_LOCALIP := $(shell ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1' | awk '{print $1}' | head -n 1) -SCANNER_IMAGES_TO_SCAN := splunk-debian-9 splunk-debian-10 splunk-centos-7 splunk-redhat-8 uf-debian-9 uf-debian-10 uf-centos-7 uf-redhat-8 -CONTAINERS_TO_SAVE := splunk-debian-9 splunk-debian-10 splunk-centos-7 splunk-redhat-8 uf-debian-9 uf-debian-10 uf-centos-7 uf-redhat-8 +SCANNER_IMAGES_TO_SCAN := splunk-debian-9 splunk-debian-10 splunk-centos-7 splunk-redhat-8 uf-debian-9 uf-debian-10 uf-centos-7 uf-redhat-8 splunk-py23-debian-9 splunk-py23-debian-10 splunk-py23-centos-7 splunk-py23-redhat-8 uf-py23-debian-9 uf-py23-debian-10 uf-py23-centos-7 uf-py23-redhat-8 +CONTAINERS_TO_SAVE := 
splunk-debian-9 splunk-debian-10 splunk-centos-7 splunk-redhat-8 uf-debian-9 uf-debian-10 uf-centos-7 uf-redhat-8 splunk-py23-debian-9 splunk-py23-debian-10 splunk-py23-centos-7 splunk-py23-redhat-8 uf-py23-debian-9 uf-py23-debian-10 uf-py23-centos-7 uf-py23-redhat-8 ifeq ($(shell uname), Linux) SCANNER_FILE = clair-scanner_linux_amd64 else ifeq ($(shell uname), Darwin) @@ -44,7 +44,7 @@ endif .PHONY: tests interactive_tutorials -all: splunk uf +all: splunk uf splunk-py23 uf-py23 ansible: @if [ -d "splunk-ansible" ]; then \ @@ -225,6 +225,61 @@ uf-windows-2016: base-windows-2016 ansible --build-arg SPLUNK_BUILD_URL=${UF_WIN_BUILD_URL} \ -t uf-windows-2016:${IMAGE_VERSION} . + +##### Python 3 support ##### +splunk-py23: splunk-py23-debian-9 splunk-py23-debian-10 splunk-py23-centos-7 splunk-py23-redhat-8 + +splunk-py23-debian-9: splunk-debian-9 + docker build ${DOCKER_BUILD_FLAGS} \ + -f py23-image/debian-9/Dockerfile \ + --build-arg SPLUNK_PRODUCT=splunk \ + -t splunk-py23-debian-9:${IMAGE_VERSION} . + +splunk-py23-debian-10: splunk-debian-10 + docker build ${DOCKER_BUILD_FLAGS} \ + -f py23-image/debian-10/Dockerfile \ + --build-arg SPLUNK_PRODUCT=splunk \ + -t splunk-py23-debian-10:${IMAGE_VERSION} . + +splunk-py23-centos-7: splunk-centos-7 + docker build ${DOCKER_BUILD_FLAGS} \ + -f py23-image/centos-7/Dockerfile \ + --build-arg SPLUNK_PRODUCT=splunk \ + -t splunk-py23-centos-7:${IMAGE_VERSION} . + +splunk-py23-redhat-8: splunk-redhat-8 + docker build ${DOCKER_BUILD_FLAGS} \ + -f py23-image/redhat-8/Dockerfile \ + --build-arg SPLUNK_PRODUCT=splunk \ + -t splunk-py23-redhat-8:${IMAGE_VERSION} . + +uf-py23: uf-py23-debian-9 uf-py23-debian-10 uf-py23-centos-7 uf-py23-redhat-8 + +uf-py23-debian-9: uf-debian-9 + docker build ${DOCKER_BUILD_FLAGS} \ + -f py23-image/debian-9/Dockerfile \ + --build-arg SPLUNK_PRODUCT=uf \ + -t uf-py23-debian-9:${IMAGE_VERSION} . 
+ +uf-py23-debian-10: uf-debian-10 + docker build ${DOCKER_BUILD_FLAGS} \ + -f py23-image/debian-10/Dockerfile \ + --build-arg SPLUNK_PRODUCT=uf \ + -t uf-py23-debian-10:${IMAGE_VERSION} . + +uf-py23-centos-7: uf-centos-7 + docker build ${DOCKER_BUILD_FLAGS} \ + -f py23-image/centos-7/Dockerfile \ + --build-arg SPLUNK_PRODUCT=uf \ + -t uf-py23-centos-7:${IMAGE_VERSION} . + +uf-py23-redhat-8: uf-redhat-8 + docker build ${DOCKER_BUILD_FLAGS} \ + -f py23-image/redhat-8/Dockerfile \ + --build-arg SPLUNK_PRODUCT=uf \ + -t uf-py23-redhat-8:${IMAGE_VERSION} . + + ##### Tests ##### sample-compose-up: sample-compose-down docker-compose -f test_scenarios/${SPLUNK_COMPOSE} up -d @@ -248,7 +303,7 @@ run_tests_centos7: run_tests_redhat8: @echo 'Running the super awesome tests; RedHat 8' - pytest -sv tests/test_redhat_8.py --junitxml test-results/redhat8-result/testresults_redhat8.xml + pytest -sv tests/test_docker_splunk.py --platform redhat-8 --junitxml test-results/redhat8-result/testresults_redhat8.xml test_setup: @echo 'Install test requirements' @@ -256,6 +311,8 @@ test_setup: pip install -r $(shell pwd)/tests/requirements.txt --upgrade mkdir test-results/centos7-result || true mkdir test-results/debian9-result || true + mkdir test-results/debian10-result || true + mkdir test-results/redhat8-result || true run_tests_debian9: @echo 'Running the super awesome tests; Debian 9' @@ -270,6 +327,91 @@ save_containers: mkdir test-results/saved_images || true $(foreach image,${CONTAINERS_TO_SAVE}, echo "Currently saving: ${image}"; docker save ${image} --output test-results/saved_images/${image}.tar; echo "Compressing: ${image}.tar"; gzip test-results/saved_images/${image}.tar; ) +test_python3_all: test_splunk_python3_all test_uf_python3_all + +test_splunk_python3_all: test_splunk_centos7_python3 test_splunk_redhat8_python3 test_splunk_debian9_python3 test_splunk_debian10_python3 + +test_uf_python3_all: test_uf_centos7_python3 test_uf_redhat8_python3 test_uf_debian9_python3 
test_uf_debian10_python3 + +test_splunk_centos7_python3: + $(call test_python3_installation,splunk-py23-centos-7) + +test_splunk_redhat8_python3: + $(call test_python3_installation,splunk-py23-redhat-8) + +test_splunk_debian9_python3: + $(call test_python3_installation,splunk-py23-debian-9) + +test_splunk_debian10_python3: + $(call test_python3_installation,splunk-py23-debian-10) + +test_uf_centos7_python3: + $(call test_python3_installation,uf-py23-centos-7) + +test_uf_redhat8_python3: + $(call test_python3_installation,uf-py23-redhat-8) + +test_uf_debian9_python3: + $(call test_python3_installation,uf-py23-debian-9) + +test_uf_debian10_python3: + $(call test_python3_installation,uf-py23-debian-10) + +define test_python3_installation +docker run -d --rm --name $1 -it $1 bash +docker exec -it $1 bash -c 'if [[ $$(python3 -V) =~ "Python 3" ]] ; then echo "$$(python3 -V) installed" ; else echo "No Python3 installation found" ; docker kill $1 ; exit 1 ; fi' +docker kill $1 +endef + +test_python2_all: test_splunk_python2_all test_uf_python2_all + +test_splunk_python2_all: test_splunk_centos7_python2 test_splunk_redhat8_python2 test_splunk_debian9_python2 test_splunk_debian10_python2 + +test_uf_python2_all: test_uf_centos7_python2 test_uf_redhat8_python2 test_uf_debian9_python2 test_uf_debian10_python2 + +test_splunk_centos7_python2: + $(call test_python2_installation,splunk-py23-centos-7) + +test_splunk_redhat8_python2: + $(call test_python2_installation,splunk-py23-redhat-8) + +test_splunk_debian9_python2: + $(call test_python2_installation,splunk-py23-debian-9) + +test_splunk_debian10_python2: + $(call test_python2_installation,splunk-py23-debian-10) + +test_uf_centos7_python2: + $(call test_python2_installation,uf-py23-centos-7) + +test_uf_redhat8_python2: + $(call test_python2_installation,uf-py23-redhat-8) + +test_uf_debian9_python2: + $(call test_python2_installation,uf-py23-debian-9) + +test_uf_debian10_python2: + $(call 
test_python2_installation,uf-py23-debian-10) + +#python2 version print to stderr, hence the 2>&1 +define test_python2_installation +docker run -d --rm --name $1 -it $1 bash +docker exec -it $1 bash -c 'if [[ $$(python -V 2>&1) =~ "Python 2" ]] ; then echo "$$(python -V 2>&1) is the default python" ; else echo "Python is not default to python2" ; docker kill $1 ; exit 1 ; fi' +docker kill $1 +endef + +test_debian9_image_size: + $(call test_image_size,splunk-debian-9) + +define test_image_size +docker pull splunk/splunk:edge +CUR_SIZE=$$(docker image inspect $1:latest --format='{{.Size}}') ; \ +EDGE_SIZE=$$(docker image inspect splunk/splunk:edge --format='{{.Size}}') ; \ +echo "current $1 image size = "$$CUR_SIZE ; \ +echo "edge image size = "$$EDGE_SIZE ; \ +if [[ $$CUR_SIZE -gt $$EDGE_SIZE*102/100 ]] ; then echo "current image size is 2% more than edge image" ; exit 1 ; fi +endef + setup_clair_scanner: mkdir clair-scanner-logs mkdir test-results/cucumber diff --git a/docs/SECURITY.md b/docs/SECURITY.md new file mode 100644 index 00000000..6f1ae709 --- /dev/null +++ b/docs/SECURITY.md @@ -0,0 +1,87 @@ +## Security ## +This section will cover various security considerations when using the Splunk Enterprise and Universal Forwarder containers. + +### Startup Users ### + +The Splunk Enterprise and Universal Forwarder containers may be started using one of the following three user accounts: + +* `splunk` (most secure): This user has no privileged access and cannot use `sudo` to change to another user account. +It is a member of the `ansible` group, which enables it to run the embedded playbooks at startup. When using the +`splunk` user, all processes will run as this user. Note that you must set the `SPLUNK_HOME_OWNERSHIP_ENFORCEMENT` +environment variable to `false` when starting as this user. ***Recommended for production*** + +* `ansible` (middle ground): This user is a member of the `sudo` group and able to execute `sudo` commands without a +password. 
It uses privileged access at startup only to perform certain actions which cannot be performed by regular +users (see below). After startup, `sudo` access will automatically be removed from the `ansible` user if the +environment variable `STEPDOWN_ANSIBLE_USER` is set to `true`. ***This is the default user account*** + +* `root` (least secure): This is a privileged user running with UID of `0`. Some customers may want to use this for +forwarder processes that require access to log files which cannot be read by any other user. ***This is not recommended*** + +### After Startup ### + +By default, the primary Splunk processes will always run as the unprivileged user and group `splunk`, +regardless of which user account the containers are started with. You can override this by changing the following: + +* User: `splunk.user` variable in your `default.yml` template, or the `SPLUNK_USER` environment variable +* Group: `splunk.group` variable in your `default.yml` template, or the `SPLUNK_GROUP` environment variable + +Note that the containers are built with the `splunk` user having UID `41812` and the `splunk` group having GID `41812`. + +You may want to override these settings to ensure that Splunk forwarder processes have access to read your log files. +For example, you can ensure that all processes run as `root` by starting as the `root` user with the environment +variable `SPLUNK_USER` also set to `root` (this is not recommended). + +### Privileged Features ### + +Certain features supported by the Splunk Enterprise and Universal Forwarder containers require that they are started +with privileged access using either the `ansible` or `root` user accounts. + +#### Splunk Home Ownership #### + +By default, at startup the containers will ensure that all files located under the `SPLUNK_HOME` directory +(`/opt/splunk`) are owned by user `splunk` and group `splunk`. 
This helps to ensure that the Splunk processes are +able to read and write any external volumes mounted for `/opt/splunk/etc` and `/opt/splunk/var`. While all supported +versions of the docker engine will automatically set proper ownership for these volumes, external orchestration systems +typically will require extra steps. + +If you know that this step is unnecessary, you can disable it by setting the `SPLUNK_HOME_OWNERSHIP_ENFORCEMENT` +environment variable to `false`. Note that this must be disabled when starting containers with the `splunk` user +account. + +#### Package Installation #### + +The `JAVA_VERSION` environment variable can be used to automatically install OpenJDK at startup time. This feature +requires starting as a privileged user account. + +### Kubernetes Users ### + +For Kubernetes, we recommend using the `fsGroup` [Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) +to ensure that all Pods are able to write to your Persistent Volumes. For example: + +``` +apiVersion: v1 +kind: Pod +metadata: + name: example-splunk-pod +spec: + securityContext: + runAsUser: 41812 + fsGroup: 41812 + containers: + name: example-splunk-container + image: splunk/splunk + env: + - name: SPLUNK_HOME_OWNERSHIP_ENFORCEMENT + value: "false" +... +``` + +This can be used to create a Splunk Enterprise Pod running as the unprivileged `splunk` user which is able to securely +read and write from any Persistent Volumes that are created for it. 
+ +Red Hat OpenShift users can leverage the built-in `nonroot` [Security Context Constraint](https://docs.openshift.com/container-platform/3.9/admin_guide/manage_scc.html) +to run Pods with the above Security Context: +``` +oc adm policy add-scc-to-user nonroot default +``` \ No newline at end of file diff --git a/py23-image/centos-7/Dockerfile b/py23-image/centos-7/Dockerfile new file mode 100644 index 00000000..a113bab0 --- /dev/null +++ b/py23-image/centos-7/Dockerfile @@ -0,0 +1,7 @@ +ARG SPLUNK_PRODUCT=splunk +FROM ${SPLUNK_PRODUCT}-centos-7:latest +USER root + +RUN yum -y update +RUN yum -y install python36 python36-requests +RUN python3 -m ensurepip \ No newline at end of file diff --git a/py23-image/debian-10/Dockerfile b/py23-image/debian-10/Dockerfile new file mode 100644 index 00000000..c190369f --- /dev/null +++ b/py23-image/debian-10/Dockerfile @@ -0,0 +1,7 @@ +ARG SPLUNK_PRODUCT=splunk +FROM ${SPLUNK_PRODUCT}-debian-10:latest +USER root + +RUN apt update +RUN apt-get install -y --no-install-recommends python3 python3-pip python3-setuptools python3-requests python3-yaml +RUN pip3 --no-cache-dir install ansible \ No newline at end of file diff --git a/py23-image/debian-9/Dockerfile b/py23-image/debian-9/Dockerfile new file mode 100644 index 00000000..159d497f --- /dev/null +++ b/py23-image/debian-9/Dockerfile @@ -0,0 +1,6 @@ +ARG SPLUNK_PRODUCT=splunk +FROM ${SPLUNK_PRODUCT}-debian-9:latest +USER root + +RUN apt-get update +RUN apt-get install -y --no-install-recommends python3 python3-pip python3-requests \ No newline at end of file diff --git a/py23-image/redhat-8/Dockerfile b/py23-image/redhat-8/Dockerfile new file mode 100644 index 00000000..6d035fcd --- /dev/null +++ b/py23-image/redhat-8/Dockerfile @@ -0,0 +1,7 @@ +ARG SPLUNK_PRODUCT=splunk +FROM ${SPLUNK_PRODUCT}-redhat-8:latest +USER root + +RUN microdnf -y --nodocs install python3 +RUN alternatives --set python /usr/bin/python2 +RUN pip3 -q --no-cache-dir install requests ansible \ No newline 
at end of file diff --git a/splunk/common-files/Dockerfile b/splunk/common-files/Dockerfile index 9c70e0ac..667f1ab0 100644 --- a/splunk/common-files/Dockerfile +++ b/splunk/common-files/Dockerfile @@ -101,11 +101,15 @@ RUN sed -i -e 's/%sudo\s\+ALL=(ALL\(:ALL\)\?)\s\+ALL/%sudo ALL=NOPASSWD:ALL/g' / && groupadd -r ${ANSIBLE_GROUP} \ && useradd -r -m -g ${ANSIBLE_GROUP} ${ANSIBLE_USER} \ && usermod -aG sudo ${ANSIBLE_USER} \ + && usermod -aG ${ANSIBLE_GROUP} ${SPLUNK_USER} \ # Container Artifact Directory is a place for all artifacts and logs that are generated by the provisioning process. The directory is owned by the user "ansible". && mkdir ${CONTAINER_ARTIFACT_DIR} \ - && chown -R ${ANSIBLE_USER}:${ANSIBLE_GROUP} $CONTAINER_ARTIFACT_DIR \ + && chown -R ${ANSIBLE_USER}:${ANSIBLE_GROUP} ${CONTAINER_ARTIFACT_DIR} \ + && chmod -R 775 ${CONTAINER_ARTIFACT_DIR} \ && chmod -R 555 ${SPLUNK_ANSIBLE_HOME} \ - && chmod -R 777 ${CONTAINER_ARTIFACT_DIR} \ + && chgrp ${ANSIBLE_GROUP} ${SPLUNK_ANSIBLE_HOME} ${SPLUNK_ANSIBLE_HOME}/ansible.cfg \ + && chmod 775 ${SPLUNK_ANSIBLE_HOME} \ + && chmod 664 ${SPLUNK_ANSIBLE_HOME}/ansible.cfg \ && chmod 755 /sbin/entrypoint.sh /sbin/createdefaults.py /sbin/checkstate.sh USER ${ANSIBLE_USER} diff --git a/splunk/common-files/entrypoint.sh b/splunk/common-files/entrypoint.sh index b4ce7caa..238e8a70 100755 --- a/splunk/common-files/entrypoint.sh +++ b/splunk/common-files/entrypoint.sh @@ -35,6 +35,9 @@ trap teardown SIGINT SIGTERM prep_ansible() { cd ${SPLUNK_ANSIBLE_HOME} + if [ `whoami` == "${SPLUNK_USER}" ]; then + sed -i -e "s,^become\\s*=.*,become = false," ansible.cfg + fi if [[ "$DEBUG" == "true" ]]; then ansible-playbook --version python inventory/environ.py --write-to-file @@ -53,34 +56,37 @@ watch_for_failure(){ echo Ansible playbook complete, will begin streaming var/log/splunk/splunkd_stderr.log echo user_permission_change + if [ `whoami` != "${SPLUNK_USER}" ]; then + RUN_AS_SPLUNK="sudo -u ${SPLUNK_USER}" + fi # Any 
crashes/errors while Splunk is running should get logged to splunkd_stderr.log and sent to the container's stdout if [ -z "$SPLUNK_TAIL_FILE" ]; then - sudo -u ${SPLUNK_USER} tail -n 0 -f ${SPLUNK_HOME}/var/log/splunk/splunkd_stderr.log & + ${RUN_AS_SPLUNK} tail -n 0 -f ${SPLUNK_HOME}/var/log/splunk/splunkd_stderr.log & else - sudo -u ${SPLUNK_USER} tail -n 0 -f ${SPLUNK_TAIL_FILE} & + ${RUN_AS_SPLUNK} tail -n 0 -f ${SPLUNK_TAIL_FILE} & fi wait } create_defaults() { - createdefaults.py + createdefaults.py } start_and_exit() { - if [ -z "$SPLUNK_PASSWORD" ] - then - echo "WARNING: No password ENV var. Stack may fail to provision if splunk.password is not set in ENV or a default.yml" - fi + if [ -z "$SPLUNK_PASSWORD" ] + then + echo "WARNING: No password ENV var. Stack may fail to provision if splunk.password is not set in ENV or a default.yml" + fi sh -c "echo 'starting' > ${CONTAINER_ARTIFACT_DIR}/splunk-container.state" setup - prep_ansible + prep_ansible ansible-playbook $ANSIBLE_EXTRA_FLAGS -i inventory/environ.py site.yml } start() { - trap teardown EXIT + trap teardown EXIT start_and_exit - watch_for_failure + watch_for_failure } configure_multisite() { @@ -89,17 +95,17 @@ configure_multisite() { } restart(){ - trap teardown EXIT + trap teardown EXIT sh -c "echo 'restarting' > ${CONTAINER_ARTIFACT_DIR}/splunk-container.state" - prep_ansible - ${SPLUNK_HOME}/bin/splunk stop 2>/dev/null || true + prep_ansible + ${SPLUNK_HOME}/bin/splunk stop 2>/dev/null || true ansible-playbook -i inventory/environ.py start.yml watch_for_failure } user_permission_change(){ if [[ "$STEPDOWN_ANSIBLE_USER" == "true" ]]; then - bash -c "sudo deluser -q ansible sudo" + bash -c "sudo deluser -q ansible sudo" fi } @@ -140,7 +146,7 @@ Examples: * docker run -it -e SPLUNK_START_ARGS=--accept-license -e SPLUNK_INDEXER_URL=idx1,idx2 -e SPLUNK_SEARCH_HEAD_URL=sh1,sh2 -e SPLUNK_ROLE=splunk_search_head --hostname sh1 --network splunknet --network-alias sh1 -e SPLUNK_PASSWORD=helloworld -e 
SPLUNK_LICENSE_URI=http://example.com/splunk.lic splunk/splunk start EOF - exit 1 + exit 1 } case "$1" in @@ -157,12 +163,12 @@ case "$1" in configure_multisite $0 ;; create-defaults) - create_defaults - ;; + create_defaults + ;; restart) - shift - restart $@ - ;; + shift + restart $@ + ;; no-provision) user_permission_change tail -n 0 -f /etc/hosts & diff --git a/tests/requirements.txt b/tests/requirements.txt index f5b7f954..1c2d838b 100644 --- a/tests/requirements.txt +++ b/tests/requirements.txt @@ -4,4 +4,4 @@ docker PyYAML docker-compose pyasn1 -junit-xml +junit-xml \ No newline at end of file diff --git a/tests/test_docker_splunk.py b/tests/test_docker_splunk.py index 46068fa9..f9ddb5b2 100644 --- a/tests/test_docker_splunk.py +++ b/tests/test_docker_splunk.py @@ -8,6 +8,7 @@ import shlex import yaml import docker +from docker.types import Mount import urllib import requests import subprocess @@ -41,6 +42,8 @@ global platform platform = "debian-9" +OLD_SPLUNK_VERSION = "7.2.7" + def generate_random_string(): return ''.join(choice(ascii_lowercase) for b in range(20)) @@ -239,7 +242,7 @@ def extract_json(self, container_name): except Exception as e: self.logger.error(e) return None - + def search_internal_distinct_hosts(self, container_id, username="admin", password="password"): query = "search index=_internal earliest=-1m | stats dc(host) as distinct_hosts" splunkd_port = self.client.port(container_id, 8089)[0]["HostPort"] @@ -1134,6 +1137,94 @@ def test_adhoc_1so_web_ssl(self): except OSError: pass + def test_adhoc_1so_upgrade(self): + # Pull the old image + for line in self.client.pull("splunk/splunk:{}".format(OLD_SPLUNK_VERSION), stream=True, decode=True): + continue + # Create the "splunk-old" container + try: + cid = None + splunk_container_name = generate_random_string() + password = generate_random_string() + cid = self.client.create_container("splunk/splunk:{}".format(OLD_SPLUNK_VERSION), tty=True, ports=[8089, 8088], hostname="splunk", + 
name=splunk_container_name, environment={"DEBUG": "true", "SPLUNK_HEC_TOKEN": "qwerty", "SPLUNK_PASSWORD": password, "SPLUNK_START_ARGS": "--accept-license"}, + host_config=self.client.create_host_config(mounts=[Mount("/opt/splunk/etc", "opt-splunk-etc"), Mount("/opt/splunk/var", "opt-splunk-var")], + port_bindings={8089: ("0.0.0.0",), 8088: ("0.0.0.0",)}) + ) + cid = cid.get("Id") + self.client.start(cid) + # Poll for the container to be ready + assert self.wait_for_containers(1, name=splunk_container_name) + # Check splunkd + assert self.check_splunkd("admin", password) + # Add some data via HEC + splunk_hec_port = self.client.port(cid, 8088)[0]["HostPort"] + url = "https://localhost:{}/services/collector/event".format(splunk_hec_port) + kwargs = {"json": {"event": "world never says hello back"}, "verify": False, "headers": {"Authorization": "Splunk qwerty"}} + status, content = self.handle_request_retry("POST", url, kwargs) + assert status == 200 + # Remove the "splunk-old" container + self.client.remove_container(cid, v=False, force=True) + # Create the "splunk-new" container re-using volumes + splunk_container_name = generate_random_string() + cid = self.client.create_container(self.SPLUNK_IMAGE_NAME, tty=True, ports=[8089, 8000], hostname="splunk", + name=splunk_container_name, environment={"DEBUG": "true", "SPLUNK_HEC_TOKEN": "qwerty", "SPLUNK_PASSWORD": password, "SPLUNK_START_ARGS": "--accept-license"}, + host_config=self.client.create_host_config(mounts=[Mount("/opt/splunk/etc", "opt-splunk-etc"), Mount("/opt/splunk/var", "opt-splunk-var")], + port_bindings={8089: ("0.0.0.0",), 8000: ("0.0.0.0",)}) + ) + cid = cid.get("Id") + self.client.start(cid) + # Poll for the container to be ready + assert self.wait_for_containers(1, name=splunk_container_name) + # Check splunkd + assert self.check_splunkd("admin", password) + # Run a search - we should be getting 2 hosts because the hostnames were different in the two containers created above + query = "search 
index=main earliest=-3m" + splunkd_port = self.client.port(cid, 8089)[0]["HostPort"] + url = "https://localhost:{}/services/search/jobs?output_mode=json".format(splunkd_port) + kwargs = { + "auth": ("admin", password), + "data": "search={}".format(urllib.quote_plus(query)), + "verify": False + } + resp = requests.post(url, **kwargs) + assert resp.status_code == 201 + sid = json.loads(resp.content)["sid"] + assert sid + self.logger.info("Search job {} created against on {}".format(sid, cid)) + # Wait for search to finish + # TODO: implement polling mechanism here + job_status = None + for _ in range(10): + url = "https://localhost:{}/services/search/jobs/{}?output_mode=json".format(splunkd_port, sid) + kwargs = {"auth": ("admin", password), "verify": False} + job_status = requests.get(url, **kwargs) + done = json.loads(job_status.content)["entry"][0]["content"]["isDone"] + self.logger.info("Search job {} done status is {}".format(sid, done)) + if done: + break + time.sleep(3) + # Check searchProviders - use the latest job_status check from the polling + assert job_status.status_code == 200 + # Check search results + url = "https://localhost:{}/services/search/jobs/{}/results?output_mode=json".format(splunkd_port, sid) + kwargs = {"auth": ("admin", password), "verify": False} + resp = requests.get(url, **kwargs) + assert resp.status_code == 200 + results = json.loads(resp.content)["results"] + assert len(results) == 1 + assert results[0]["_raw"] == "world never says hello back" + except Exception as e: + self.logger.error(e) + raise e + finally: + if cid: + self.client.remove_container(cid, v=True, force=True) + try: + os.remove(os.path.join(FIXTURES_DIR, "default.yml")) + except OSError: + pass + def test_compose_1so_trial(self): # Standup deployment self.compose_file_name = "1so_trial.yaml" @@ -1616,7 +1707,7 @@ def test_compose_1so_hec(self): url = "https://localhost:{}/services/collector/event".format(splunk_hec_port) kwargs = {"json": {"event": "hello world"}, 
"verify": False, "headers": {"Authorization": "Splunk abcd1234"}} status, content = self.handle_request_retry("POST", url, kwargs) - assert status == 200 + assert status == 200 def test_compose_1uf_hec(self): # Standup deployment diff --git a/uf/common-files/Dockerfile b/uf/common-files/Dockerfile index 9ea27573..5bbab1f5 100644 --- a/uf/common-files/Dockerfile +++ b/uf/common-files/Dockerfile @@ -90,10 +90,15 @@ RUN \ && groupadd -r ${ANSIBLE_GROUP} \ && useradd -r -m -g ${ANSIBLE_GROUP} ${ANSIBLE_USER} \ && usermod -aG sudo ${ANSIBLE_USER} \ + && usermod -aG ${ANSIBLE_GROUP} ${SPLUNK_USER} \ # Container Artifact Directory is a place for all artifacts and logs that are generated by the provisioning process. The directory is owned by the user "ansible". && mkdir ${CONTAINER_ARTIFACT_DIR} \ - && chown -R ${ANSIBLE_USER}:${ANSIBLE_GROUP} $CONTAINER_ARTIFACT_DIR \ + && chown -R ${ANSIBLE_USER}:${ANSIBLE_GROUP} ${CONTAINER_ARTIFACT_DIR} \ + && chmod -R 775 ${CONTAINER_ARTIFACT_DIR} \ && chmod -R 555 ${SPLUNK_ANSIBLE_HOME} \ + && chgrp ${ANSIBLE_GROUP} ${SPLUNK_ANSIBLE_HOME} ${SPLUNK_ANSIBLE_HOME}/ansible.cfg \ + && chmod 775 ${SPLUNK_ANSIBLE_HOME} \ + && chmod 664 ${SPLUNK_ANSIBLE_HOME}/ansible.cfg \ && chmod 755 /sbin/entrypoint.sh /sbin/createdefaults.py /sbin/checkstate.sh USER ${ANSIBLE_USER} diff --git a/uf/common-files/entrypoint.sh b/uf/common-files/entrypoint.sh index 33f581fc..4bbf8ebb 100755 --- a/uf/common-files/entrypoint.sh +++ b/uf/common-files/entrypoint.sh @@ -20,8 +20,8 @@ setup() { # Check if the user accepted the license if [[ "$SPLUNK_START_ARGS" != *"--accept-license"* ]]; then printf "License not accepted, please ensure the environment variable SPLUNK_START_ARGS contains the '--accept-license' flag\n" - printf "For example: docker run -e SPLUNK_START_ARGS=--accept-license splunk/splunk\n\n" - printf "For additional information and examples, see the help: docker run -it splunk/splunk help\n" + printf "For example: docker run -e 
SPLUNK_START_ARGS=--accept-license splunk/universalforwarder\n\n" + printf "For additional information and examples, see the help: docker run -it splunk/universalforwarder help\n" exit 1 fi } @@ -35,6 +35,9 @@ trap teardown SIGINT SIGTERM prep_ansible() { cd ${SPLUNK_ANSIBLE_HOME} + if [ `whoami` == "${SPLUNK_USER}" ]; then + sed -i -e "s,^become\\s*=.*,become = false," ansible.cfg + fi if [[ "$DEBUG" == "true" ]]; then ansible-playbook --version python inventory/environ.py --write-to-file @@ -52,42 +55,44 @@ watch_for_failure(){ echo Ansible playbook complete, will begin streaming var/log/splunk/splunkd_stderr.log echo user_permission_change - # Any crashes/errors while Splunk is running should get logged to splunkd_stderr.log and sent to the container's stdout + if [ `whoami` != "${SPLUNK_USER}" ]; then + RUN_AS_SPLUNK="sudo -u ${SPLUNK_USER}" + fi # Any crashes/errors while Splunk is running should get logged to splunkd_stderr.log and sent to the container's stdout if [ -z "$SPLUNK_TAIL_FILE" ]; then - sudo -u ${SPLUNK_USER} tail -n 0 -f ${SPLUNK_HOME}/var/log/splunk/splunkd_stderr.log & + ${RUN_AS_SPLUNK} tail -n 0 -f ${SPLUNK_HOME}/var/log/splunk/splunkd_stderr.log & else - sudo -u ${SPLUNK_USER} tail -n 0 -f ${SPLUNK_TAIL_FILE} & + ${RUN_AS_SPLUNK} tail -n 0 -f ${SPLUNK_TAIL_FILE} & fi wait } create_defaults() { - createdefaults.py + createdefaults.py } start_and_exit() { - if [ -z "$SPLUNK_PASSWORD" ] - then - echo "WARNING: No password ENV var. Stack may fail to provision if splunk.password is not set in ENV or a default.yml" - fi + if [ -z "$SPLUNK_PASSWORD" ] + then + echo "WARNING: No password ENV var. 
Stack may fail to provision if splunk.password is not set in ENV or a default.yml" + fi sh -c "echo 'starting' > ${CONTAINER_ARTIFACT_DIR}/splunk-container.state" setup - prep_ansible + prep_ansible ansible-playbook $ANSIBLE_EXTRA_FLAGS -i inventory/environ.py site.yml } start() { - trap teardown EXIT + trap teardown EXIT start_and_exit - watch_for_failure + watch_for_failure } restart(){ trap teardown EXIT sh -c "echo 'restarting' > ${CONTAINER_ARTIFACT_DIR}/splunk-container.state" - prep_ansible - ${SPLUNK_HOME}/bin/splunk stop 2>/dev/null || true + prep_ansible + ${SPLUNK_HOME}/bin/splunk stop 2>/dev/null || true ansible-playbook -i inventory/environ.py start.yml watch_for_failure } @@ -123,7 +128,7 @@ Environment Variables: EOF - exit 1 + exit 1 } case "$1" in @@ -136,12 +141,12 @@ case "$1" in start_and_exit $@ ;; create-defaults) - create_defaults - ;; + create_defaults + ;; restart) - shift - restart $@ - ;; + shift + restart $@ + ;; no-provision) user_permission_change tail -n 0 -f /etc/hosts &