From 3ba27395eabaf219905ffddc999e32481f968575 Mon Sep 17 00:00:00 2001
From: Dana Maxfield
Date: Mon, 15 Jan 2018 14:27:06 -0500
Subject: [PATCH 1/2] Automatic: Updates from the build for release 4.4.0

---
 README.containers.md                          |   40 +
 README.md                                     |   11 +-
 docker-compose/README.md                      |   72 +-
 .../bin/hub_add_replication_user.sh           |    2 +-
 docker-compose/bin/hub_create_data_dump.sh    |    4 +-
 docker-compose/bin/hub_db_migrate.sh          |    4 +-
 .../bin/hub_replication_changepassword.sh     |    2 +-
 .../bin/hub_reportdb_changepassword.sh        |    2 +-
 docker-compose/bin/system_check.sh            |    2 +-
 docker-compose/docker-compose.dbmigrate.yml   |   15 +-
 docker-compose/docker-compose.externaldb.yml  |   67 +-
 docker-compose/docker-compose.yml             |   74 +-
 docker-compose/hub-proxy.env                  |    2 +-
 docker-run/README.md                          |  170 ---
 docker-run/bin/hub_create_data_dump.sh        |   78 --
 docker-run/bin/system_check.sh                | 1212 -----------------
 docker-run/docker-hub.sh                      |  298 ----
 docker-run/external-postgres-init.pgsql       |   58 -
 docker-run/hub-postgres.env                   |    5 -
 docker-run/hub-proxy.env                      |   31 -
 docker-run/hub-webserver.env                  |   15 -
 docker-swarm/README.md                        |   97 +-
 docker-swarm/bin/hub_add_replication_user.sh  |    2 +-
 docker-swarm/bin/hub_create_data_dump.sh      |    4 +-
 docker-swarm/bin/hub_db_migrate.sh            |    4 +-
 .../bin/hub_replication_changepassword.sh     |    2 +-
 .../bin/hub_reportdb_changepassword.sh        |    2 +-
 docker-swarm/bin/system_check.sh              |    2 +-
 docker-swarm/docker-compose.dbmigrate.yml     |   15 +-
 docker-swarm/docker-compose.externaldb.yml    |   73 +-
 docker-swarm/docker-compose.yml               |   80 +-
 docker-swarm/hub-proxy.env                    |    2 +-
 docs/en_US/hub_install_compose.pdf            |  Bin 0 -> 866772 bytes
 docs/en_US/hub_install_kubernetes.pdf         |  Bin 0 -> 845476 bytes
 docs/en_US/hub_install_openshift.pdf          |  Bin 0 -> 824928 bytes
 docs/en_US/hub_install_swarm.pdf              |  Bin 0 -> 898723 bytes
 docs/{ => ja_JA}/hub_install_compose.pdf      |  Bin
 docs/{ => ja_JA}/hub_install_kubernetes.pdf   |  Bin
 docs/{ => ja_JA}/hub_install_openshift.pdf    |  Bin
 docs/{ => ja_JA}/hub_install_swarm.pdf        |  Bin
 kubernetes/README.md                          |   84 +-
 .../bin/hub_add_replication_user.sh           |    2 +-
 .../bin/hub_db_migrate.sh                     |    4 +-
 .../bin/hub_replication_changepassword.sh     |    2 +-
 .../bin/hub_reportdb_changepassword.sh        |    2 +-
 kubernetes/kubernetes-external-rds.yml        |  102 +-
 kubernetes/kubernetes-post-db.yml             |   97 +-
 kubernetes/kubernetes-pre-db.yml              |    6 +-
 openshift/README.md                           |  103 +-
 openshift/bin/hub_add_replication_user.sh     |   55 +
 openshift/bin/hub_db_migrate.sh               |   86 ++
 .../bin/hub_replication_changepassword.sh     |   55 +
 openshift/bin/hub_reportdb_changepassword.sh  |   55 +
 openshift/db.yml                              |   60 +
 openshift/openshift.yml                       |  216 ++-
 55 files changed, 1216 insertions(+), 2160 deletions(-)
 delete mode 100644 docker-run/README.md
 delete mode 100755 docker-run/bin/hub_create_data_dump.sh
 delete mode 100755 docker-run/bin/system_check.sh
 delete mode 100755 docker-run/docker-hub.sh
 delete mode 100644 docker-run/external-postgres-init.pgsql
 delete mode 100644 docker-run/hub-postgres.env
 delete mode 100644 docker-run/hub-proxy.env
 delete mode 100644 docker-run/hub-webserver.env
 create mode 100644 docs/en_US/hub_install_compose.pdf
 create mode 100644 docs/en_US/hub_install_kubernetes.pdf
 create mode 100644 docs/en_US/hub_install_openshift.pdf
 create mode 100644 docs/en_US/hub_install_swarm.pdf
 rename docs/{ => ja_JA}/hub_install_compose.pdf (100%)
 rename docs/{ => ja_JA}/hub_install_kubernetes.pdf (100%)
 rename docs/{ => ja_JA}/hub_install_openshift.pdf (100%)
 rename docs/{ => ja_JA}/hub_install_swarm.pdf (100%)
 rename {docker-run => kubernetes}/bin/hub_add_replication_user.sh (98%)
 rename
{docker-run => kubernetes}/bin/hub_db_migrate.sh (96%) rename {docker-run => kubernetes}/bin/hub_replication_changepassword.sh (98%) rename {docker-run => kubernetes}/bin/hub_reportdb_changepassword.sh (98%) create mode 100755 openshift/bin/hub_add_replication_user.sh create mode 100755 openshift/bin/hub_db_migrate.sh create mode 100755 openshift/bin/hub_replication_changepassword.sh create mode 100755 openshift/bin/hub_reportdb_changepassword.sh create mode 100644 openshift/db.yml diff --git a/README.containers.md b/README.containers.md index 34ae1d9..48e9252 100644 --- a/README.containers.md +++ b/README.containers.md @@ -42,6 +42,44 @@ There are times when running in other types of orchestrations that it is useful * Container Memory: 4GB * Container CPU: 1cpu +# Scan Container (hub-scan) + +### Container Description + +The Hub scan service is the container that all scan data requests are made against. + +### Scalability + +This container can be scaled. + +### Links/Ports + +This container will need to connect to these other containers/services: + +* postgres +* zookeeper +* registration +* logstash +* cfssl + +This container will need to expose port 8080 to other containers that will link to it. + +### Alternate Host Name Environment Variables + +There are times when running in other types of orchestrations that it is useful to have host names set for these containers that are not the default that Docker Compose or Docker Swarm use. These environment variables can be set to override the default host names: + +* postgres - $HUB_POSTGRES_HOST +* zookeeper - $HUB_ZOOKEEPER_HOST +* registration - $HUB_REGISTRATION_HOST +* logstash - $HUB_LOGSTASH_HOST +* cfssl - $HUB_CFSSL_HOST + +### Constraints + +* Default Max Java Heap Size: 4GB +* Container Memory: 4GB +* Container CPU: 1cpu + ## Job Runner App Container (hub-jobrunner) ### Container Description @@ -233,6 +271,7 @@ This container will need to connect to these other containers/services: * cfssl * webapp * documentation +* scan This container should expose port 443 outside of the docker network. @@ -243,6 +282,7 @@ There are times when running in other types of orchestrations that any individua - You may have an external cfssl endpoint. * webapp - $HUB_WEBAPP_HOST +* scan - $HUB_SCAN_HOST * cfssl - $HUB_CFSSL_HOST * documentation - $HUB_DOC_HOST diff --git a/README.md b/README.md index 73548bc..0661c71 100644 --- a/README.md +++ b/README.md @@ -2,9 +2,9 @@ This repository will contain orchestration files and documentation for using the individual Hub Docker containers. -## Location of hub 4.3.1 archive: +## Location of hub 4.2.0 archive: -https://github.com/blackducksoftware/hub/archive/v4.3.1.tar.gz +https://github.com/blackducksoftware/hub/archive/v4.2.0.tar.gz ## Important Upgrade Announcement @@ -26,12 +26,13 @@ https://github.com/blackducksoftware/hub/releases * https://hub.docker.com/r/blackducksoftware/hub-postgres/ * https://hub.docker.com/r/blackducksoftware/hub-zookeeper/ * https://hub.docker.com/r/blackducksoftware/hub-jobrunner/ +* https://hub.docker.com/r/blackducksoftware/hub-scan/ * https://hub.docker.com/r/blackducksoftware/hub-nginx/ * https://hub.docker.com/r/blackducksoftware/hub-documentation/ # Running Hub in Docker -Swarm (mode), Compose, 'docker run', Kubernetes, and OpenShift are supported are supported in Hub 4.3.1. Instructions for running each can be found in the archive bundle: +Swarm (mode), Compose, 'docker run', Kubernetes, and OpenShift are supported are supported in Hub 4.2.0. 
Instructions for running each can be found in the archive bundle: * docker-run - Instructions and files for running Hub with 'docker run' * docker-swarm - Instructions and files for running Hub with 'docker swarm mode' @@ -55,14 +56,14 @@ Hub has been tested with: This is the minimum hardware that is needed to run a single instance of each container. The sections below document the individual requirements for each container if they will be running on different machines or if more than one instance of a container will be run (right now only Job Runners support this). -* 4 CPUs +* 5 CPUs * 16 GB RAM ### Hardware Requirements (for Docker Compose, Kubernetes, and OpenShift) This is the minimum hardware that is needed to run a single instance of each container. The sections below document the individual requirements for each container if they will be running on different machines or if more than one instance of a container will be run (right now only Job Runners support this). -* 4 CPUs +* 5 CPUs * 20 GB RAM Also note that these requirements are for Hub and do not include other resources that are required to run the cluster overall. diff --git a/docker-compose/README.md b/docker-compose/README.md index 52a393c..7df9dc5 100644 --- a/docker-compose/README.md +++ b/docker-compose/README.md @@ -141,39 +141,65 @@ memory of the container. This is the setting: mem_limit. The setting for mem_lim heap size. If updating the Java heap size we recommend setting the mem_limit to at least 1GB higher than the max heap size. -This example will change the max java heap size for the webapp container to 8GB and the mem_limit to -9GB. In the 'docker-compose.yml' or 'docker-compose.externaldb.yml' that you are using, edit these lines +This example will change the max java heap size for the webapp container to 4GB and the mem_limit to +5GB. In the 'docker-compose.yml' or 'docker-compose.externaldb.yml' that you are using, edit these lines under the 'webapp' service description: Original: ``` - environment: {HUB_MAX_MEMORY: 4096m} + environment: {HUB_MAX_MEMORY: 2048m} restart: always - mem_limit: 4608M + mem_limit: 2560M ``` Updated: ``` - environment: {HUB_MAX_MEMORY: 8192m} + environment: {HUB_MAX_MEMORY: 4096m} restart: always - mem_limit: 9216M + mem_limit: 5120M ``` +### Changing the default Scan Service Memory Limits + +There are two main memory settings to consider for this container - Maximum Java heap size and the Docker memory limit. +The Docker memory limit must be higher than the maximum Java heap size. If updating the maximum Java heap size, it is +recommended to set the Docker memory limit to be at least 1GB higher than the maximum Java heap size. + +Note that this will apply to all Scan Services if the Scan Service container is scaled. + +The following configuration example will update the maximum Java heap size (HUB_MAX_MEMORY) from 2GB to 4GB. Note how +the Docker memory limit configuration value (mem_limit) is increased as well. These configuration values can be changed +in the 'docker-compose.yml' or 'docker-compose.externaldb.yml' files under the 'scan' service section: + + Original: + + ``` + environment: {HUB_MAX_MEMORY: 2048m} + restart: always + mem_limit: 2560M + ``` + + Updated: + + ``` + environment: {HUB_MAX_MEMORY: 4096m} + restart: always + mem_limit: 5120M + ``` + ### Changing the default Job Runner Memory Limits -There are two memory settings for this container. The first is the max java heap size. This is controlled by setting the -environment variable: HUB_MAX_MEMORY. 
The second is the limit that docker will use to schedule the limit the overall -memory of the container. This is the setting: mem_limit. The setting for mem_limit must be higher than the max Java -heap size. If updating the Java heap size we recommend setting the mem_limit to at least 1GB higher than the max heap -size. +There are two main memory settings to consider for this container - Maximum Java heap size and the Docker memory limit. +The Docker memory limit must be higher than the maximum Java heap size. If updating the maximum Java heap size, it is +recommended to set the Docker memory limit to be at least 1GB higher than the maximum Java heap size. Note that this will apply to all Job Runners if the Job Runner container is scaled. -This example will change the max java heap size for the job runner container to 8GB and the mem_limit to -9GB. In the 'docker-compose.yml' or 'docker-compose.externaldb.yml' that you are using, edit these lines -under the 'jobrunner' service description: +The following configuration example will update the maximum Java heap size (HUB_MAX_MEMORY) from 4GB to 8GB. Note how +the Docker memory limit configuration value (mem_limit) is increased as well. These configuration values can be changed +in the 'docker-compose.yml' or 'docker-compose.externaldb.yml' files under the 'jobrunner' service section: Original: @@ -218,10 +244,11 @@ If the container port is modified, any healthcheck URL references should also be ### Proxy Settings -There are currently three containers that need access to services hosted by Black Duck Software: +There are currently some containers that need access to services hosted by Black Duck Software: -* registration * jobrunner +* registration +* scan * webapp If a proxy is required for external internet access you'll need to configure it. @@ -240,13 +267,14 @@ There are two methods for specifying a proxy password when using Docker Compose. There are the services that will require the proxy password: -* webapp -* registration * jobrunner +* registration +* scan +* webapp ### External PostgreSQL Settings -The external PostgreSQL instance needs to initialized by creating users, databases, etc., and connection information must be provided to the _webapp_ and _jobrunner_ containers. +The external PostgreSQL instance needs to initialized by creating users, databases, etc., and connection information must be provided to the _hub-webapp_, _hub-scan_, and _hub-jobrunner_ containers. #### Steps @@ -259,7 +287,7 @@ The external PostgreSQL instance needs to initialized by creating users, databas 4. Edit _hub-postgres.env_ to specify database connection parameters. 5. Create a file named 'HUB_POSTGRES_USER_PASSWORD_FILE' with the password for the *blackduck_user* user. 6. Create a file named 'HUB_POSTGRES_ADMIN_PASSWORD_FILE' with the password for the *blackduck* user. -7. Mount the directory containing 'HUB_POSTGRES_USER_PASSWORD_FILE' and 'HUB_POSTGRES_ADMIN_PASSWORD_FILE' to /run/secrets in both the _hub-webapp_ and _hub-jobrunner_ containers. +7. Mount the directory containing 'HUB_POSTGRES_USER_PASSWORD_FILE' and 'HUB_POSTGRES_ADMIN_PASSWORD_FILE' to /run/secrets in both the _hub-webapp_, _hub-scan_, and _hub-jobrunner_ containers. #### Secure LDAP Trust Store Password @@ -333,7 +361,9 @@ This should also work for external connections to the database. # Scaling Hub -The Job Runner in the only container that is scalable. Job Runners can be scaled using: +The Job Runner and Scan Service containers support scaling. 
+ +As an example, the Job Runner container can be scaled using: ``` docker-compose -p hub scale jobrunner=2 diff --git a/docker-compose/bin/hub_add_replication_user.sh b/docker-compose/bin/hub_add_replication_user.sh index d12198d..18c3956 100755 --- a/docker-compose/bin/hub_add_replication_user.sh +++ b/docker-compose/bin/hub_add_replication_user.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.3.1} +HUB_VERSION=${HUB_VERSION:-4.4.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-compose/bin/hub_create_data_dump.sh b/docker-compose/bin/hub_create_data_dump.sh index 2db88a9..3b68793 100755 --- a/docker-compose/bin/hub_create_data_dump.sh +++ b/docker-compose/bin/hub_create_data_dump.sh @@ -7,7 +7,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.3.1} +HUB_VERSION=${HUB_VERSION:-4.4.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { @@ -64,7 +64,7 @@ done # Here we go... echo Creating a dump from the container "${container_id}" '...' -docker exec -it ${container_id} pg_dump -U blackduck -Fc -f /tmp/bds_hub.dump bds_hub +docker exec -i ${container_id} pg_dump -U blackduck -Fc -f /tmp/bds_hub.dump bds_hub exitCode=$? [ ${exitCode} -ne 0 ] && fail "Cannot create the dump file from the container [Container Id: ${container_id}]" 8 diff --git a/docker-compose/bin/hub_db_migrate.sh b/docker-compose/bin/hub_db_migrate.sh index 8b30854..398e5cc 100755 --- a/docker-compose/bin/hub_db_migrate.sh +++ b/docker-compose/bin/hub_db_migrate.sh @@ -13,7 +13,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.3.1} +HUB_VERSION=${HUB_VERSION:-4.4.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { @@ -38,7 +38,7 @@ dump_file="$1" [ ! 
-r "${dump_file}" ] && fail "${dump_file} is not readable" 2 # Check that docker is on our path -[ "$(type -p docker)" == "" ] && fail docker not found on the search path 3 +[ "$(type -p docker)" == "" ] && fail "docker not found on the search path" 3 # Check that we can contact the docker daemon docker ps > /dev/null diff --git a/docker-compose/bin/hub_replication_changepassword.sh b/docker-compose/bin/hub_replication_changepassword.sh index a07d78c..5dfd17e 100755 --- a/docker-compose/bin/hub_replication_changepassword.sh +++ b/docker-compose/bin/hub_replication_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.3.1} +HUB_VERSION=${HUB_VERSION:-4.4.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-compose/bin/hub_reportdb_changepassword.sh b/docker-compose/bin/hub_reportdb_changepassword.sh index d0259bd..2a80ce9 100755 --- a/docker-compose/bin/hub_reportdb_changepassword.sh +++ b/docker-compose/bin/hub_reportdb_changepassword.sh @@ -3,7 +3,7 @@ set -e TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.3.1} +HUB_VERSION=${HUB_VERSION:-4.4.0} HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} function fail() { diff --git a/docker-compose/bin/system_check.sh b/docker-compose/bin/system_check.sh index 50d2319..95ce289 100755 --- a/docker-compose/bin/system_check.sh +++ b/docker-compose/bin/system_check.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -HUB_VERSION=${HUB_VERSION:-4.3.1} +HUB_VERSION=${HUB_VERSION:-4.4.0} TIMESTAMP=`date` YEAR=`echo $TIMESTAMP | awk -F' ' '{print $6}'` MONTH=`echo $TIMESTAMP | awk -F' ' '{print $2}'` diff --git a/docker-compose/docker-compose.dbmigrate.yml b/docker-compose/docker-compose.dbmigrate.yml index 1ece5c3..86dbe05 100644 --- a/docker-compose/docker-compose.dbmigrate.yml +++ b/docker-compose/docker-compose.dbmigrate.yml @@ -1,9 +1,11 @@ version: '2.1' services: cfssl: - image: blackducksoftware/hub-cfssl:4.3.1 + image: blackducksoftware/hub-cfssl:4.4.0 + read_only: true volumes: - cert-volume:/etc/cfssl + - /tmp healthcheck: test: [ "CMD", "/usr/local/bin/docker-healthcheck.sh", "http://localhost:8888/api/v1/cfssl/scaninfo" ] interval: 30s @@ -12,9 +14,12 @@ services: user: 'cfssl:root' logstash: - image: blackducksoftware/hub-logstash:4.3.1 + image: blackducksoftware/hub-logstash:4.4.0 + read_only: true volumes: - log-volume:/var/lib/logstash/data + - /tmp + - /usr/share/logstash healthcheck: test: [ "CMD", "/usr/local/bin/docker-healthcheck.sh", "http://localhost:9600/" ] interval: 30s @@ -23,12 +28,16 @@ services: user: 'logstash:root' postgres: - image: blackducksoftware/hub-postgres:4.3.1 + image: blackducksoftware/hub-postgres:4.4.0 + read_only: true links: - cfssl - logstash volumes: - postgres96-data-volume:/var/lib/postgresql/data + - /opt/blackduck + - /tmp + - /var/run healthcheck: test: [ "CMD", "/usr/local/bin/docker-healthcheck.sh" ] interval: 30s diff --git a/docker-compose/docker-compose.externaldb.yml b/docker-compose/docker-compose.externaldb.yml index 2ee7ed2..f33d9d6 100644 --- a/docker-compose/docker-compose.externaldb.yml +++ b/docker-compose/docker-compose.externaldb.yml @@ -1,8 +1,9 @@ version: '2.1' services: cfssl: - image: blackducksoftware/hub-cfssl:4.3.1 - volumes: ['cert-volume:/etc/cfssl'] + image: blackducksoftware/hub-cfssl:4.4.0 + read_only: true + volumes: ['cert-volume:/etc/cfssl', /tmp] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8888/api/v1/cfssl/scaninfo'] interval: 30s @@ -12,8 
+13,9 @@ services: restart: always mem_limit: 512M logstash: - image: blackducksoftware/hub-logstash:4.3.1 - volumes: ['log-volume:/var/lib/logstash/data'] + image: blackducksoftware/hub-logstash:4.4.0 + read_only: true + volumes: ['log-volume:/var/lib/logstash/data', /tmp, /usr/share/logstash] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:9600/'] interval: 30s @@ -23,9 +25,11 @@ services: restart: always mem_limit: 640M registration: - image: blackducksoftware/hub-registration:4.3.1 + image: blackducksoftware/hub-registration:4.4.0 + read_only: true links: [logstash] - volumes: ['config-volume:/opt/blackduck/hub/registration/config'] + volumes: ['config-volume:/opt/blackduck/hub/registration/config', /tmp, /opt/blackduck/hub/registration/logs, + /opt/blackduck/hub/tomcat/logs, /opt/blackduck/hub/filebeat] env_file: hub-proxy.env healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8080/registration/health-checks/liveness'] @@ -36,7 +40,8 @@ services: restart: always mem_limit: 640M zookeeper: - image: blackducksoftware/hub-zookeeper:4.3.1 + image: blackducksoftware/hub-zookeeper:4.4.0 + read_only: true links: [logstash] healthcheck: test: [CMD, zkServer.sh, status, /opt/blackduck/zookeeper/conf/zoo.cfg] @@ -47,9 +52,11 @@ services: restart: always mem_limit: 384M solr: - image: blackducksoftware/hub-solr:4.3.1 + image: blackducksoftware/hub-solr:4.4.0 + read_only: true links: [logstash, zookeeper] - volumes: ['solr6-volume:/opt/blackduck/hub/solr/cores.data'] + volumes: ['solr6-volume:/opt/blackduck/hub/solr/cores.data', /opt/solr, /opt/blackduck/, + /tmp] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8983/solr/project/admin/ping?wt=json'] interval: 30s @@ -59,9 +66,11 @@ services: restart: always mem_limit: 640M webapp: - image: blackducksoftware/hub-webapp:4.3.1 + read_only: true + image: blackducksoftware/hub-webapp:4.4.0 links: [cfssl, logstash, registration, zookeeper, solr] - volumes: ['log-volume:/opt/blackduck/hub/logs', 'webapp-volume:/opt/blackduck/hub/hub-webapp/security'] + volumes: ['log-volume:/opt/blackduck/hub/logs', 'webapp-volume:/opt/blackduck/hub/hub-webapp/security', + /tmp, /var/run, /opt/blackduck] env_file: [hub-proxy.env, hub-postgres.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://127.0.0.1:8080/api/health-checks/liveness'] @@ -69,11 +78,29 @@ services: timeout: 60s retries: 15 user: tomcat:root - environment: {HUB_MAX_MEMORY: 4096m} + environment: {HUB_MAX_MEMORY: 2048m} restart: always - mem_limit: 4608M + mem_limit: 2560M + scan: + image: blackducksoftware/hub-scan:4.4.0 + read_only: true + links: [cfssl, logstash, registration, zookeeper] + volumes: ['log-volume:/opt/blackduck/hub/logs', 'scan-volume:/opt/blackduck/hub/hub-scan/security', + /tmp, /opt/blackduck/hub/tomcat, /opt/blackduck/hub/hub-scan/logs, /opt/blackduck/hub/filebeat] + env_file: [hub-proxy.env, hub-postgres.env] + healthcheck: + test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://127.0.0.1:8080/api/health-checks/liveness'] + interval: 30s + timeout: 60s + retries: 15 + user: tomcat:root + environment: {HUB_MAX_MEMORY: 2048m} + restart: always + mem_limit: 2560M jobrunner: - image: blackducksoftware/hub-jobrunner:4.3.1 + image: blackducksoftware/hub-jobrunner:4.4.0 + read_only: true + volumes: [/opt/blackduck/hub, /var/lib/blckdck/hub, /tmp] links: [cfssl, logstash, registration, zookeeper, solr] env_file: [hub-proxy.env, hub-postgres.env] healthcheck: @@ -86,11 
+113,13 @@ services: restart: always mem_limit: 4608M webserver: - image: blackducksoftware/hub-nginx:4.3.1 + image: blackducksoftware/hub-nginx:4.4.0 + read_only: true ports: ['443:8443'] env_file: hub-webserver.env links: [webapp, cfssl, documentation] - volumes: ['webserver-volume:/opt/blackduck/hub/webserver/security'] + volumes: ['webserver-volume:/opt/blackduck/hub/webserver/security', /etc/nginx, + /opt/blackduck, /var, /tmp] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', /opt/blackduck/hub/webserver/security/root.crt] @@ -101,7 +130,9 @@ services: restart: always mem_limit: 640M documentation: - image: blackducksoftware/hub-documentation:4.3.1 + image: blackducksoftware/hub-documentation:4.4.0 + read_only: true + volumes: [/tmp, /opt/blackduck/hub] links: [logstash] user: tomcat:root healthcheck: @@ -112,4 +143,4 @@ services: restart: always mem_limit: 512M volumes: {cert-volume: null, config-volume: null, log-volume: null, webserver-volume: null, - webapp-volume: null, solr6-volume: null} + webapp-volume: null, scan-volume: null, solr6-volume: null, monitor-log-volume: null} diff --git a/docker-compose/docker-compose.yml b/docker-compose/docker-compose.yml index cd7fdb3..8965a3c 100644 --- a/docker-compose/docker-compose.yml +++ b/docker-compose/docker-compose.yml @@ -1,10 +1,12 @@ version: '2.1' services: postgres: - image: blackducksoftware/hub-postgres:4.3.1 + read_only: true + image: blackducksoftware/hub-postgres:4.4.0 ports: ['55436:5432'] links: [cfssl, logstash] - volumes: ['postgres96-data-volume:/var/lib/postgresql/data'] + volumes: ['postgres96-data-volume:/var/lib/postgresql/data', /opt/blackduck, /tmp, + /var/run] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh] interval: 30s @@ -16,21 +18,41 @@ services: webapp: links: [postgres, cfssl, logstash, registration, zookeeper, solr] user: tomcat:root - image: blackducksoftware/hub-webapp:4.3.1 - volumes: ['log-volume:/opt/blackduck/hub/logs', 'webapp-volume:/opt/blackduck/hub/hub-webapp/security'] + read_only: true + image: blackducksoftware/hub-webapp:4.4.0 + volumes: ['log-volume:/opt/blackduck/hub/logs', 'webapp-volume:/opt/blackduck/hub/hub-webapp/security', + /tmp, /var/run, /opt/blackduck] env_file: [hub-proxy.env] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://127.0.0.1:8080/api/health-checks/liveness'] interval: 30s timeout: 60s retries: 15 - environment: {HUB_MAX_MEMORY: 4096m} + environment: {HUB_MAX_MEMORY: 2048m} restart: always - mem_limit: 4608M + mem_limit: 2560M + scan: + links: [postgres, cfssl, logstash, registration, zookeeper] + user: tomcat:root + image: blackducksoftware/hub-scan:4.4.0 + read_only: true + volumes: ['log-volume:/opt/blackduck/hub/logs', 'scan-volume:/opt/blackduck/hub/hub-scan/security', + /tmp, /opt/blackduck/hub/tomcat, /opt/blackduck/hub/hub-scan/logs, /opt/blackduck/hub/filebeat] + env_file: [hub-proxy.env] + healthcheck: + test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://127.0.0.1:8080/api/health-checks/liveness'] + interval: 30s + timeout: 60s + retries: 15 + environment: {HUB_MAX_MEMORY: 2048m} + restart: always + mem_limit: 2560M jobrunner: links: [postgres, cfssl, logstash, registration, zookeeper, solr] user: jobrunner:root - image: blackducksoftware/hub-jobrunner:4.3.1 + image: blackducksoftware/hub-jobrunner:4.4.0 + read_only: true + volumes: [/opt/blackduck/hub, /var/lib/blckdck/hub, /tmp] env_file: [hub-proxy.env] healthcheck: test: [CMD, 
/usr/local/bin/docker-healthcheck.sh] @@ -41,8 +63,9 @@ services: restart: always mem_limit: 4608M cfssl: - image: blackducksoftware/hub-cfssl:4.3.1 - volumes: ['cert-volume:/etc/cfssl'] + image: blackducksoftware/hub-cfssl:4.4.0 + read_only: true + volumes: ['cert-volume:/etc/cfssl', /tmp] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8888/api/v1/cfssl/scaninfo'] interval: 30s @@ -52,8 +75,9 @@ services: restart: always mem_limit: 512M logstash: - image: blackducksoftware/hub-logstash:4.3.1 - volumes: ['log-volume:/var/lib/logstash/data'] + image: blackducksoftware/hub-logstash:4.4.0 + read_only: true + volumes: ['log-volume:/var/lib/logstash/data', /tmp, /usr/share/logstash] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:9600/'] interval: 30s @@ -63,9 +87,11 @@ services: restart: always mem_limit: 640M registration: - image: blackducksoftware/hub-registration:4.3.1 + image: blackducksoftware/hub-registration:4.4.0 + read_only: true links: [logstash] - volumes: ['config-volume:/opt/blackduck/hub/registration/config'] + volumes: ['config-volume:/opt/blackduck/hub/registration/config', /tmp, /opt/blackduck/hub/registration/logs, + /opt/blackduck/hub/tomcat/logs, /opt/blackduck/hub/filebeat] env_file: hub-proxy.env healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8080/registration/health-checks/liveness'] @@ -76,7 +102,8 @@ services: restart: always mem_limit: 640M zookeeper: - image: blackducksoftware/hub-zookeeper:4.3.1 + image: blackducksoftware/hub-zookeeper:4.4.0 + read_only: true links: [logstash] healthcheck: test: [CMD, zkServer.sh, status, /opt/blackduck/zookeeper/conf/zoo.cfg] @@ -87,9 +114,11 @@ services: restart: always mem_limit: 384M solr: - image: blackducksoftware/hub-solr:4.3.1 + image: blackducksoftware/hub-solr:4.4.0 + read_only: true links: [logstash, zookeeper] - volumes: ['solr6-volume:/opt/blackduck/hub/solr/cores.data'] + volumes: ['solr6-volume:/opt/blackduck/hub/solr/cores.data', /opt/solr, /opt/blackduck/, + /tmp] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'http://localhost:8983/solr/project/admin/ping?wt=json'] interval: 30s @@ -99,11 +128,13 @@ services: restart: always mem_limit: 640M webserver: - image: blackducksoftware/hub-nginx:4.3.1 + image: blackducksoftware/hub-nginx:4.4.0 + read_only: true ports: ['443:8443'] env_file: hub-webserver.env links: [webapp, cfssl, documentation] - volumes: ['webserver-volume:/opt/blackduck/hub/webserver/security'] + volumes: ['webserver-volume:/opt/blackduck/hub/webserver/security', /etc/nginx, + /opt/blackduck, /var, /tmp] healthcheck: test: [CMD, /usr/local/bin/docker-healthcheck.sh, 'https://localhost:8443/health-checks/liveness', /opt/blackduck/hub/webserver/security/root.crt] @@ -114,7 +145,9 @@ services: restart: always mem_limit: 640M documentation: - image: blackducksoftware/hub-documentation:4.3.1 + image: blackducksoftware/hub-documentation:4.4.0 + read_only: true + volumes: [/tmp, /opt/blackduck/hub] links: [logstash] user: tomcat:root healthcheck: @@ -125,4 +158,5 @@ services: restart: always mem_limit: 512M volumes: {postgres96-data-volume: null, cert-volume: null, config-volume: null, log-volume: null, - webserver-volume: null, webapp-volume: null, solr6-volume: null} + webserver-volume: null, webapp-volume: null, scan-volume: null, solr6-volume: null, + monitor-log-volume: null} diff --git a/docker-compose/hub-proxy.env b/docker-compose/hub-proxy.env index db2f584..b75fdf0 100644 --- 
a/docker-compose/hub-proxy.env +++ b/docker-compose/hub-proxy.env @@ -27,5 +27,5 @@ BLACKDUCK_SWAGGER_PROXY_PREFIX= BLACKDUCK_SWAGGER_DISPLAYALL= # Do not change -HUB_VERSION=4.3.1 +HUB_VERSION=4.4.0 diff --git a/docker-run/README.md b/docker-run/README.md deleted file mode 100644 index dc706d7..0000000 --- a/docker-run/README.md +++ /dev/null @@ -1,170 +0,0 @@ -# Running Hub in Docker (Using Docker Run) - -This is the bundle for running with Docker Run and no additional orchestration - -## Important Upgrade Announcement - -Customers upgrading from a version prior to 4.2, will need to perform a data migration as part of their upgrade process. A high level description of the upgrade is located in the Important _Upgrade_Announcement.md file in the root directory of this package. Detailed instructions to perform the data migration are located in the “Migrating Hub database data” listed below. - -## Contents - -Here are the descriptions of the files in this distribution: - -1. docker-hub.sh - This file is a multi-purpose orchestration script useful for starting, stopping, and tearing down the Hub using standard Docker CLI commands. -2. hub-proxy.env - The default, empty Proxy configuration file. This is required to exist, even if it is left blank. - -## Requirements - -Hub has been tested on Docker 17.03.x (ce/ee). No additional installations are needed. - -## Running - -Note: These command might require being run as either a root user, a user in the docker group, or with 'sudo'. - -``` - -# Migrate data from the PostgreSQL dump file using docker-hub.sh -$ docker-hub.sh -r 3.7.0 -m - -# Start the Hub using docker-hub.sh -$ docker-hub.sh -r 3.7.0 -u - -# Stop the Hub using docker-hub.sh -$ docker-hub.sh -s - -# Tearing down the Hub using docker-hub.sh, but leaving Volumes in place -$ docker-hub.sh -d - -# Tearing down the Hub using docker-hub.sh and removing Volumes (removes ALL data) -$ docker-hub.sh -d -v -``` - -### Running with External PostgreSQL - -Hub can be run using a PostgreSQL instance other than the provided hub-postgres docker image. This configuration can only be managed using the _docker-hub.sh_ script. Invocation is as above, except with the addition of the _-e_ (_--externadb_) option: - -``` -# Start the Hub using docker-hub.sh -$ docker-hub.sh -r 3.7.0 -u -e - -# Stop the Hub using docker-hub.sh -$ docker-hub.sh -s -e - -# Tearing down the Hub using docker-hub.sh, but leaving Volumes in place -$ docker-hub.sh -d -e - -# Tearing down the Hub using docker-hub.sh and removing Volumes (removes ALL docker-managed data; the external PostgreSQL instance is not affected) -$ docker-hub.sh -d -v -e -``` - -The _docker-hub.sh_ script does not attempt to manage the external PostgreSQL instance and assumes that it has already been configured (see External PostgreSQL Settings below). - - -### Full Usage Documentation - -#### docker-hub.sh -This script accepts several arguments. Do note that some arguments are mutually exclusive, and cannot be run in combination. Also note that the --volumes flag will DELETE DATA from the system, and this is irreversible. Pleaes understand this before running the command. - -``` -$ docker-hub.sh --help -This should be started with the following options: - -r | --release : The Hub version that should be deployed. This field is mandatory when running --up. - -m | --migrate : Migrates Hub data from the PostgreSQL dump file. Typically this is run only once and very first if data needs to be migrated. 
- -s | --stop : Stops the containers, but leaves them on the system. Does not affect volumes. - -u | --up : Starts the containers. Creates volumes if they do not already exist. - -d | --down : Stops and removes the containers. If --volumes is provided, it will remove volumes as well. - -v | --volumes : If provided with --down, this script will remove the volumes and all data stored within them. - -e | --externaldb : Use an external PostgreSQL instance rather than the default docker container; cannot be used with --migrate. -``` - -Note, you cannot run --up, --stop and --down in the same command. Also, --volumes will not work with --up or --stop. - -Lastly, --release is **required** to be run with --up. - -Error messages will be presented if these rules are broken, without affecting the running system. - - -## Configuration - -Custom configuration may be necessary for host name, port, or proxy server management. - -### Web Server Settings ----- - -#### Host Name Modification - -When the web server starts up, if it does not have certificates configured it will generate an HTTPS certificate. - -Configuration is needed to tell the web server which real host name it will listening on so that the host names can match. Otherwise the certificate will only have the service name to use as the host name. - -To modify the real host name, edit the hub-webserver.env file to update the desired host name value. - -#### Port Modification - -The web server is configured with a host to container port mapping. If a port change is desired, the port mapping should be modified along with the associated configuration. - -To modify the host port, edit the port mapping as well as the hub-webserver.env file to update the desired host and/or container port value. - -If the container port is modified, any healthcheck URL references should also be modified using the updated container port value. - -### Proxy Settings - -There are currently three containers that need access to services hosted by Black Duck Software: - -* registration -* jobrunner -* webapp - -If a proxy is required for external internet access you'll need to configure it. - -#### Steps - -1. Edit the file _hub-proxy.env_ - -#### Authenticated Proxy Password - -First, these are the services which require proxy password. -* webapp -* registration -* jobrunner - -There are two methods for specifying a proxy password when using Docker run. - -* Mount a directory that contains a text file called 'HUB_PROXY_PASSWORD_FILE' to /run/secrets -You can mount the volume by editing the script docker-hub.sh. Add an option (-v) after 'docker run...' for the services mentioned right above -``` --v :/run/secrets -``` - -OR - -* Specify an environment variable called 'HUB_PROXY_PASSWORD' that contains the proxy password -``` --e HUB_PROXY_PASSWORD='PASSWORD' -``` - -### Using Custom web server certificate-key pair -*For the upgrading users from version < 4.0 : 'hub_webserver_use_custom_cert_key.sh' no longer exists so please follow the updated instruction below if you wish to use the custom webserver certificate.* -Hub allows users to use their own web server certificate-key pairs for establishing ssl connection. - -* Mount a directory that contains the custom certificate and key file each as 'WEBSERVER_CUSTOM_CERT_FILE' and 'WEBSERVER_CUSTOM_KEY_FILE' to /run/secrets -You can mount the volume by editing the script docker-hub.sh. Add an option (-v) after 'docker run...' 
for the services mentioned right above -``` --v :/run/secrets -``` - -### External PostgreSQL Settings - -The external PostgreSQL instance needs to initialized by creating users, databases, etc., and connection information must be provided to the _webapp_ and _jobrunner_ containers. - -#### Steps - -1. Create a database user named _blackduck_ with admisitrator privileges. (On Amazon RDS, do this by setting the "Master User" to "blackduck" when creating the RDS instance.) -2. Run the _external-postgres-init.pgsql_ script to create users, databases, etc.; for example, - ``` - psql -U blackduck -h -p -f external_postgres_init.pgsql postgres - ``` -3. Using your preferred PostgreSQL administration tool, set passwords for the *blackduck* and *blackduck_user* database users (which were created by step #2 above). -4. Edit _hub-postgres.env_ to specify database connection parameters. -5. Create a file named 'HUB_POSTGRES_USER_PASSWORD_FILE' with the password for the *blackduck_user* user. -6. Create a file named 'HUB_POSTGRES_ADMIN_PASSWORD_FILE' with the password for the _blackduck_ user. diff --git a/docker-run/bin/hub_create_data_dump.sh b/docker-run/bin/hub_create_data_dump.sh deleted file mode 100755 index 2db88a9..0000000 --- a/docker-run/bin/hub_create_data_dump.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -# Prerequisites: -# 1. The database container is running. -# 2. The database container has been properly initialized. - -set -e - -TIMEOUT=${TIMEOUT:-10} -HUB_VERSION=${HUB_VERSION:-4.3.1} -HUB_DATABASE_IMAGE_NAME=${HUB_DATABASE_IMAGE_NAME:-postgres} - -function fail() { - message=$1 - exit_status=$2 - echo "${message}" - exit ${exit_status} -} - -function set_container_id() { - container_id=( `docker ps -q -f label=com.blackducksoftware.hub.version=${HUB_VERSION} \ - -f label=com.blackducksoftware.hub.image=${HUB_DATABASE_IMAGE_NAME}` ) - return 0 -} - -# There should be one argument: destination of the path with name of the file -[ $# -ne "1" ] && fail "Usage: $0 " 1 -local_dest_dump_file="$1" - -# Check that docker is on our path -[ "$(type -p docker)" == "" ] && fail docker not found on the search path 2 - -# Check that we can contact the docker daemon -docker ps > /dev/null -success=$? -[ ${success} -ne 0 ] && fail "Could not contact docker daemon. Is DOCKER_HOST set correctly?" 3 - -# Find the database container ID(s); give the container a few seconds to start if necessary -sleep_count=0 -until set_container_id && [ "${#container_id[*]}" -gt 0 ] ; do - sleep_count=$(( ${sleep_count} + 1 )) - [ ${sleep_count} -gt ${TIMEOUT} ] && fail "Database container not ready after ${TIMEOUT} seconds." 4 - sleep 1 -done - -# Check that exactly one instance of the database container is up and running -[ "${#container_id[*]}" -ne 1 ] && fail "${#container_id[*]} instances of the hub database container are running." 5 - -# Make sure that postgres is ready -sleep_count=0 -until docker exec -i -u postgres ${container_id} pg_isready -q ; do - sleep_count=$(( ${sleep_count} + 1 )) - [ ${sleep_count} -gt ${TIMEOUT} ] && fail "Database server in container ${container_id} not ready after ${TIMEOUT} seconds." 
6 - sleep 1 -done - -# Make sure that bds_hub exists -sleep_count=0 -until [ "$(docker exec -i -u postgres ${container_id} psql -A -t -c "select count(*) from pg_database where datname = 'bds_hub'" postgres 2> /dev/null)" -eq 1 ] ; do - sleep_count=$(( ${sleep_count} + 1 )) - [ ${sleep_count} -gt ${TIMEOUT} ] && fail "Database bds_hub in container ${container_id} not ready after ${TIMEOUT} seconds." 7 - sleep 1 -done - -# Here we go... -echo Creating a dump from the container "${container_id}" '...' -docker exec -it ${container_id} pg_dump -U blackduck -Fc -f /tmp/bds_hub.dump bds_hub -exitCode=$? -[ ${exitCode} -ne 0 ] && fail "Cannot create the dump file from the container [Container Id: ${container_id}]" 8 - -docker cp ${container_id}:/tmp/bds_hub.dump ${local_dest_dump_file} -exitCode=$? -[ ${exitCode} -ne 0 ] && fail "Was not able to copy the dump file over [Container Id: ${container_id}]" 9 - -# After copy, remove the dump from the container. -docker exec -it ${container_id} rm /tmp/bds_hub.dump - -echo Success with creating the dump and copying over to "[Destination Dir: $(dirname ${local_dest_dump_file})]" from the container: "[Container Id: ${container_id}]" diff --git a/docker-run/bin/system_check.sh b/docker-run/bin/system_check.sh deleted file mode 100755 index 50d2319..0000000 --- a/docker-run/bin/system_check.sh +++ /dev/null @@ -1,1212 +0,0 @@ -#!/usr/bin/env bash - -HUB_VERSION=${HUB_VERSION:-4.3.1} -TIMESTAMP=`date` -YEAR=`echo $TIMESTAMP | awk -F' ' '{print $6}'` -MONTH=`echo $TIMESTAMP | awk -F' ' '{print $2}'` -DAY_OF_MONTH=`echo $TIMESTAMP | awk -F' ' '{print $3}'` -TIME_OF_DAY=`echo $TIMESTAMP | awk -F' ' '{print $4}'` -HR=`echo $TIME_OF_DAY | awk -F':' '{print $1}'` -MIN=`echo $TIME_OF_DAY | awk -F':' '{print $2}'` -SEC=`echo $TIME_OF_DAY | awk -F':' '{print $3}'` -OUTPUT_FILE=${SYSTEM_CHECK_OUTPUT_FILE:-"system_check_${YEAR}_${MONTH}_${DAY_OF_MONTH}_${HR}_${MIN}_${SEC}.txt"} -CPUS_REQUIRED=4 -# Our RAM requirements are as follows: -# Non-Swarm Install: 16GB -# Swarm Install with many nodes: 16GB per node -# Swarm Install with a single node: 20GB -# -# The script plays some games here because linux never reports 100% of the physical memory on a system, -# so the way this script checks memory linux will usually report 1GB less than the correct amount. -# -RAM_REQUIRED_GB=15 -RAM_REQUIRED_GB_SWARM=19 -RAM_REQUIRED_PHYSICAL_DESCRIPTION="16GB Required" -RAM_REQUIRED_PHYSICAL_DESCRIPTION_SWARM="20 Required on Swarm Node if all BD Containers are on a single Node" - -DISK_REQUIRED_MB=250000 -MIN_DOCKER_VERSION=17.03 -MIN_DOCKER_MAJOR_VERSION=17 -MIN_DOCKER_MINOR_VERSION=03 - -printf "Writing System Check Report to: %s\n" "$OUTPUT_FILE" - -check_user() { - echo "Checking user..." - ignored_user_prompt=FALSE - id=`id -u` - current_username=`id -un` - if [ "$id" -ne 0 ] ; then - echo "This script must be run as root for all features to work." - - echo "This script will gather a reduced set of information if run this way, but you will likely " - echo "be asked by BlackDuck support to re-run the script with root privileges." - echo -n "Are you sure you wish to proceed as a non-privileged user? [y/N]: " - read proceed - proceed_upper=`echo $proceed | awk '{print toupper($0)}'` - if [ "$proceed_upper" != "Y" ] ; then - exit -1 - fi - is_root=FALSE - ignored_user_prompt=TRUE - return - fi - is_root=TRUE -} - -OS_UNKNOWN="unknown" -_SetOSName() -{ - echo "Checking OS..." - # Set the PROP_OS_NAME variable to a short string identifying the - # operating system version. 
This string is also the path where we - # store the 3rd-party rpms. - # - # Usage: _SetOSName 3rd-party-dir - - # Find the local release name. - # See http://linuxmafia.com/faq/Admin/release-files.html for more ideas. - - IS_LINUX=TRUE - command -v lsb_release > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - PROP_HAVE_lsb_release=0 - else - PROP_HAVE_lsb_release=1 - fi - - if [ "$PROP_HAVE_lsb_release" == 1 ]; then - PROP_OS_NAME="`lsb_release -a ; echo ; echo -n uname -a:\ ; uname -a`" - elif [ -e /etc/fedora-release ]; then - PROP_OS_NAME="`cat /etc/fedora-release`" - elif [ -e /etc/redhat-release ]; then - PROP_OS_NAME="`cat /etc/redhat-release`" - elif [ -e /etc/centos-release ]; then - PROP_OS_NAME="`cat /etc/centos-release`" - elif [ -e /etc/SuSE-release ]; then - PROP_OS_NAME="`cat /etc/SuSE-release`" - elif [ -e /etc/gentoo-release ]; then - PROP_OS_NAME="`cat /etc/gentoo-release`" - elif [ -e /etc/os-release ]; then - PROP_OS_NAME="`cat /etc/os-release`" - else - PROP_OS_NAME="`echo -n uname -a:\ ; uname -a`" - IS_LINUX=FALSE - fi -} - -CPUINFO_FILE="/proc/cpuinfo" -get_cpu_info() { - echo "Checking CPU Information..." - if [ -e "$CPUINFO_FILE" ] ; then - CPU_INFO=`cat $CPUINFO_FILE` - else - CPU_INFO="CPU Info Unavailable - non linux system" - fi -} - -check_cpu_count() { - echo "Counting CPUs..." - command -v lscpu > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - CPU_COUNT_INFO="Unable to Check # CPUS, lscpu not found" - return - fi - - CPU_COUNT=`lscpu -p=cpu | grep -v -c '#'` - if [ "$CPU_COUNT" -lt "$CPUS_REQUIRED" ] ; then - CPU_COUNT_INFO="CPU Count: FAILED ($CPUS_REQUIRED required)" - else - CPU_COUNT_INFO="CPU Count: PASSED" - fi - -} - -get_mem_info() { - echo "Retrieving memory Information..." - command -v free > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - # Free not available - MEMORY_INFO="Unable to get memory information - non linux system." - else - MEMORY_INFO="`free -h`" - fi -} - -check_sufficient_ram() { - echo "Checking if sufficient RAM is present..." - command -v free > /dev/null 2>&1 - - if [ $? -ne 0 ] ; then - # Free not available - SUFFICIENT_RAM_INFO="Unable to get memory information - non linux system." - return - fi - - SELECTED_RAM_REQUIREMENT=$RAM_REQUIRED_GB - SELECTED_RAM_DESCRIPTION=$RAM_REQUIRED_PHYSICAL_DESCRIPTION - - if [ "$SWARM_ENABLED" == "TRUE" ] ; then - SELECTED_RAM_REQUIREMENT=$RAM_REQUIRED_GB_SWARM - SELECTED_RAM_DESCRIPTION=$RAM_REQUIRED_PHYSICAL_DESCRIPTION_SWARM - fi - - total_ram_in_gb=`free -g | grep 'Mem' | awk -F' ' '{print $2}'` - if [ "$total_ram_in_gb" -lt "$SELECTED_RAM_REQUIREMENT" ] ; then - SUFFICIENT_RAM_INFO="Total Ram: FAILED ($SELECTED_RAM_DESCRIPTION)" - else - SUFFICIENT_RAM_INFO="Total RAM: PASSED" - fi -} - -get_disk_info() { - echo "Checking Disk Information..." - command -v df > /dev/null 2>&1 - if [ $? 
-ne 0 ] ; then - DISK_INFO="Unable to get Disk Info - df not present" - else - DISK_INFO="`df -h`" - # Get disk space in human readable format with totals, select only the total line - # select the 2nd column from that, then remove the last character to get rid of the "G" for - # gigabyte - if [ "$IS_LINUX" != "TRUE" ] ; then - TOTAL_DISK_SPACE="Unknown" - DISK_SPACE_MESSAGE="Cannot determine sufficient disk space on non linux system" - return; - fi - - TOTAL_DISK_SPACE=`df -m --total | grep 'total' | awk -F' ' '{print $2}'` - if [ "$TOTAL_DISK_SPACE" -lt "$DISK_REQUIRED_MB" ] ; then - DISK_SPACE_MESSAGE="Insufficient Disk Space (found: ${TOTAL_DISK_SPACE}mb, required: ${DISK_REQUIRED_MB}mb)" - else - DISK_SPACE_MESSAGE="Sufficient Disk Space (found: ${TOTAL_DISK_SPACE}mb, required ${DISK_REQUIRED_MB}mb)" - fi - fi -} - -get_package_list() { - if [ "$IS_LINUX" != "TRUE" ] ; then - PKG_LIST="Cannot retrieve package list - non linux system" - return; - fi - - PKG_LIST="Cannot Retrieve Package List - Could not determine package manager" - - # RPM - rpm -qa - command -v rpm > /dev/null 2>&1 - if [ $? -eq 0 ] ; then - PKG_LIST=`rpm -qa` - return; - fi - - # APT - apt list --installed - command -v apt > /dev/null 2>&1 - if [ $? -eq 0 ] ; then - PKG_LIST=`apt list --installed` - return; - fi - - # DPKG - dpkg --get-selections | grep -v deinstall - command -v dpkg > /dev/null 2>&1 - if [ $? -eq 0 ] ; then - PKG_LIST=`dpkg --get-selections | grep -v deinstall` - return; - fi - - -} - -get_interface_info() { - - echo "Checking Network interface configuration..." - command -v ifconfig > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - ifconfig_data="Unable to run ifconfig - cannot list network interface configuration" - else - ifconfig_data=`ifconfig -a` - fi - -} - -get_routing_info() { - echo "Checking Routing Table..." - if [ "$IS_LINUX" != "TRUE" ] ; then - routing_table="Unable to check routing table - Non Linux System" - return - fi - - routing_table=`ip route list` -} - -get_bridge_info() { - echo "Checking Network Bridge Information..." - if [ "$IS_LINUX" != "TRUE" ] ; then - brctl_info="Unable to get Network Bridge Information, non-linux system." - return - fi - - command -v brctl > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - brctl_info="Unable to get Network Bridge Information, bridge-utils not installed." - return - fi - - brctl_info=`brctl show` - -} - - -# Check what ports are being listened on currently - may be useful for bind errors -check_ports() { - echo "Checking Network Ports..." - command -v netstat > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - listen_ports="Unable to run netstat - cannot list ports being listened on." - else - listen_ports=`netstat -ln` - fi -} - -get_processes() { - echo "Checking Running Processes..." - RUNNING_PROCESSES="" - command -v ps > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - echo "Cannot Check Processes - ps not found" - return - fi - - RUNNING_PROCESSES=`ps aux` -} - - -# Check if Docker is installed -is_docker_present() { - echo "Checking For Docker..." - docker_installed=FALSE - command -v docker > /dev/null 2>&1 - if [ $? -eq 0 ] ; then - docker_installed=TRUE - fi -} - -# Check the version of docker -get_docker_version() { - if [ "$docker_installed" == "TRUE" ] ; then - echo "Checking Docker Version..." - docker_version=`docker --version` - - docker_major_version=`docker --version | awk -F' ' '{print $3}' | awk -F'.' '{print $1}'` - docker_minor_version=`docker --version | awk -F' ' '{print $3}' | awk -F'.' 
'{print $1}'` - - if [ "$docker_major_version" -lt "$MIN_DOCKER_MAJOR_VERSION" ] ; then - docker_version_check="Docker Version Check - Failed: ($MIN_DOCKER_VERSION required)" - return - fi - - if [ "$docker_minor_version" -lt "$MIN_DOCKER_MINOR_VERSION" ] ; then - docker_version_check="Docker Version Check - Failed: ($MIN_DOCKER_VERSION required)" - return - fi - - docker_version_check="Docker Version Check - Passed" - return - fi - - docker_version_check="Docker Version Check - Failed - Docker not present" - -} - -# Check if docker-compose is installed -check_docker_compose_installed() { - echo "Checking For Docker Compose..." - command -v docker-compose > /dev/null 2>&1 - if [ $? -eq 0 ] ; then - docker_compose_installed=TRUE - else - docker_compose_installed=FALSE - fi -} - -find_docker_compose_version() { - if [ "$docker_compose_installed" == "TRUE" ] ; then - echo "Checking Docker Compose Version..." - docker_compose_version=`docker-compose --version` - else - docker_compose_version="Not Installed" - fi -} - -check_docker_systemctl_status() { - if [ "$docker_installed" == "FALSE" ] ; then - docker_enabled_at_startup=FALSE - return - fi - echo "Checking Systemd to determine if docker is enabled at boot..." - command -v systemctl > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - docker_enabled_at_startup="Unable to determine - systemctl not found" - return - fi - - systemctl list-unit-files | grep enabled | grep docker > /dev/null 2>&1 - if [ $? -eq 0 ] ; then - docker_enabled_at_startup=TRUE - else - docker_enabled_at_startup=FALSE - fi - -} - -check_docker_images() { - if [ "$is_root" == "FALSE" ] ; then - bd_docker_images="Cannot list docker images without root access." - docker_image_inspection="Cannot inspect docker images without root access." - return - fi - - echo "Checking Docker Images Present..." - - if [ "$docker_installed" == "TRUE" ] ; then - bd_docker_images=`docker images | awk -F' ' '{printf("%-80s%-80s\n",$1,$2);}' | sort` - docker_image_inspection=`docker image ls -aq | xargs docker image inspect` - else - bd_docker_images="Docker not installed, no images present." - docker_image_inspection="Docker not installed, no images present." - fi -} - -check_docker_containers() { - if [ "$is_root" == "FALSE" ] ; then - bd_docker_containers="Cannot list docker containers without root access" - container_diff_report="Cannot inspect docker containers without root access" - return - fi - - if [ "$docker_installed" == "TRUE" ] ; then - bd_docker_containers=`docker container ls` - else - bd_docker_containers="Docker Not installed, no containers present." - container_diff_report="Docker Not installed, no containers present." - return - fi - echo "Checking Docker Containers and Taking Diffs..." - container_ids=`docker container ls -aq` - - container_diff_report=$( - while read -r cur_container_id ; - do - echo "------------------------------------------" - docker container ls -a | grep "$cur_container_id" | awk -F' ' '{printf("%-20s%-80s\n",$1,$2);}' - docker inspect "$cur_container_id" - docker container diff "$cur_container_id" - - done <<< "$container_ids" - ) -} - -check_docker_processes() { - if [ "$is_root" == "FALSE" ] ; then - docker_processes="Cannot list docker processes without root access" - return - fi - - if [ "$docker_installed" == "FALSE" ] ; then - docker_processes="No Docker Processes - Docker not installed." - return - fi - - echo "Checking Current Docker Processes..." 
- - docker_processes=`docker ps` -} - -inspect_docker_networks() { - if [ "$is_root" == "FALSE" ] ; then - docker_networks="Cannot inspect docker networks without root access" - return - fi - - if [ "$docker_installed" == "FALSE" ] ; then - docker_networks="No Docker Networks - Docker not installed." - return - fi - - echo "Checking Docker Networks..." - - docker_networks=`docker network ls -q | xargs docker network inspect` -} - -inspect_docker_volumes() { - if [ "$is_root" == "FALSE" ] ; then - docker_volumes="Cannot inspect docker volumes without root access" - return - fi - - if [ "$docker_installed" == "FALSE" ] ; then - docker_volumes="No Docker Networks - Docker not installed." - return - fi - - echo "Checking Docker Volumes..." - - docker_volumes=`docker volume ls -q | xargs docker volume inspect` -} - -inspect_docker_swarms() { - SWARM_ENABLED=FALSE - if [ "$is_root" == "FALSE" ] ; then - docker_swarm_data="Cannot inspect docker swarms without root access" - return - fi - - if [ "$docker_installed" == "FALSE" ] ; then - docker_swarm_data="No Docker Swarms - Docker not installed." - return - fi - - echo "Checking Docker Swarms..." - - docker_nodes=`docker node ls > /dev/null 2>&1` - if [ "$?" -ne 0 ] ; then - docker_swarm_data="Machine is not part of a docker swarm or is not the manager" - return - fi - - SWARM_ENABLED=TRUE - docker_swarm_data=`docker node ls -q | xargs docker node inspect` - -} - -check_firewalld() { - firewalld_enabled=FALSE - firewalld_active_zones="N/A" - firewalld_all_zones="N/A" - firewalld_default_zone="N/A" - firewalld_services="N/A" - - if [ "$is_root" == "FALSE" ] ; then - firewalld_enabled="Cannot check firewalld without root access" - return - fi - - command -v systemctl > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - firewalld_enabled="Unable to determine - systemctl not found" - return - fi - - echo "Checking Firewalld..." - - firewalld_enabled=`systemctl list-unit-files | grep enabled | grep firewalld.service` - if [ "$?" -ne 0 ] ; then - return - fi - - firewalld_enabled=TRUE - firewalld_active_zones=`firewall-cmd --get-active-zones` - firewalld_all_zones=`firewall-cmd --list-all-zones` - firewalld_services=`firewall-cmd --get-services` - firewalld_default_zone=`firewall-cmd --get-default-zone` - -} - -check_iptables() { - - if [ "$is_root" == "FALSE" ] ; then - echo "Skipping IP Tables Check (Root access required)" - iptables_https_rules="Cannot check iptables https rules without root access" - iptables_all_rules="Cannot check iptables https rules without root access" - iptables_db_rules="Cannot check iptables https rules without root access" - iptables_nat_rules="Cannot check iptables https rules without root access" - return - fi - - echo "Checking IP Tables Rules..." - - command -v iptables > /dev/null 2>&1 - if [ $? -ne 0 ] ; then - iptables_all_rules="Unable to Check iptables - iptables not found." - iptables_https_rules="Unable to Check iptables - iptables not found." - iptables_db_rules="Unable to Check iptables - iptables not found." - iptables_nat_rules="Unable to Check iptables - iptables not found." - else - iptables_https_rules=`iptables --list | grep https` - iptables_db_rules=`iptables --list | grep '55436'` - iptables_all_rules=`iptables --list -v` - iptables_nat_rules=`iptables -t nat -L -v` - fi -} - -# Only valid on linux -ENTROPY_FILE="/proc/sys/kernel/random/entropy_avail" -check_entropy() { - - if [ -e "$ENTROPY_FILE" ] ; then - echo "Checking Entropy..." 
- available_entropy=`cat $ENTROPY_FILE` - else - available_entropy="Cannot Determine Entropy on non linux system" - fi -} - -# Helper method to ping a host. A small (64 byte) and large (1500 byte) -# ping attempt will be made -# Parameters: -# $1 - Hostname to ping -# $2 - Key to store the results in the ping_results associative array -# -# example: -# ping_host kb.blackducksoftware.com kb -# -# Results will be stored like this: -# ping_results["kb_reachable_small"]=FALSE or TRUE depending on result -# ping_results["kb_reachable_large"]=FALSE or TRUE depending on result -# ping_results["kb_ping_small_data"]= ping output -# ping_results["kb_ping_large_data"]= ping output -# -ping_host() { - - if [ "$#" -lt "3" ] ; then - echo "ping_host: too few parameters." - echo "usage: ping_host