diff --git a/backend/ibutsu_server/__init__.py b/backend/ibutsu_server/__init__.py
index 8ae9ec72..742be9b0 100644
--- a/backend/ibutsu_server/__init__.py
+++ b/backend/ibutsu_server/__init__.py
@@ -35,6 +35,13 @@ def _make_sql_url(hostname, database, **kwargs):
     return "postgresql://{}/{}".format(url, database)
 
 
+def _make_broker_url(env_var_value, hostname, password, port):
+    if env_var_value:
+        return env_var_value
+    user_pass_str = f":{password}@" if password else ""
+    return f"redis://{user_pass_str}{hostname}:{port}"
+
+
 def get_app(**extra_config):
     """Create the WSGI application"""
 
@@ -64,9 +71,28 @@ def get_app(**extra_config):
                 port=config.get("POSTGRESQL_PORT"),
                 user=config.get("POSTGRESQL_USER"),
                 password=config.get("POSTGRESQL_PASSWORD"),
-            )
+            ),
         }
     )
+
+    # Set celery broker URL
+    config.update(
+        {
+            "CELERY_BROKER_URL": _make_broker_url(
+                config.get("CELERY_BROKER_URL"),
+                config.get("REDIS_HOSTNAME"),
+                config.get("REDIS_PASSWORD"),
+                config.get("REDIS_PORT"),
+            ),
+            "CELERY_RESULT_BACKEND": _make_broker_url(
+                config.get("CELERY_RESULT_BACKEND"),
+                config.get("REDIS_HOSTNAME"),
+                config.get("REDIS_PASSWORD"),
+                config.get("REDIS_PORT"),
+            ),
+        }
+    )
+
     # Load any extra config
     config.update(extra_config)
 
diff --git a/ocp-templates/app-interface/backend.yaml b/ocp-templates/app-interface/backend.yaml
new file mode 100644
index 00000000..0260ee0b
--- /dev/null
+++ b/ocp-templates/app-interface/backend.yaml
@@ -0,0 +1,398 @@
+# Template for ibutsu prod server
+kind: Template
+apiVersion: template.openshift.io/v1
+metadata:
+  name: ibutsu-backend-template
+# ===============================================
+# Backend
+# ===============================================
+objects:
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: ibutsu-backend
+  spec:
+    replicas: 1
+    selector:
+      deploymentConfig: ibutsu-backend
+    strategy:
+      type: Rolling
+    template:
+      metadata:
+        labels:
+          app: ${APP_NAME}
+          deploymentConfig: ibutsu-backend
+      spec:
+        containers:
+        - env:
+          - name: GUNICORN_PROCESSES
+            value: "1"
+          - name: PORT
+            value: "8080"
+          - name: APP_CONFIG
+            value: config.py
+          - name: HAS_FRONTEND
+            value: "false"
+          - name: POSTGRESQL_HOST
+            valueFrom:
+              secretKeyRef:
+                key: db.host
+                name: ibutsu-db
+          - name: POSTGRESQL_PORT
+            valueFrom:
+              secretKeyRef:
+                key: db.port
+                name: ibutsu-db
+          - name: POSTGRESQL_USER
+            valueFrom:
+              secretKeyRef:
+                key: db.user
+                name: ibutsu-db
+          - name: POSTGRESQL_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                key: db.password
+                name: ibutsu-db
+          - name: POSTGRESQL_DATABASE
+            valueFrom:
+              secretKeyRef:
+                key: db.name
+                name: ibutsu-db
+          - name: REDIS_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                key: db.password
+                name: redis
+                optional: true
+          - name: REDIS_HOSTNAME
+            valueFrom:
+              secretKeyRef:
+                key: db.endpoint
+                name: redis
+          - name: REDIS_PORT
+            valueFrom:
+              secretKeyRef:
+                key: db.port
+                name: redis
+          - name: IBUTSU_SUPERADMIN_EMAIL
+            valueFrom:
+              secretKeyRef:
+                key: email
+                name: ibutsu-superadmin
+          - name: IBUTSU_SUPERADMIN_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                key: password
+                name: ibutsu-superadmin
+          - name: IBUTSU_SUPERADMIN_NAME
+            valueFrom:
+              secretKeyRef:
+                key: name
+                name: ibutsu-superadmin
+          - name: FRONTEND_URL
+            value: https://${FRONTEND_ROUTE}
+          - name: BACKEND_URL
+            value: https://${BACKEND_ROUTE}
+          image: quay.io/ibutsu/backend:${IMAGE_TAG}
+          imagePullPolicy: IfNotPresent
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 0
+            periodSeconds: 30
+            successThreshold: 1
+            timeoutSeconds: 20
+          name: ibutsu-backend
+          ports:
+          - containerPort: 8080
+            protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 5
+            periodSeconds: 30
+            successThreshold: 1
+            timeoutSeconds: 20
+          resources: {}
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+        dnsPolicy: ClusterFirst
+        restartPolicy: Always
+    triggers:
+    - type: ConfigChange
+# -----------------------------------------------
+- kind: Service
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: ibutsu-backend
+  spec:
+    ports:
+    - port: 8080
+      targetPort: 8080
+    selector:
+      deploymentConfig: ibutsu-backend
+
+# ===============================================
+# Flower
+# ===============================================
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: celery-flower
+  spec:
+    replicas: 1
+    selector:
+      deploymentConfig: celery-flower
+    strategy:
+      type: Rolling
+    template:
+      metadata:
+        labels:
+          app: ${APP_NAME}
+          deploymentConfig: celery-flower
+      spec:
+        containers:
+        - env:
+          # TODO: use redis secret to set this
+          - name: BROKER_URL
+            value: redis://:${REDIS_PASSWORD}@redis.${NAMESPACE}.svc
+          image: quay.io/ibutsu/flower:${IMAGE_TAG}
+          imagePullPolicy: IfNotPresent
+          name: celery-flower
+          resources: {}
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+        dnsPolicy: ClusterFirst
+        restartPolicy: Always
+    triggers:
+    - type: ConfigChange
+# -----------------------------------------------
+- kind: Service
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: celery-flower
+  spec:
+    ports:
+    - port: 8080
+      targetPort: 5555
+    selector:
+      deploymentConfig: celery-flower
+
+# ===============================================
+# Scheduler
+# ===============================================
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: ibutsu-scheduler
+  spec:
+    replicas: 1
+    selector:
+      deploymentConfig: ibutsu-scheduler
+    strategy:
+      type: Rolling
+    template:
+      metadata:
+        labels:
+          app: ${APP_NAME}
+          deploymentConfig: ibutsu-scheduler
+      spec:
+        containers:
+        - env:
+          - name: POSTGRESQL_HOST
+            valueFrom:
+              secretKeyRef:
+                key: db.host
+                name: ibutsu-db
+          - name: POSTGRESQL_PORT
+            valueFrom:
+              secretKeyRef:
+                key: db.port
+                name: ibutsu-db
+          - name: POSTGRESQL_USER
+            valueFrom:
+              secretKeyRef:
+                key: db.user
+                name: ibutsu-db
+          - name: POSTGRESQL_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                key: db.password
+                name: ibutsu-db
+          - name: POSTGRESQL_DATABASE
+            valueFrom:
+              secretKeyRef:
+                key: db.name
+                name: ibutsu-db
+          - name: REDIS_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                key: db.password
+                name: redis
+                optional: true
+          - name: REDIS_HOSTNAME
+            valueFrom:
+              secretKeyRef:
+                key: db.endpoint
+                name: redis
+          - name: REDIS_PORT
+            valueFrom:
+              secretKeyRef:
+                key: db.port
+                name: redis
+          - name: FRONTEND_URL
+            value: https://${FRONTEND_ROUTE}
+          - name: BACKEND_URL
+            value: https://${BACKEND_ROUTE}
+          image: quay.io/ibutsu/scheduler:${IMAGE_TAG}
+          imagePullPolicy: IfNotPresent
+          name: ibutsu-scheduler
+          resources: {}
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+        dnsPolicy: ClusterFirst
+        restartPolicy: Always
+    triggers:
+    - type: ConfigChange
+# -----------------------------------------------
+- kind: Service
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: ibutsu-scheduler
+  spec:
+    ports:
+    - port: 8080
+      targetPort: 8080
+    selector:
+      deploymentConfig: ibutsu-scheduler
+
+# ===============================================
+# Worker
+# ===============================================
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: ibutsu-worker
+  spec:
+    replicas: 1
+    selector:
+      deploymentConfig: ibutsu-worker
+    strategy:
+      type: Rolling
+    template:
+      metadata:
+        labels:
+          app: ${APP_NAME}
+          deploymentConfig: ibutsu-worker
+      spec:
+        containers:
+        - env:
+          - name: APP_SCRIPT
+            value: celery_worker.sh
+          - name: POSTGRESQL_HOST
+            valueFrom:
+              secretKeyRef:
+                key: db.host
+                name: ibutsu-db
+          - name: POSTGRESQL_PORT
+            valueFrom:
+              secretKeyRef:
+                key: db.port
+                name: ibutsu-db
+          - name: POSTGRESQL_USER
+            valueFrom:
+              secretKeyRef:
+                key: db.user
+                name: ibutsu-db
+          - name: POSTGRESQL_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                key: db.password
+                name: ibutsu-db
+          - name: POSTGRESQL_DATABASE
+            valueFrom:
+              secretKeyRef:
+                key: db.name
+                name: ibutsu-db
+          - name: REDIS_PASSWORD
+            valueFrom:
+              secretKeyRef:
+                key: db.password
+                name: redis
+                optional: true
+          - name: REDIS_HOSTNAME
+            valueFrom:
+              secretKeyRef:
+                key: db.endpoint
+                name: redis
+          - name: REDIS_PORT
+            valueFrom:
+              secretKeyRef:
+                key: db.port
+                name: redis
+          - name: FRONTEND_URL
+            value: https://${FRONTEND_ROUTE}
+          - name: BACKEND_URL
+            value: https://${BACKEND_ROUTE}
+          image: quay.io/ibutsu/worker:${IMAGE_TAG}
+          imagePullPolicy: IfNotPresent
+          name: ibutsu-worker
+          resources: {}
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+        dnsPolicy: ClusterFirst
+        restartPolicy: Always
+    triggers:
+    - type: ConfigChange
+# -----------------------------------------------
+- kind: Service
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: ibutsu-worker
+  spec:
+    ports:
+    - port: 8080
+      targetPort: 8080
+    selector:
+      deploymentConfig: ibutsu-worker
+
+# ===============================================
+# Parameters
+# ===============================================
+parameters:
+- name: APP_NAME
+  displayName: App Name
+  description: The name of the application
+  value: ibutsu-server
+- name: BACKEND_ROUTE
+  displayName: Backend Route
+  description: The URL of the backend of the Ibutsu server
+  value: ibutsu-api.example.com
+- name: FRONTEND_ROUTE
+  displayName: Frontend Route
+  description: The URL of the frontend of the Ibutsu server
+  value: ibutsu.example.com
+- name: IMAGE_TAG
+  value: latest
diff --git a/ocp-templates/app-interface/frontend.yaml b/ocp-templates/app-interface/frontend.yaml
new file mode 100644
index 00000000..c177bca5
--- /dev/null
+++ b/ocp-templates/app-interface/frontend.yaml
@@ -0,0 +1,93 @@
+# Template for ibutsu prod server
+kind: Template
+apiVersion: template.openshift.io/v1
+metadata:
+  name: ibutsu-frontend-template
+# ===============================================
+# Frontend
+# ===============================================
+objects:
+- kind: DeploymentConfig
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: ibutsu-frontend
+  spec:
+    replicas: 1
+    selector:
+      deploymentConfig: ibutsu-frontend
+    strategy:
+      type: Rolling
+    template:
+      metadata:
+        labels:
+          app: ${APP_NAME}
+          deploymentConfig: ibutsu-frontend
+      spec:
+        containers:
+        - env:
+          - name: REACT_APP_SERVER_URL
+            value: https://${BACKEND_ROUTE}/api
+          - name: NODE_ENV
+            value: production
+          image: quay.io/ibutsu/frontend:${IMAGE_TAG}
+          imagePullPolicy: IfNotPresent
+          livenessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 0
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 1
+          name: ibutsu-frontend
+          ports:
+          - containerPort: 8080
+            protocol: TCP
+          readinessProbe:
+            failureThreshold: 3
+            httpGet:
+              path: /
+              port: 8080
+              scheme: HTTP
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            successThreshold: 1
+            timeoutSeconds: 1
+          resources: {}
+          terminationMessagePath: /dev/termination-log
+          terminationMessagePolicy: File
+        dnsPolicy: ClusterFirst
+        restartPolicy: Always
+    triggers:
+    - type: ConfigChange
+# -----------------------------------------------
+- kind: Service
+  apiVersion: v1
+  metadata:
+    labels:
+      app: ${APP_NAME}
+    name: ibutsu-frontend
+  spec:
+    ports:
+    - port: 8080
+      targetPort: 8080
+    selector:
+      deploymentConfig: ibutsu-frontend
+# ===============================================
+# Parameters
+# ===============================================
+parameters:
+- name: APP_NAME
+  displayName: App Name
+  description: The name of the application
+  value: ibutsu-server
+- name: BACKEND_ROUTE
+  displayName: Backend Route
+  description: The URL of the backend of the Ibutsu server
+  value: ibutsu-api.example.com
+- name: IMAGE_TAG
+  value: latest
diff --git a/ocp-templates/jobs/pgsql-backup-cronjob.yaml b/ocp-templates/jobs/pgsql-backup-cronjob.yaml
deleted file mode 100644
index 5e14e64e..00000000
--- a/ocp-templates/jobs/pgsql-backup-cronjob.yaml
+++ /dev/null
@@ -1,92 +0,0 @@
-# ===============================================
-# Database Backup Cronjob
-# To add this to your project run:
-#   oc process -f pgsql-backup-cronjob.yaml | oc create -f -
-# ===============================================
-kind: Template
-apiVersion: v1
-metadata:
-  name: pgsql-backup-template
-parameters:
-  - name: NAMESPACE
-    displayName: Namespace
-    description: The namespace for all of the images, applications, etc.
-    value: iqe-ibutsu-server
-  - name: BACKUP_VOLUME_CLAIM
-    displayName: Backup volume claim
-    value: database-backup
-  - name: BACKUP_KEEP
-    displayName: Number of backups to keep
-    value: '5'
-  - name: BACKUP_SCHEDULE
-    displayName: Cron-like schedule to run backup
-    value: '1 0 * * 6'
-objects:
-- kind: CronJob
-  apiVersion: batch/v1beta1
-  metadata:
-    name: database-backup
-    namespace: ${NAMESPACE}
-  spec:
-    schedule: ${BACKUP_SCHEDULE}
-    concurrencyPolicy: Forbid
-    jobTemplate:
-      spec:
-        template:
-          spec:
-            volumes:
-            - name: database-backups
-              persistentVolumeClaim:
-                claimName: ${BACKUP_VOLUME_CLAIM}
-            containers:
-            - name: postgresql-backup
-              image: postgresql:12
-              command:
-              - 'bash'
-              - '-eo'
-              - 'pipefail'
-              - '-c'
-              - >
-                trap "echo 'Backup failed'; exit 0" ERR;
-                FILENAME=backup-${PGDATABASE}-`date +%Y-%m-%d`.dump;
-                cd /var/lib/database-backup;
-                find . -type f -name "backup-${PGDATABASE}-*" -exec ls -ltr "{}" + | head -n -${BACKUP_KEEP} | xargs rm -fr;
-                echo "Backing up database...";
-                PGPASSWORD="$PGPASSWORD" pg_dump -v --username=$PGUSER --host=$PGHOST --port=$PGPORT --dbname=$PGDATABASE --exclude-table=artifacts --format=custom --compress=9 --jobs=1 --no-owner --file=$FILENAME;
-                echo "";
-                echo -n "Backup successful: "; du -h ./$FILENAME;
-                echo "To restore, use:";
-                echo "~# pg_restore --user=$PGUSER --password= --host=$PGHOST --port=$PGPORT --database=$PGDATABASE $FILENAME"
-              resources:
-                limits:
-                  cpu: 250m
-                  memory: 1Gi
-                requests:
-                  cpu: 100m
-                  memory: 512Mi
-              env:
-              - name: PGHOST
-                value: postgresql.${NAMESPACE}.svc
-              - name: PGPORT
-                value: "5432"
-              - name: PGUSER
-                valueFrom:
-                  secretKeyRef:
-                    key: database-user
-                    name: postgresql
-              - name: PGPASSWORD
-                valueFrom:
-                  secretKeyRef:
-                    key: database-password
-                    name: postgresql
-              - name: PGDATABASE
-                valueFrom:
-                  secretKeyRef:
-                    key: database-name
-                    name: postgresql
-              - name: BACKUP_KEEP
-                value: ${BACKUP_KEEP}
-              volumeMounts:
-              - name: database-backups
-                mountPath: /var/lib/database-backup
-            restartPolicy: Never
diff --git a/ocp-templates/jobs/pgsql-backup-job.yaml b/ocp-templates/jobs/pgsql-backup-job.yaml
deleted file mode 100644
index 069c5549..00000000
--- a/ocp-templates/jobs/pgsql-backup-job.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-# Test Job for running a backup immediately
-# The command should match that of 'pgsql-backup-cronjob' exactly
-# To kickoff the job, run:
-#   oc create -f pgsql-backup-job.yaml
-apiVersion: batch/v1
-kind: Job
-metadata:
-  annotations:
-    cronjob.kubernetes.io/instantiate: manual
-  creationTimestamp: null
-  name: test-pgsql-backup
-  namespace: iqe-ibutsu-server
-spec:
-  selector: {}
-  template:
-    metadata:
-      name: test-psql-backup
-    spec:
-      volumes:
-      - name: database-backups
-        persistentVolumeClaim:
-          claimName: database-backups
-      - configMap:
-          defaultMode: 420
-          name: ocp-pgsql-custom-config
-        name: postgresql-config
-      containers:
-      - name: postgresql-backup
-        image: postgresql:12
-        command:
-        - 'bash'
-        - '-eo'
-        - 'pipefail'
-        - '-c'
-        - >
-          trap "echo 'Backup failed'; exit 0" ERR;
-          FILENAME=backup-${PGDATABASE}-`date +%Y-%m-%d`.dump;
-          cd /var/lib/database-backup;
-          find . -type f -name "backup-${PGDATABASE}-*" -exec ls "{}" + | head -n -${BACKUP_KEEP} | xargs rm -fr;
-          echo "Backing up database...";
-          PGPASSWORD="$PGPASSWORD" pg_dump -v --username=$PGUSER --host=$PGHOST --port=$PGPORT --dbname=$PGDATABASE --exclude-table=artifacts --format=custom --compress=9 --jobs=1 --no-owner --file=$FILENAME;
-          echo "";
-          echo -n "Backup successful: "; du -h ./$FILENAME;
-          echo "To restore, use:";
-          echo "~# pg_restore --user=$PGUSER --password= --host=$PGHOST --port=$PGPORT --database=$PGDATABASE $FILENAME"
-        resources:
-          limits:
-            cpu: 250m
-            memory: 1Gi
-          requests:
-            cpu: 100m
-            memory: 512Mi
-        env:
-        - name: PGHOST
-          value: postgresql.iqe-ibutsu-server.svc
-        - name: PGPORT
-          value: "5432"
-        - name: PGUSER
-          valueFrom:
-            secretKeyRef:
-              key: database-user
-              name: postgresql
-        - name: PGPASSWORD
-          valueFrom:
-            secretKeyRef:
-              key: database-password
-              name: postgresql
-        - name: PGDATABASE
-          valueFrom:
-            secretKeyRef:
-              key: database-name
-              name: postgresql
-        - name: BACKUP_KEEP
-          value: '5'
-        volumeMounts:
-        - name: database-backups
-          mountPath: /var/lib/database-backup
-        - name: postgresql-config
-          mountPath: /opt/app-root/src/postgresql-cfg
-      restartPolicy: Never
diff --git a/ocp-templates/jobs/pgsql-vacuum-cronjob.yaml b/ocp-templates/jobs/pgsql-vacuum-cronjob.yaml
deleted file mode 100644
index 34bf56f8..00000000
--- a/ocp-templates/jobs/pgsql-vacuum-cronjob.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-# ===============================================
-# Database Vacuum Cronjob
-# This provides a cron-based vacuum of the
-# entire DB
-# To add this to your project run:
-#   oc process -f pgsql-vacuum-cronjob.yaml | oc create -f -
-# ===============================================
-kind: Template
-apiVersion: v1
-metadata:
-  name: pgsql-vacuum-template
-parameters:
-  - name: NAMESPACE
-    displayName: Namespace
-    description: The namespace for all of the images, applications, etc.
-    value: iqe-ibutsu-server
-  - name: VACUUM_SCHEDULE
-    displayName: Cron-like schedule to run vacuum
-    value: '1 1 * * *'
-objects:
-- kind: CronJob
-  apiVersion: batch/v1beta1
-  metadata:
-    name: database-vacuum
-    namespace: ${NAMESPACE}
-  spec:
-    schedule: ${VACUUM_SCHEDULE}
-    concurrencyPolicy: Forbid
-    jobTemplate:
-      spec:
-        template:
-          spec:
-            containers:
-            - name: postgresql-vacuum
-              image: postgresql:12
-              command:
-              - 'bash'
-              - '-eo'
-              - 'pipefail'
-              - '-c'
-              - >
-                trap "echo 'VACUUM failed'; exit 0" ERR;
-                echo "Backing up database...";
-                PGPASSWORD="$PGPASSWORD" psql --username=$PGUSER --host=$PGHOST --port=$PGPORT --dbname=$PGDATABASE --command 'VACUUM (verbose);';
-                echo "";
-                echo -n "VACUUM successful."
-              resources:
-                limits:
-                  cpu: 250m
-                  memory: 1Gi
-                requests:
-                  cpu: 100m
-                  memory: 512Mi
-              env:
-              - name: PGHOST
-                value: postgresql.${NAMESPACE}.svc
-              - name: PGPORT
-                value: "5432"
-              - name: PGUSER
-                valueFrom:
-                  secretKeyRef:
-                    key: database-user
-                    name: postgresql
-              - name: PGPASSWORD
-                valueFrom:
-                  secretKeyRef:
-                    key: database-password
-                    name: postgresql
-              - name: PGDATABASE
-                valueFrom:
-                  secretKeyRef:
-                    key: database-name
-                    name: postgresql
-            restartPolicy: Never
diff --git a/ocp-templates/jobs/pgsql-vacuum-job.yaml b/ocp-templates/jobs/pgsql-vacuum-job.yaml
deleted file mode 100644
index 0db0a44e..00000000
--- a/ocp-templates/jobs/pgsql-vacuum-job.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-# Test Job for running a vacuum immediately
-# The command should match that of 'pgsql-vacuum-cronjob' exactly
-# To kickoff the job, run:
-#   oc create -f pgsql-vacuum-job.yaml
-apiVersion: batch/v1
-kind: Job
-metadata:
-  annotations:
-    cronjob.kubernetes.io/instantiate: manual
-  creationTimestamp: null
-  name: test-pgsql-vacuum
-  namespace: iqe-ibutsu-server
-spec:
-  selector: {}
-  template:
-    metadata:
-      name: test-psql-vacuum
-    spec:
-      containers:
-      - name: postgresql-vacuum
-        image: postgresql:12
-        command:
-        - 'bash'
-        - '-eo'
-        - 'pipefail'
-        - '-c'
-        - >
-          trap "echo 'VACUUM failed'; exit 0" ERR;
-          echo "Backing up database...";
-          PGPASSWORD="$PGPASSWORD" psql --username=$PGUSER --host=$PGHOST --port=$PGPORT --dbname=$PGDATABASE --command 'VACUUM (verbose);';
-          echo "";
-          echo -n "VACUUM successful."
-        resources:
-          limits:
-            cpu: 250m
-            memory: 1Gi
-          requests:
-            cpu: 100m
-            memory: 512Mi
-        env:
-        - name: PGHOST
-          value: postgresql.iqe-ibutsu-server.svc
-        - name: PGPORT
-          value: "5432"
-        - name: PGUSER
-          valueFrom:
-            secretKeyRef:
-              key: database-user
-              name: postgresql
-        - name: PGPASSWORD
-          valueFrom:
-            secretKeyRef:
-              key: database-password
-              name: postgresql
-        - name: PGDATABASE
-          valueFrom:
-            secretKeyRef:
-              key: database-name
-              name: postgresql
-      restartPolicy: Never