diff --git a/Dockerfile.activator b/Dockerfile.activator
index 24b50a66..049f5591 100644
--- a/Dockerfile.activator
+++ b/Dockerfile.activator
@@ -3,13 +3,8 @@ FROM ghcr.io/lsst-dm/prompt-proto-base:${BASE_TAG}
 ENV PYTHONUNBUFFERED True
 ENV APP_HOME /app
 ENV PROMPT_PROTOTYPE_DIR $APP_HOME
-ARG RUBIN_INSTRUMENT
 ARG PUBSUB_VERIFICATION_TOKEN
 ARG PORT
-ARG CALIB_REPO
-ARG IMAGE_BUCKET
-ARG IMAGE_TIMEOUT
-ARG BUCKET_TOPIC
 WORKDIR $APP_HOME
 COPY python/activator activator/
 COPY pipelines pipelines/
diff --git a/doc/playbook.rst b/doc/playbook.rst
index daf0d196..a2e199f3 100644
--- a/doc/playbook.rst
+++ b/doc/playbook.rst
@@ -154,7 +154,7 @@ To create or edit the Cloud Run service in the Google Cloud Console:
 
 * There are also five optional parameters:
 
-  * IMAGE_TIMEOUT: timeout in seconds to wait for raw image, default 50 sec.
+  * IMAGE_TIMEOUT: timeout in seconds to wait after expected script completion for raw image arrival, default 20 sec.
   * LOCAL_REPOS: absolute path (in the container) where local repos are created, default ``/tmp``.
   * USER_APDB: database user for the APDB, default "postgres"
   * USER_REGISTRY: database user for the registry database, default "postgres"
@@ -228,7 +228,7 @@ It includes the following required environment variables:
 
 The following environment variables are optional:
 
-* IMAGE_TIMEOUT: timeout in seconds to wait for raw image, default 50 sec.
+* IMAGE_TIMEOUT: timeout in seconds to wait after expected script completion for raw image arrival, default 20 sec.
 * LOCAL_REPOS: absolute path (in the container) where local repos are created, default ``/tmp``.
 * USER_APDB: database user for the APDB, default "postgres"
 * USER_REGISTRY: database user for the registry database, default "postgres"
@@ -310,15 +310,7 @@ tester
 
 ``python/tester/upload.py`` and ``python/tester/upload_hsc_rc2.py`` are scripts that simulate the CCS image writer.
 It can be run from ``rubin-devl``, but requires the user to install the ``confluent_kafka`` package in their environment.
-You must have a profile set up for the ``rubin-pp`` bucket (see `Buckets`_, above), and must set the ``KAFKA_CLUSTER`` environment variable.
-Run:
-
-.. code-block:: sh
-
-   kubectl get service -n kafka prompt-processing-kafka-external-bootstrap
-
-and look up the ``EXTERNAL-IP``; set ``KAFKA_CLUSTER=<EXTERNAL-IP>:9094``.
-The IP address is fixed, so you should only need to look it up once.
+You must have a profile set up for the ``rubin-pp`` bucket (see `Buckets`_, above).
 
 Install the prototype code, and set it up before use:
 
diff --git a/python/tester/upload.py b/python/tester/upload.py
index f47d0780..64782f74 100644
--- a/python/tester/upload.py
+++ b/python/tester/upload.py
@@ -1,7 +1,6 @@
 import dataclasses
 import itertools
 import logging
-import os
 import random
 import re
 import sys
@@ -31,9 +30,6 @@ class Instrument:
 EXPOSURE_INTERVAL = 18
 SLEW_INTERVAL = 2
 
-# Kafka server
-kafka_cluster = os.environ["KAFKA_CLUSTER"]
-
 
 logging.basicConfig(
     format="{levelname} {asctime} {name} - {message}",
diff --git a/python/tester/upload_hsc_rc2.py b/python/tester/upload_hsc_rc2.py
index 47507a1b..29e2b865 100644
--- a/python/tester/upload_hsc_rc2.py
+++ b/python/tester/upload_hsc_rc2.py
@@ -20,7 +20,6 @@
 # along with this program.  If not, see <https://www.gnu.org/licenses/>.
 
 import logging
-import os
 import random
 import sys
 import tempfile
@@ -38,8 +37,6 @@
 EXPOSURE_INTERVAL = 18
 SLEW_INTERVAL = 2
 
-# Kafka server
-kafka_cluster = os.environ["KAFKA_CLUSTER"]
 
 logging.basicConfig(
     format="{levelname} {asctime} {name} - {message}",
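
For reference on the playbook hunks above: each optional variable carries a documented default, which implies it is read from the environment with a fallback at run time rather than required at startup (note the diff also drops ``ARG IMAGE_TIMEOUT`` from ``Dockerfile.activator``). A minimal sketch of that pattern, assuming ``os.environ.get`` with the documented defaults; the names and structure here are illustrative, not the actual activator code:

.. code-block:: python

   import os

   # Hypothetical sketch: optional service configuration read with the
   # defaults documented in doc/playbook.rst; absent variables fall back.
   image_timeout = int(os.environ.get("IMAGE_TIMEOUT", 20))  # sec to wait for raw arrival
   local_repos = os.environ.get("LOCAL_REPOS", "/tmp")  # where local repos are created
   user_apdb = os.environ.get("USER_APDB", "postgres")  # APDB database user
   user_registry = os.environ.get("USER_REGISTRY", "postgres")  # registry database user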