diff --git a/ads/opctl/backend/ads_ml_job.py b/ads/opctl/backend/ads_ml_job.py index 1c7b581e0..114c8592d 100644 --- a/ads/opctl/backend/ads_ml_job.py +++ b/ads/opctl/backend/ads_ml_job.py @@ -16,8 +16,6 @@ from distutils import dir_util from typing import Dict, Tuple, Union -from jinja2 import Environment, PackageLoader - from ads.common.auth import AuthContext, AuthType, create_signer from ads.common.oci_client import OCIClientFactory from ads.jobs import ( @@ -199,36 +197,6 @@ def run(self) -> Dict: print("JOB RUN OCID:", run_id) return {"job_id": job_id, "run_id": run_id} - def init_operator(self): - # TODO: check if folder is empty, check for force overwrite - # TODO: check that command is being run from advanced-ds repo (important until ads released) - - operator_folder = self.config["execution"].get("operator_folder_path") - os.makedirs(operator_folder, exist_ok=True) - - operator_folder_name = os.path.basename(os.path.normpath(operator_folder)) - docker_tag = f"{os.path.join(self.config['infrastructure'].get('docker_registry'), operator_folder_name)}:latest" - - self.config["execution"]["operator_folder_name"] = operator_folder_name - self.config["execution"]["docker_tag"] = docker_tag - - operator_slug = self.config["execution"].get("operator_slug") - self._jinja_write(operator_slug, operator_folder) - - # DONE - print( - "\nInitialization Successful.\n" - f"All code should be written in main.py located at: {os.path.join(operator_folder, 'main.py')}\n" - f"Additional libraries should be added to environment.yaml located at: {os.path.join(operator_folder, 'environment.yaml')}\n" - "Any changes to main.py will require re-building the docker image, whereas changes to args in the" - " runtime section of the yaml file do not. 
Write accordingly.\n" - "Run this cluster with:\n" - f"\tdocker build -t {docker_tag} -f {os.path.join(operator_folder, 'Dockerfile')} .\n" - f"\tads opctl publish-image {docker_tag} \n" - f"\tads opctl run -f {os.path.join(operator_folder, operator_slug + '.yaml')} \n" - ) - return operator_folder - def delete(self): """ Delete Job or Job Run from OCID. @@ -264,25 +232,6 @@ def watch(self): run = DataScienceJobRun.from_ocid(run_id) run.watch(interval=interval, wait=wait) - def _jinja_write(self, operator_slug, operator_folder): - # TODO AH: fill in templates with relevant details - env = Environment( - loader=PackageLoader("ads", f"opctl/operators/{operator_slug}") - ) - - for setup_file in [ - "Dockerfile", - "environment.yaml", - "main.py", - "run.py", - "start_scheduler.sh", - "start_worker.sh", - "dask_cluster.yaml", - ]: - template = env.get_template(setup_file + ".jinja2") - with open(os.path.join(operator_folder, setup_file), "w") as ff: - ff.write(template.render(config=self.config)) - def _create_payload(self, infra=None, name=None) -> Job: if not infra: infra = self.config.get("infrastructure", {}) diff --git a/ads/opctl/cli.py b/ads/opctl/cli.py index 28836edd7..744a0dbd6 100644 --- a/ads/opctl/cli.py +++ b/ads/opctl/cli.py @@ -24,7 +24,6 @@ from ads.opctl.cmds import deactivate as deactivate_cmd from ads.opctl.cmds import delete as delete_cmd from ads.opctl.cmds import init as init_cmd -from ads.opctl.cmds import init_operator as init_operator_cmd from ads.opctl.cmds import init_vscode as init_vscode_cmd from ads.opctl.cmds import predict as predict_cmd from ads.opctl.cmds import run as run_cmd @@ -54,11 +53,12 @@ def commands(): @click.help_option("--help", "-h") @click.option("--debug", "-d", help="set debug mode", is_flag=True, default=False) def configure(debug): + """Sets up the initial configurations for the ADS OPCTL.""" suppress_traceback(debug)(configure_cmd)() @commands.command() -@click.argument("image-type", type=click.Choice(["job-local", 
"ads-ops-base"])) +@click.argument("image-type", type=click.Choice(["job-local"])) @click.help_option("--help", "-h") @click.option( "--gpu", @@ -68,23 +68,10 @@ def configure(debug): default=False, required=False, ) -@click.option( - "--source-folder", - "-s", - help="when building custom operator image, source folder of the custom operator", - default=None, - required=False, -) -@click.option( - "--image", - "-i", - help="image name, used when building custom image", - default=None, - required=False, -) @click.option("--debug", "-d", help="set debug mode", is_flag=True, default=False) -def build_image(image_type, gpu, source_folder, image, debug): - suppress_traceback(debug)(build_image_cmd)(image_type, gpu, source_folder, image) +def build_image(image_type, gpu, debug): + """Builds the local Data Science Jobs image.""" + suppress_traceback(debug)(build_image_cmd)(image_type, gpu) @commands.command() @@ -101,6 +88,7 @@ def build_image(image_type, gpu, source_folder, image, debug): @click.help_option("--help", "-h") @click.option("--debug", "-d", help="set debug mode", is_flag=True, default=False) def publish_image(**kwargs): + """Publishes image to the OCI Container Registry.""" debug = kwargs.pop("debug") if kwargs.get("registry", None): registry = kwargs["registry"] @@ -449,30 +437,8 @@ def check(file, **kwargs): suppress_traceback(debug)(run_diagnostics_cmd)(config, **kwargs) -@commands.command() -@click.argument("operator_slug", nargs=1) -@click.option( - "--folder_path", - "-fp", - help="the name of the folder wherein to put the operator code", - multiple=True, - required=False, - default=None, -) -@add_options(_options) -def init_operator(**kwargs): - suppress_traceback(kwargs["debug"])(init_operator_cmd)(**kwargs) - - @commands.command() @click.argument("ocid", nargs=1) -@add_options(_model_deployment_options) -@click.option( - "--conda-pack-folder", - required=False, - default=None, - help="folder where conda packs are saved", -) @click.option( "--auth", 
"-a", @@ -487,6 +453,7 @@ def init_operator(**kwargs): ) @click.option("--debug", "-d", help="set debug mode", is_flag=True, default=False) def delete(**kwargs): + """Deletes a data science service resource.""" suppress_traceback(kwargs["debug"])(delete_cmd)(**kwargs) @@ -513,6 +480,7 @@ def delete(**kwargs): ) @click.option("--debug", "-d", help="set debug mode", is_flag=True, default=False) def cancel(**kwargs): + """Aborts the execution of the OCI resource run.""" suppress_traceback(kwargs["debug"])(cancel_cmd)(**kwargs) @@ -566,7 +534,7 @@ def cancel(**kwargs): @click.option("--debug", "-d", help="set debug mode", is_flag=True, default=False) def watch(**kwargs): """ - ``tail`` logs form a job run, dataflow run or pipeline run. + Tails the logs from a job run, data flow run or pipeline run. Connects to the logging service that was configured with the JobRun, Application Run or Pipeline Run and streams the logs. """ suppress_traceback(kwargs["debug"])(watch_cmd)(**kwargs) @@ -575,13 +543,6 @@ def watch(**kwargs): @commands.command() @click.argument("ocid", nargs=1) @click.option("--debug", "-d", help="Set debug mode", is_flag=True, default=False) -@add_options(_model_deployment_options) -@click.option( - "--conda-pack-folder", - required=False, - default=None, - help="folder where conda packs are saved", -) @click.option( - "--auth", - "-a", @@ -597,7 +558,7 @@ def watch(**kwargs): @click.option("--debug", "-d", help="set debug mode", is_flag=True, default=False) def activate(**kwargs): """ - Activates a data science service. + Activates a data science service resource. """ suppress_traceback(kwargs["debug"])(activate_cmd)(**kwargs) @@ -627,7 +588,7 @@ def activate(**kwargs): @click.option("--debug", "-d", help="set debug mode", is_flag=True, default=False) def deactivate(**kwargs): """ - Deactivates a data science service. + Deactivates a data science service resource. 
""" suppress_traceback(kwargs["debug"])(deactivate_cmd)(**kwargs) diff --git a/ads/opctl/cmds.py b/ads/opctl/cmds.py index 71040933d..a0f395e54 100644 --- a/ads/opctl/cmds.py +++ b/ads/opctl/cmds.py @@ -332,31 +332,6 @@ def _update_env_vars(config, env_vars: List): return config -def init_operator(**kwargs) -> str: - """ - Initialize the resources for an operator - - Parameters - ---------- - kwargs: dict - keyword argument, stores command line args - Returns - ------- - folder_path: str - a path to the folder with all of the resources - """ - # TODO: confirm that operator slug is in the set of valid operator slugs - assert kwargs["operator_slug"] == "dask_cluster" - - if kwargs.get("folder_path"): - kwargs["operator_folder_path"] = kwargs.pop("folder_path")[0] - else: - kwargs["operator_folder_path"] = kwargs["operator_slug"] - p = ConfigProcessor().step(ConfigMerger, **kwargs) - print(f"config check: {p.config}") - return _BackendFactory(p.config).backend.init_operator() - - def delete(**kwargs) -> None: """ Delete a MLJob/DataFlow run. diff --git a/ads/opctl/conda/cli.py b/ads/opctl/conda/cli.py index b08d9cb08..aac63eb51 100644 --- a/ads/opctl/conda/cli.py +++ b/ads/opctl/conda/cli.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8; -*- -# Copyright (c) 2022 Oracle and/or its affiliates. +# Copyright (c) 2022, 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ import click @@ -17,6 +17,7 @@ @click.group("conda") @click.help_option("--help", "-h") def commands(): + "The CLI to assist in the management of conda environments." 
pass diff --git a/ads/opctl/constants.py b/ads/opctl/constants.py index 88e300b5e..0e62a3e0a 100644 --- a/ads/opctl/constants.py +++ b/ads/opctl/constants.py @@ -12,10 +12,8 @@ DEFAULT_MODEL_FOLDER = "~/.ads_ops/models" CONDA_PACK_OS_PREFIX_FORMAT = "oci://@/" DEFAULT_ADS_CONFIG_FOLDER = "~/.ads_ops" -OPS_IMAGE_BASE = "ads-operators-base" ML_JOB_IMAGE = "ml-job" ML_JOB_GPU_IMAGE = "ml-job-gpu" -OPS_IMAGE_GPU_BASE = "ads-operators-gpu-base" DEFAULT_MANIFEST_VERSION = "1.0" ADS_CONFIG_FILE_NAME = "config.ini" ADS_JOBS_CONFIG_FILE_NAME = "ml_job_config.ini" diff --git a/ads/opctl/distributed/cli.py b/ads/opctl/distributed/cli.py index cee55287a..7297a5a15 100644 --- a/ads/opctl/distributed/cli.py +++ b/ads/opctl/distributed/cli.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8; -*- -# Copyright (c) 2022 Oracle and/or its affiliates. +# Copyright (c) 2022, 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ import os @@ -21,6 +21,7 @@ @click.group("distributed-training") @click.help_option("--help", "-h") def commands(): + "The CLI to assist in the management of distributed training." pass diff --git a/ads/opctl/docker/Dockerfile b/ads/opctl/docker/Dockerfile deleted file mode 100644 index c91343106..000000000 --- a/ads/opctl/docker/Dockerfile +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright (c) 2021 Oracle and/or its affiliates. 
-# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ - -FROM ghcr.io/oracle/oraclelinux7-instantclient:19 AS base - -RUN rm -rf /var/cache/yum/* && yum clean all && yum install -y gcc mesa-libGL vim && rm -rf /var/cache/yum/* -RUN curl -L https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh >> miniconda.sh -RUN bash ./miniconda.sh -b -p /miniconda; rm ./miniconda.sh; -ENV PATH="/miniconda/bin:$PATH" - -ENV HOME /home/datascience -RUN mkdir -p /etc/datascience -WORKDIR /etc/datascience - -COPY operators/environment.yaml operators/environment.yaml -RUN conda env create -f operators/environment.yaml --name op_env && conda clean -afy -ENV PATH="/miniconda/envs/op_env/bin:$PATH" - -RUN /bin/bash -c "source activate op_env" -COPY operators/run.py operators/run.py -CMD bash - -FROM base -COPY docker/merge_dependencies.py merge_dependencies.py -COPY operators/ operators/ - -RUN pip install pyyaml click && python merge_dependencies.py environment.yaml -RUN conda env update -f environment.yaml --name op_env && conda clean -afy -RUN source activate op_env diff --git a/ads/opctl/docker/Dockerfile.gpu b/ads/opctl/docker/Dockerfile.gpu deleted file mode 100644 index 86b3560dd..000000000 --- a/ads/opctl/docker/Dockerfile.gpu +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright (c) 2021 Oracle and/or its affiliates. 
-# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ - -FROM ghcr.io/oracle/oraclelinux7-instantclient:19 AS base - -RUN yum install -y tar gzip - -########################### CUDA INSTALLATION ######################################## - -#Reference: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/centos7/10.1/runtime/cudnn7/Dockerfile -#Reference: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/centos7/10.1/runtime/Dockerfile -#Reference: https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/centos7/10.1/base/Dockerfile - -RUN NVIDIA_GPGKEY_SUM=d1be581509378368edeec8c1eb2958702feedf3bc3d17011adbf24efacce4ab5 && \ -curl -fsSL https://developer.download.nvidia.com/compute/cuda/repos/rhel7/x86_64/7fa2af80.pub | sed '/^Version/d' > /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA && \ - echo "$NVIDIA_GPGKEY_SUM /etc/pki/rpm-gpg/RPM-GPG-KEY-NVIDIA" | sha256sum -c --strict - - -COPY docker/cuda.repo /etc/yum.repos.d/cuda.repo - -ENV CUDA_VERSION 10.1.243 - -ENV CUDA_PKG_VERSION 10-1-$CUDA_VERSION-1 -# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a -RUN yum install -y \ -cuda-cudart-$CUDA_PKG_VERSION \ -cuda-compat-10-1 \ -&& \ - ln -s cuda-10.1 /usr/local/cuda && \ - rm -rf /var/cache/yum/* - -# nvidia-docker 1.0 -RUN echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \ - echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf - -ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} -ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64 - -# nvidia-container-runtime -ENV NVIDIA_VISIBLE_DEVICES all -ENV NVIDIA_DRIVER_CAPABILITIES compute,utility -ENV NVIDIA_REQUIRE_CUDA "cuda>=10.1 brand=tesla,driver>=384,driver<385 brand=tesla,driver>=396,driver<397 brand=tesla,driver>=410,driver<411" - -ENV CUDNN_VERSION 7.6.5.32 -LABEL com.nvidia.cudnn.version="${CUDNN_VERSION}" - -RUN 
CUDNN_DOWNLOAD_SUM=7eaec8039a2c30ab0bc758d303588767693def6bf49b22485a2c00bf2e136cb3 && \ - curl -fsSL http://developer.download.nvidia.com/compute/redist/cudnn/v7.6.5/cudnn-10.1-linux-x64-v7.6.5.32.tgz -O && \ - echo "$CUDNN_DOWNLOAD_SUM cudnn-10.1-linux-x64-v7.6.5.32.tgz" | sha256sum -c - -RUN ls -ltr -RUN tar --no-same-owner -xzf cudnn-10.1-linux-x64-v7.6.5.32.tgz -C /usr/local --wildcards 'cuda/lib64/libcudnn.so.*' && \ - rm cudnn-10.1-linux-x64-v7.6.5.32.tgz && \ - ldconfig -##############################################CUDA END######################## - -RUN rm -rf /var/cache/yum/* && yum clean all && yum install -y gcc mesa-libGL vim && rm -rf /var/cache/yum/* -RUN curl -L https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh >> miniconda.sh -RUN bash ./miniconda.sh -b -p /miniconda; rm ./miniconda.sh; -ENV PATH="/miniconda/bin:$PATH" - -ENV HOME /home/datascience -RUN mkdir -p /etc/datascience -WORKDIR /etc/datascience - -COPY operators/environment.yaml operators/environment.yaml -RUN conda env create -f operators/environment.yaml --name op_env && conda clean -afy -ENV PATH="/miniconda/envs/op_env/bin:$PATH" - -RUN /bin/bash -c "source activate op_env" -COPY operators/run.py operators/run.py -CMD bash - -FROM base -COPY docker/merge_dependencies.py merge_dependencies.py -COPY operators/ operators/ - -RUN pip install pyyaml click && python merge_dependencies.py environment.yaml -RUN conda env update -f environment.yaml --name op_env && conda clean -afy -RUN source activate op_env diff --git a/ads/opctl/model/cli.py b/ads/opctl/model/cli.py index 4588d4ba9..8d4875b4c 100644 --- a/ads/opctl/model/cli.py +++ b/ads/opctl/model/cli.py @@ -14,6 +14,7 @@ @click.group("model") @click.help_option("--help", "-h") def commands(): + "The CLI to assist in the management of the Data Science Model Deployment." 
pass diff --git a/ads/opctl/operator/cli.py b/ads/opctl/operator/cli.py index 5546b12b7..deda800f8 100644 --- a/ads/opctl/operator/cli.py +++ b/ads/opctl/operator/cli.py @@ -30,6 +30,7 @@ @click.group("operator") @click.help_option("--help", "-h") def commands(): + "The CLI to assist in the management of the ADS operators." pass @@ -113,7 +114,7 @@ def init(debug: bool, **kwargs: Dict[str, Any]) -> None: default=False, ) def build_image(debug: bool, **kwargs: Dict[str, Any]) -> None: - """Builds a new image for the particular operator.""" + """Creates a new image for the specified operator.""" suppress_traceback(debug)(cmd_build_image)(**kwargs) @@ -257,7 +258,7 @@ def verify(debug: bool, **kwargs: Dict[str, Any]) -> None: default=None, ) def build_conda(debug: bool, **kwargs: Dict[str, Any]) -> None: - """Builds a new conda environment for the particular operator.""" + """Creates a new conda environment for the specified operator.""" suppress_traceback(debug)(cmd_build_conda)(**kwargs) diff --git a/ads/opctl/spark/cli.py b/ads/opctl/spark/cli.py index f63b0be6b..9d571e997 100644 --- a/ads/opctl/spark/cli.py +++ b/ads/opctl/spark/cli.py @@ -15,6 +15,7 @@ @click.group("spark") @click.help_option("--help", "-h") def commands(): + "The CLI to assist in the management of the Spark workloads." 
pass diff --git a/ads/opctl/utils.py b/ads/opctl/utils.py index c8ff47a22..fb6804780 100644 --- a/ads/opctl/utils.py +++ b/ads/opctl/utils.py @@ -11,9 +11,7 @@ import subprocess import sys import shlex -import tempfile import urllib.parse -from distutils import dir_util from subprocess import Popen, PIPE, STDOUT from typing import Union, List, Tuple, Dict import yaml @@ -23,9 +21,7 @@ from ads.opctl import logger from ads.opctl.constants import ( ML_JOB_IMAGE, - OPS_IMAGE_BASE, ML_JOB_GPU_IMAGE, - OPS_IMAGE_GPU_BASE, ) from ads.common.decorator.runtime_dependency import ( runtime_dependency, @@ -96,12 +92,6 @@ def get_region_key(auth: dict) -> str: return client.get_tenancy(tenancy).data.home_region_key -# Not needed at the moment -# def _get_compartment_name(compartment_id: str, auth: dict) -> str: -# client = OCIClientFactory(**auth).identity -# return client.get_compartment(compartment_id=compartment_id).data.name - - def publish_image(image: str, registry: str = None) -> None: # pragma: no cover """ Publish an image. @@ -131,22 +121,18 @@ def publish_image(image: str, registry: str = None) -> None: # pragma: no cover return f"{registry}/{os.path.basename(image)}" -def build_image( - image_type: str, gpu: bool = False, source_folder: str = None, dst_image: str = None -) -> None: +def build_image(image_type: str, gpu: bool = False) -> None: """ Build an image for opctl. 
Parameters ---------- image_type: str - specify the image to build, can take 'job-local' or 'ads-ops-base', + specify the image to build, can take 'job-local', former for running job with conda pack locally, latter for running operators gpu: bool whether to use gpu version of image - source_folder: str - source folder when building custom operator, to be included in custom image dst_image: str image to save as when building custom operator @@ -155,35 +141,29 @@ def build_image( None """ curr_dir = os.path.dirname(os.path.abspath(__file__)) - if image_type == "ads-ops-custom": - if not source_folder or not dst_image: - raise ValueError( - "Please provide both source_folder and image_name to build a image for custom operator." - ) - proc = _build_custom_operator_image(gpu, source_folder, dst_image) - else: - image, dockerfile, target = _get_image_name_dockerfile_target(image_type, gpu) - command = [ - "docker", - "build", - "-t", - image, - "-f", - os.path.join(curr_dir, "docker", dockerfile), - ] - if target: - command += ["--target", target] - if os.environ.get("no_proxy"): - command += ["--build-arg", f"no_proxy={os.environ['no_proxy']}"] - if os.environ.get("http_proxy"): - command += ["--build-arg", f"http_proxy={os.environ['http_proxy']}"] - if os.environ.get("https_proxy"): - command += ["--build-arg", f"https_proxy={os.environ['https_proxy']}"] - if os.environ.get(CONTAINER_NETWORK): - command += ["--network", os.environ[CONTAINER_NETWORK]] - command += [os.path.abspath(curr_dir)] - logger.info("Build image with command %s", command) - proc = run_command(command) + image, dockerfile, target = _get_image_name_dockerfile_target(image_type, gpu) + command = [ + "docker", + "build", + "-t", + image, + "-f", + os.path.join(curr_dir, "docker", dockerfile), + ] + if target: + command += ["--target", target] + if os.environ.get("no_proxy"): + command += ["--build-arg", f"no_proxy={os.environ['no_proxy']}"] + if os.environ.get("http_proxy"): + command += 
["--build-arg", f"http_proxy={os.environ['http_proxy']}"] + if os.environ.get("https_proxy"): + command += ["--build-arg", f"https_proxy={os.environ['https_proxy']}"] + if os.environ.get(CONTAINER_NETWORK): + command += ["--network", os.environ[CONTAINER_NETWORK]] + command += [os.path.abspath(curr_dir)] + logger.info("Build image with command %s", command) + proc = run_command(command) + if proc.returncode != 0: raise RuntimeError("Docker build failed.") @@ -192,46 +172,10 @@ def _get_image_name_dockerfile_target(type: str, gpu: bool) -> str: look_up = { ("job-local", False): (ML_JOB_IMAGE, "Dockerfile.job", None), ("job-local", True): (ML_JOB_GPU_IMAGE, "Dockerfile.job.gpu", None), - ("ads-ops-base", False): (OPS_IMAGE_BASE, "Dockerfile", "base"), - ("ads-ops-base", True): (OPS_IMAGE_GPU_BASE, "Dockerfile.gpu", "base"), } return look_up[(type, gpu)] -@runtime_dependency(module="docker", install_from=OptionalDependency.OPCTL) -def _build_custom_operator_image( - gpu: bool, source_folder: str, dst_image: str -) -> None: # pragma: no cover - operator = os.path.basename(source_folder) - base_image_name = OPS_IMAGE_BASE if not gpu else OPS_IMAGE_GPU_BASE - try: - client = docker.from_env() - client.api.inspect_image(base_image_name) - except docker.errors.ImageNotFound: - build_image("ads-ops-base", gpu) - with tempfile.TemporaryDirectory() as td: - dir_util.copy_tree(source_folder, os.path.join(td, operator)) - if os.path.exists(os.path.join(td, operator, "environment.yaml")): - with open(os.path.join(td, "Dockerfile"), "w") as f: - f.write( - f""" -FROM {base_image_name} -COPY ./{operator}/environment.yaml operators/{operator}/environment.yaml -RUN conda env update -f operators/{operator}/environment.yaml --name op_env && conda clean -afy -COPY ./{operator} operators/{operator} - """ - ) - else: - with open(os.path.join(td, "Dockerfile"), "w") as f: - f.write( - f""" -FROM {base_image_name} -COPY ./{operator} operators/{operator} - """ - ) - return 
run_command(["docker", "build", "-t", f"{dst_image}", "."], td) - - def run_command( cmd: Union[str, List[str]], cwd: str = None, shell: bool = False ) -> Popen: