From 1c2f042506427d74ce2bc803945a126f15afa1ea Mon Sep 17 00:00:00 2001
From: Ping-Lin Chang
Date: Mon, 7 Apr 2025 18:48:22 +0100
Subject: [PATCH] feat(cli): enhance CLI functionality and add Docker support

This commit introduces several enhancements to the CLI tool, including:

- Added functions for checking required fields in model configurations and
  preparing build environments.
- Implemented support for processing ARM64-specific packages and building
  Docker images with appropriate configurations.
- Introduced new command-line arguments for editable project installations
  and improved error handling.
- Created Dockerfiles for standard and ARM architectures, replacing the
  deprecated init-templates Dockerfile.

Additionally, the commit updates the input validation for chat and completion
tasks in the ray_io module, ensuring required fields are checked and defaults
are overridden as necessary.
---
 instill/helpers/cli.py                     | 302 ++++++++++++++++-----
 instill/helpers/docker/Dockerfile          |  40 +++
 instill/helpers/docker/Dockerfile.vllm.arm |  92 ++++++
 instill/helpers/init-templates/Dockerfile  |  31 --
 instill/helpers/ray_io.py                  |  86 +++++-
 5 files changed, 438 insertions(+), 113 deletions(-)
 create mode 100644 instill/helpers/docker/Dockerfile
 create mode 100644 instill/helpers/docker/Dockerfile.vllm.arm
 delete mode 100644 instill/helpers/init-templates/Dockerfile

diff --git a/instill/helpers/cli.py b/instill/helpers/cli.py
index f0495da..201af59 100644
--- a/instill/helpers/cli.py
+++ b/instill/helpers/cli.py
@@ -24,6 +24,7 @@


 def config_check_required_fields(c):
+    """Check if required fields are present in the model configuration."""
     if "build" not in c or c["build"] is None:
         raise ModelConfigException("build")
     if "gpu" not in c["build"] or c["build"]["gpu"] is None:
@@ -33,6 +34,7 @@ def config_check_required_fields(c):


 def cli():
+    """Command line interface for the Instill CLI tool."""
     if platform.machine() in ("i386", "AMD64", "x86_64"):
         default_platform = "amd64"
     else:
@@ -81,6 +83,13 @@ def cli():
         default=None,
         required=False,
     )
+    build_parser.add_argument(
+        "-e",
+        "--editable-project",
+        help="path to local Python project to install in editable mode (overrides --sdk-wheel if both are specified)",
+        default=None,
+        required=False,
+    )

     # push
     push_parser = subcommands.add_parser("push", help="Push model image")
@@ -145,6 +154,7 @@ def cli():


 def init(_):
+    """Initialize a new model directory with template files."""
     shutil.copyfile(
         __file__.replace("cli.py", "init-templates/instill.yaml"),
         f"{os.getcwd()}/instill.yaml",
@@ -159,7 +169,154 @@ def init(_):
     )


+def find_project_root(start_path):
+    """Find the Python project root by looking for setup.py or pyproject.toml"""
+    current_path = os.path.abspath(start_path)
+    while True:
+        if os.path.exists(os.path.join(current_path, "setup.py")) or os.path.exists(
+            os.path.join(current_path, "pyproject.toml")
+        ):
+            return current_path
+        parent_path = os.path.dirname(current_path)
+        if parent_path == current_path:
+            # Reached the filesystem root without finding a project file
+            return None
+        current_path = parent_path
+
+
+def is_vllm_version_compatible(version_parts):
+    """Check if vLLM version meets minimum requirements (v0.6.5)"""
+    return not (
+        version_parts[0] < 0
+        or (version_parts[0] == 0 and version_parts[1] < 6)
+        or (version_parts[0] == 0 and version_parts[1] == 6 and version_parts[2] < 5)
+    )
+
+
+def prepare_build_environment(build_params):
+    """Prepare environment variables and settings for the build process."""
+    python_version = build_params["python_version"].replace(".", "")
+    ray_version = ray.__version__
+    instill_sdk_version = instill.__version__
+
+    # Determine CUDA suffix
+    if not build_params["gpu"]:
+        cuda_suffix = ""
+    elif "cuda_version" in build_params and build_params["cuda_version"] is not None:
+        cuda_suffix = f'-cu{build_params["cuda_version"].replace(".", "")}'
+    else:
+        cuda_suffix = "-gpu"
+
+    # Prepare system packages
+    system_pkg_list = []
+    if (
+        "system_packages" in build_params
+        and build_params["system_packages"] is not None
+    ):
+        system_pkg_list.extend(build_params["system_packages"])
+    system_pkg_str = " ".join(system_pkg_list)
+
+    # Prepare Python packages
+    python_pkg_list = []
+    if (
+        "python_packages" in build_params
+        and build_params["python_packages"] is not None
+    ):
+        python_pkg_list.extend(build_params["python_packages"])
+    python_pkg_list.extend(DEFAULT_DEPENDENCIES)
+
+    return (
+        python_version,
+        ray_version,
+        instill_sdk_version,
+        cuda_suffix,
+        system_pkg_str,
+        python_pkg_list,
+    )
+
+
+def process_arm64_packages(python_pkg_list, target_arch):
+    """Process packages for ARM64 architecture, handling vLLM and other dependencies."""
+    dockerfile = "Dockerfile"
+    vllm_version = None
+
+    if target_arch == "arm64":
+        filtered_pkg_list = []
+        for pkg in python_pkg_list:
+            if pkg.startswith("vllm"):
+                if "==" not in pkg:
+                    raise ValueError(
+                        "[Instill] vLLM must be pinned to an exact version (e.g. vllm==0.6.5) on ARM architecture"
+                    )
+                # Transform version string from "0.6.4.post1" to "v0.6.4"
+                version = pkg.split("==")[1]
+                vllm_version = f"v{version.split('.post')[0]}"
+                # Check if version is at least v0.6.5
+                base_version = version.split(".post")[0]
+                version_parts = [int(x) for x in base_version.split(".")]
+                if not is_vllm_version_compatible(version_parts):
+                    raise ValueError(
+                        f"[Instill] vLLM version must be at least v0.6.5, got {vllm_version}"
+                    )
+            elif pkg.startswith("bitsandbytes"):
+                raise ValueError(
+                    "[Instill] bitsandbytes is not supported on ARM architecture"
+                )
+            else:
+                filtered_pkg_list.append(pkg)
+
+        python_pkg_list = filtered_pkg_list
+        if vllm_version is not None:
+            dockerfile = "Dockerfile.vllm.arm"
+
+    python_pkg_str = " ".join(python_pkg_list)
+    target_arch_suffix = "-aarch64" if target_arch == "arm64" else ""
+
+    return dockerfile, vllm_version, python_pkg_str, target_arch_suffix, python_pkg_list
+
+
+def prepare_build_command(args, tmpdir, dockerfile, build_vars):
+    """Prepare the Docker build command with all necessary arguments."""
+    vllm_version, target_arch_suffix, ray_version, python_version = build_vars[:4]
+    cuda_suffix, python_pkg_str, system_pkg_str, instill_sdk_version = build_vars[4:8]
+    instill_sdk_project_name = build_vars[8]
+
+    command = [
+        "docker",
+        "buildx",
+        "build",
+        "--progress=plain",
+        "--file",
+        f"{tmpdir}/{dockerfile}",
+        "--build-arg",
+        f"VLLM_VERSION={vllm_version}",
+        "--build-arg",
+        f"TARGET_ARCH_SUFFIX={target_arch_suffix}",
+        "--build-arg",
+        f"RAY_VERSION={ray_version}",
+        "--build-arg",
+        f"PYTHON_VERSION={python_version}",
+        "--build-arg",
+        f"CUDA_SUFFIX={cuda_suffix}",
+        "--build-arg",
+        f"PYTHON_PACKAGES={python_pkg_str}",
+        "--build-arg",
+        f"SYSTEM_PACKAGES={system_pkg_str}",
+        "--build-arg",
+        f"INSTILL_SDK_VERSION={instill_sdk_version}",
+        "--platform",
+        f"linux/{args.target_arch}",
+        "-t",
+        f"{args.name}:{args.tag}",
+        tmpdir,
+        "--load",
+    ]
+    if instill_sdk_project_name:
+        # Only pass this build arg when an editable project was provided;
+        # otherwise an empty string would be handed to --build-arg.
+        command.extend(
+            ["--build-arg", f"INSTILL_SDK_PROJECT_NAME={instill_sdk_project_name}"]
+        )
+    if args.no_cache:
+        command.append("--no-cache")
+    return command
+
+
 def build(args):
+    """Build a Docker image for the model with specified configuration."""
     try:
         Logger.i("[Instill] Loading config file...")
         with open("instill.yaml", "r", encoding="utf8") as f:
@@ -167,90 +324,77 @@ def build(args):
         config = yaml.safe_load(f)

         config_check_required_fields(config)

         build_params = config["build"]
-        python_version = build_params["python_version"].replace(".", "")
-        ray_version = ray.__version__
-        instill_version = instill.__version__
-
-        if not build_params["gpu"]:
-            cuda_suffix = ""
-        elif (
-            "cuda_version" in build_params and not build_params["cuda_version"] is None
-        ):
-            cuda_suffix = f'-cu{build_params["cuda_version"].replace(".", "")}'
-        else:
-            cuda_suffix = "-gpu"
-
-        system_pkg_list = []
-        if (
-            "system_packages" in build_params
-            and not build_params["system_packages"] is None
-        ):
-            system_pkg_list.extend(build_params["system_packages"])
-        system_pkg_str = " ".join(system_pkg_list)
-
-        python_pkg_list = []
-        if (
-            "python_packages" in build_params
-            and not build_params["python_packages"] is None
-        ):
-            python_pkg_list.extend(build_params["python_packages"])
-        python_pkg_list.extend(DEFAULT_DEPENDENCIES)
-        python_pkg_str = " ".join(python_pkg_list)
+        # Prepare build environment
+        (
+            python_version,
+            ray_version,
+            instill_sdk_version,
+            cuda_suffix,
+            system_pkg_str,
+            python_pkg_list,
+        ) = prepare_build_environment(build_params)
+
+        # Process ARM64-specific packages
+        (
+            dockerfile,
+            vllm_version,
+            python_pkg_str,
+            target_arch_suffix,
+            python_pkg_list,
+        ) = process_arm64_packages(python_pkg_list, args.target_arch)

         with tempfile.TemporaryDirectory() as tmpdir:
+            # Copy files to tmpdir
             shutil.copyfile(
-                __file__.replace("cli.py", "init-templates/Dockerfile"),
-                f"{tmpdir}/Dockerfile",
+                __file__.replace("cli.py", f"docker/{dockerfile}"),
+                f"{tmpdir}/{dockerfile}",
             )
             shutil.copytree(os.getcwd(), tmpdir, dirs_exist_ok=True)

+            # Handle SDK wheel if provided
             if args.sdk_wheel is not None:
                 shutil.copyfile(
                     args.sdk_wheel,
-                    f"{tmpdir}/instill_sdk-{instill_version}dev-py3-none-any.whl",
+                    f"{tmpdir}/instill_sdk-{instill_sdk_version}dev-py3-none-any.whl",
                 )

-            target_arch_suffix = "-aarch64" if args.target_arch == "arm64" else ""
+            # Handle editable project installation
+            instill_sdk_project_name = None
+            if args.editable_project:
+                project_root = find_project_root(args.editable_project)
+                if project_root is None:
+                    raise FileNotFoundError(
+                        "[Instill] No Python project found at the specified path (missing setup.py or pyproject.toml)"
+                    )
+                instill_sdk_project_name = os.path.basename(project_root)
+                Logger.i(f"[Instill] Found Python project: {instill_sdk_project_name}")
+                shutil.copytree(
+                    project_root,
+                    f"{tmpdir}/{instill_sdk_project_name}",
+                    dirs_exist_ok=True,
+                )

             Logger.i("[Instill] Building model image...")
-            command = [
-                "docker",
-                "buildx",
-                "build",
-                "--build-arg",
-                f"TARGET_ARCH_SUFFIX={target_arch_suffix}",
-                "--build-arg",
-                f"RAY_VERSION={ray_version}",
-                "--build-arg",
-                f"PYTHON_VERSION={python_version}",
-                "--build-arg",
-                f"CUDA_SUFFIX={cuda_suffix}",
-                "--build-arg",
-                f"PACKAGES={python_pkg_str}",
-                "--build-arg",
-                f"SYSTEM_PACKAGES={system_pkg_str}",
-                "--build-arg",
-                f"SDK_VERSION={instill_version}",
-                "--platform",
-                f"linux/{args.target_arch}",
-                "-t",
-                f"{args.name}:{args.tag}",
-                tmpdir,
-                "--load",
+            build_vars = [
+                vllm_version,
+                target_arch_suffix,
+                ray_version,
+                python_version,
+                cuda_suffix,
+                python_pkg_str,
+                system_pkg_str,
+                instill_sdk_version,
+                instill_sdk_project_name,
             ]
-            if args.no_cache:
-                command.append("--no-cache")
-            subprocess.run(
-                command,
-                check=True,
-            )
+            command = prepare_build_command(args, tmpdir, dockerfile, build_vars)
+
+            subprocess.run(command, check=True)
             Logger.i(f"[Instill] {args.name}:{args.tag} built")
     except subprocess.CalledProcessError:
         Logger.e("[Instill] Build failed")
-    except Exception as e:
+    except (ValueError, FileNotFoundError, OSError) as e:
         Logger.e("[Instill] Prepare failed")
         Logger.e(e)
     finally:
@@ -258,33 +402,43 @@ def build(args):


 def push(args):
+    """Push a built model image to a Docker registry."""
+    registry = args.url
+    tagged_image = f"{registry}/{args.name}:{args.tag}"
     try:
-        registry = args.url
-
+        # Tag the image
         subprocess.run(
             [
                 "docker",
                 "tag",
                 f"{args.name}:{args.tag}",
-                f"{registry}/{args.name}:{args.tag}",
+                tagged_image,
             ],
             check=True,
         )

         Logger.i("[Instill] Pushing model image...")
-        subprocess.run(
-            ["docker", "push", f"{registry}/{args.name}:{args.tag}"], check=True
-        )
-        Logger.i(f"[Instill] {registry}/{args.name}:{args.tag} pushed")
+        # Push the image
+        subprocess.run(["docker", "push", tagged_image], check=True)
+        Logger.i(f"[Instill] {tagged_image} pushed")
     except subprocess.CalledProcessError:
         Logger.e("[Instill] Push failed")
-    except Exception as e:
+    except (ConnectionError, OSError) as e:
         Logger.e("[Instill] Prepare failed")
         Logger.e(e)
     finally:
+        # Remove the tagged image regardless of success/failure
+        try:
+            subprocess.run(
+                ["docker", "rmi", tagged_image],
+                check=True,
+            )
+        except subprocess.CalledProcessError:
+            Logger.e(f"[Instill] Failed to remove tagged image {tagged_image}")
         Logger.i("[Instill] Done")


 def run(args):
+    """Run inference on a model image."""
     docker_run = False
     try:
         name = uuid.uuid4()
@@ -359,7 +513,7 @@ def run(args):
         Logger.e("[Instill] Run failed")
     except subprocess.TimeoutExpired:
         Logger.e("[Instill] Deployment timeout")
-    except Exception as e:
+    except (RuntimeError, OSError) as e:
         Logger.e("[Instill] Prepare failed")
         Logger.e(e)
     finally:
diff --git a/instill/helpers/docker/Dockerfile b/instill/helpers/docker/Dockerfile
new file mode 100644
index 0000000..f40536b
--- /dev/null
+++ b/instill/helpers/docker/Dockerfile
@@ -0,0 +1,40 @@
+# syntax=docker/dockerfile:1.7-labs
+ARG RAY_VERSION
+ARG PYTHON_VERSION
+ARG CUDA_SUFFIX
+ARG TARGET_ARCH_SUFFIX
+
+FROM rayproject/ray:${RAY_VERSION}-py${PYTHON_VERSION}${CUDA_SUFFIX}${TARGET_ARCH_SUFFIX}
+
+RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    sudo apt-get update && \
+    sudo DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata curl vim && \
+    sudo rm -rf /var/lib/apt/lists/*
+
+ARG SYSTEM_PACKAGES
+RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \
+    --mount=type=cache,target=/var/lib/apt,sharing=locked \
+    sudo apt-get update && \
+    for package in ${SYSTEM_PACKAGES}; do \
+        sudo DEBIAN_FRONTEND=noninteractive apt-get install -y $package; \
+    done && \
+    sudo rm -rf /var/lib/apt/lists/*
+
+ARG PYTHON_PACKAGES
+RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
+    for package in ${PYTHON_PACKAGES}; do \
+        pip install --default-timeout=1000 $package; \
+    done
+
+COPY --chown=ray:users --exclude=model.py . .
+
+ARG INSTILL_SDK_VERSION
+RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
+    if [ ! -f instill_sdk-${INSTILL_SDK_VERSION}dev-py3-none-any.whl ]; then \
+        pip install --default-timeout=1000 --no-cache-dir instill-sdk==${INSTILL_SDK_VERSION}; \
+    else \
+        pip install instill_sdk-${INSTILL_SDK_VERSION}dev-py3-none-any.whl; \
+    fi
+
+COPY --chown=ray:users model.py _model.py
diff --git a/instill/helpers/docker/Dockerfile.vllm.arm b/instill/helpers/docker/Dockerfile.vllm.arm
new file mode 100644
index 0000000..2d2e1a7
--- /dev/null
+++ b/instill/helpers/docker/Dockerfile.vllm.arm
@@ -0,0 +1,92 @@
+# syntax=docker/dockerfile:1.7-labs
+
+#####################################################################################################
+# This vLLM Dockerfile is used to construct an image that can build and run vLLM on ARM CPU platform.
+#####################################################################################################
+ARG RAY_VERSION
+ARG PYTHON_VERSION
+
+FROM rayproject/ray:${RAY_VERSION}-py${PYTHON_VERSION}-aarch64
+
+USER root
+
+ENV CCACHE_DIR=/root/.cache/ccache
+
+ENV CMAKE_CXX_COMPILER_LAUNCHER=ccache
+
+RUN --mount=type=cache,target=/var/cache/apt \
+    apt-get update -y \
+    && apt-get install -y curl ccache git wget vim numactl gcc-12 g++-12 python3 python3-pip libtcmalloc-minimal4 libnuma-dev \
+    && apt-get install -y ffmpeg libsm6 libxext6 libgl1 \
+    && update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-12 10 --slave /usr/bin/g++ g++ /usr/bin/g++-12
+
+RUN --mount=type=cache,target=/root/.cache/pip \
+    pip install py-cpuinfo # Use this to gather CPU info and optimize based on ARM Neoverse cores
+
+# tcmalloc provides better memory allocation efficiency, e.g., holding memory in caches to speed up access of commonly-used objects.
+ENV LD_PRELOAD="/usr/lib/aarch64-linux-gnu/libtcmalloc_minimal.so.4"
+
+RUN echo 'ulimit -c 0' >> ~/.bashrc
+
+WORKDIR /workspace
+
+# Clone vLLM repository (cache git objects)
+ARG VLLM_VERSION
+RUN git clone --branch ${VLLM_VERSION} https://github.com/vllm-project/vllm
+
+WORKDIR /workspace/vllm
+
+ARG PIP_EXTRA_INDEX_URL="https://download.pytorch.org/whl/cpu"
+ENV PIP_EXTRA_INDEX_URL=${PIP_EXTRA_INDEX_URL}
+RUN --mount=type=cache,target=/root/.cache/pip \
+    pip install --upgrade pip && \
+    pip install -r requirements/build.txt
+
+RUN --mount=type=cache,target=/root/.cache/pip \
+    pip install -v -r requirements/common.txt -r requirements/cpu.txt
+
+ARG GIT_REPO_CHECK=0
+RUN if [ "$GIT_REPO_CHECK" != 0 ]; then bash tools/check_repo.sh ; fi
+
+# Disabling AVX512 specific optimizations for ARM
+ARG VLLM_CPU_DISABLE_AVX512="true"
+ENV VLLM_CPU_DISABLE_AVX512=${VLLM_CPU_DISABLE_AVX512}
+
+RUN --mount=type=cache,target=/root/.cache/pip \
+    --mount=type=cache,target=/root/.cache/ccache \
+    VLLM_TARGET_DEVICE=cpu python3 setup.py bdist_wheel && \
+    pip install dist/*.whl && \
+    rm -rf dist
+
+###############################################################################
+# Instill Core Model Dependencies
+###############################################################################
+
+ARG SYSTEM_PACKAGES
+RUN apt-get update && \
+    for package in ${SYSTEM_PACKAGES}; do \
+        DEBIAN_FRONTEND=noninteractive apt-get install -y $package; \
+    done
+
+ARG PYTHON_PACKAGES
+RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
+    for package in ${PYTHON_PACKAGES}; do \
+        pip install --default-timeout=1000 $package; \
+    done
+
+WORKDIR /home/ray
+COPY --chown=ray:users --exclude=model.py . .
+COPY --chown=ray:users model.py _model.py
+
+ARG INSTILL_SDK_PROJECT_NAME
+ARG INSTILL_SDK_VERSION
+RUN --mount=type=cache,target=/root/.cache/pip,sharing=locked \
+    if [ -n "$INSTILL_SDK_PROJECT_NAME" ]; then \
+        pip install -e ${INSTILL_SDK_PROJECT_NAME}; \
+    elif [ -f instill_sdk-${INSTILL_SDK_VERSION}dev-py3-none-any.whl ]; then \
+        pip install instill_sdk-${INSTILL_SDK_VERSION}dev-py3-none-any.whl; \
+    else \
+        pip install --default-timeout=1000 instill-sdk==${INSTILL_SDK_VERSION}; \
+    fi
+
+USER ray
diff --git a/instill/helpers/init-templates/Dockerfile b/instill/helpers/init-templates/Dockerfile
deleted file mode 100644
index 76dd1c2..0000000
--- a/instill/helpers/init-templates/Dockerfile
+++ /dev/null
@@ -1,31 +0,0 @@
-# syntax=docker/dockerfile:1.7-labs
-ARG RAY_VERSION
-ARG PYTHON_VERSION
-ARG CUDA_SUFFIX
-ARG TARGET_ARCH_SUFFIX
-
-FROM rayproject/ray:${RAY_VERSION}-py${PYTHON_VERSION}${CUDA_SUFFIX}${TARGET_ARCH_SUFFIX}
-
-RUN sudo apt-get update && sudo apt-get install curl vim -y
-
-ARG SYSTEM_PACKAGES
-RUN for package in ${SYSTEM_PACKAGES}; do \
-        sudo apt-get install -y $package; \
-    done;
-
-ARG PACKAGES
-RUN for package in ${PACKAGES}; do \
-        pip install --default-timeout=1000 --no-cache-dir $package; \
-    done;
-
-COPY --chown=ray:users --exclude=model.py . .
-
-ARG SDK_VERSION
-RUN if [ ! -f instill_sdk-${SDK_VERSION}dev-py3-none-any.whl ]; then \
-        pip install --default-timeout=1000 --no-cache-dir instill-sdk==${SDK_VERSION}; \
-    else \
-        pip install instill_sdk-${SDK_VERSION}dev-py3-none-any.whl; \
-    fi;
-
-WORKDIR /home/ray
-COPY --chown=ray:users model.py _model.py
diff --git a/instill/helpers/ray_io.py b/instill/helpers/ray_io.py
index 166cb5c..72f2049 100644
--- a/instill/helpers/ray_io.py
+++ b/instill/helpers/ray_io.py
@@ -400,8 +400,32 @@ async def parse_task_completion_to_completion_input(

     if isinstance(request, Request):
         test_data: dict = await request.json()

+        # Initialize CompletionInput with default values
         inp = CompletionInput()
+
+        # Required field
+        if "prompt" not in test_data or not test_data["prompt"]:
+            raise InvalidInputException("prompt is required")
         inp.prompt = test_data["prompt"]
+
+        # Override defaults only if valid values are provided in test_data
+        if "max-tokens" in test_data and test_data["max-tokens"] not in ["", None, 0]:
+            inp.max_tokens = int(test_data["max-tokens"])
+        if "n" in test_data and test_data["n"] not in ["", None, 0]:
+            inp.n = int(test_data["n"])
+        if "seed" in test_data and test_data["seed"] not in ["", None]:
+            inp.seed = int(test_data["seed"])
+        if "temperature" in test_data and test_data["temperature"] not in [
+            "",
+            None,
+            0.0,
+        ]:
+            inp.temperature = float(test_data["temperature"])
+        if "top-p" in test_data and test_data["top-p"] not in ["", None, 0]:
+            inp.top_p = float(test_data["top-p"])
+        if "stream" in test_data and test_data["stream"] not in ["", None]:
+            inp.stream = bool(int(test_data["stream"]))
+
         return [inp]

     input_list = []
@@ -552,8 +576,32 @@ async def parse_task_chat_to_chat_input(

     if isinstance(request, Request):
         test_data: dict = await request.json()

+        # Initialize ChatInput with default values
         inp = ChatInput()
+
+        # Required field
+        if "prompt" not in test_data or not test_data["prompt"]:
+            raise InvalidInputException("prompt is required")
         inp.messages = [{"role": "user", "content": test_data["prompt"]}]
+
+        # Override defaults only if valid values are provided in test_data
+        if "max-tokens" in test_data and test_data["max-tokens"] not in ["", None, 0]:
+            inp.max_tokens = int(test_data["max-tokens"])
if "n" in test_data and test_data["n"] not in ["", None, 0]: + inp.n = int(test_data["n"]) + if "seed" in test_data and test_data["seed"] not in ["", None]: + inp.seed = int(test_data["seed"]) + if "temperature" in test_data and test_data["temperature"] not in [ + "", + None, + 0.0, + ]: + inp.temperature = float(test_data["temperature"]) + if "top-p" in test_data and test_data["top-p"] not in ["", None, 0]: + inp.top_p = int(test_data["top-p"]) + if "stream" in test_data and test_data["stream"] not in ["", None]: + inp.stream = bool(int(test_data["stream"])) + return [inp] input_list = [] @@ -751,17 +799,39 @@ async def parse_task_chat_to_multimodal_chat_input( if isinstance(request, Request): test_data: dict = await request.json() + # Initialize ChatMultiModalInput with default values + inp = ChatMultiModalInput() + + # Required fields validation + if "prompt" not in test_data or not test_data["prompt"]: + raise InvalidInputException("prompt is required") + if "image-url" not in test_data or not test_data["image-url"]: + raise InvalidInputException("image-url is required") + + # Set required fields test_prompt = test_data["prompt"] image_url = test_data["image-url"] - - inp = ChatMultiModalInput() - inp.messages = [ - { - "role": "user", - "content": test_prompt, - } - ] + inp.messages = [{"role": "user", "content": test_prompt}] inp.prompt_images = [[url_to_pil_image(image_url)]] + + # Override defaults only if valid values are provided in test_data + if "max-tokens" in test_data and test_data["max-tokens"] not in ["", None, 0]: + inp.max_tokens = int(test_data["max-tokens"]) + if "n" in test_data and test_data["n"] not in ["", None, 0]: + inp.n = int(test_data["n"]) + if "seed" in test_data and test_data["seed"] not in ["", None]: + inp.seed = int(test_data["seed"]) + if "temperature" in test_data and test_data["temperature"] not in [ + "", + None, + 0.0, + ]: + inp.temperature = float(test_data["temperature"]) + if "top-p" in test_data and test_data["top-p"] not in ["", None, 0]: + inp.top_p = int(test_data["top-p"]) + if "stream" in test_data and test_data["stream"] not in ["", None]: + inp.stream = bool(int(test_data["stream"])) + return [inp] input_list = []