diff --git a/.github/workflows/document-scripts.yml b/.github/workflows/document-scripts.yml index 72c49453a..891a1f99b 100644 --- a/.github/workflows/document-scripts.yml +++ b/.github/workflows/document-scripts.yml @@ -56,11 +56,6 @@ jobs: with: fetch-depth: 0 path: automation-scripts - - - name: Set up Git for commit - run: | - git config --global user.name "github-actions[bot]" - git config --global user.email "github-actions[bot]@users.noreply.github.com" - name: Document meta.yaml file run: | @@ -73,8 +68,8 @@ jobs: cd automation-scripts find . -type f -name README.md -exec git add {} + # Use the GitHub actor's name and email - git config --global user.name github-actions[bot] - git config --global user.email "github-actions[bot]@users.noreply.github.com" + git config user.name github-actions[bot] + git config user.email "github-actions[bot]@users.noreply.github.com" # Commit changes git diff-index --quiet HEAD || (git commit -am "[Automated Commit] Document ${{ matrix.modified_metas.file}} [skip ci]" && git push) diff --git a/automation/script/module.py b/automation/script/module.py index 5d903dd0e..399196145 100644 --- a/automation/script/module.py +++ b/automation/script/module.py @@ -809,14 +809,15 @@ def _run(self, i): run_state['script_entry_repo_git'] = script_item.repo.meta.get( 'git', False) - deps = meta.get('deps', []) - post_deps = meta.get('post_deps', []) - prehook_deps = meta.get('prehook_deps', []) - posthook_deps = meta.get('posthook_deps', []) + deps = [] + post_deps = [] + prehook_deps = [] + posthook_deps = [] input_mapping = meta.get('input_mapping', {}) + new_env_keys_from_meta = [] + new_state_keys_from_meta = [] + docker_settings = meta.get('docker') - new_env_keys_from_meta = meta.get('new_env_keys', []) - new_state_keys_from_meta = meta.get('new_state_keys', []) found_script_item = utils.assemble_object( meta['alias'], meta['uid']) @@ -844,22 +845,30 @@ def _run(self, i): for key in script_item_default_env: env.setdefault(key, 
script_item_default_env[key]) - # Force env from meta['env'] as a CONST - # (env OVERWRITE) - script_item_env = meta.get('env', {}) - # print(f"script meta env= {script_item_env}") + # for update_meta_if_env - utils.merge_dicts({'dict1': env, - 'dict2': script_item_env, - 'append_lists': True, - 'append_unique': True}) - # print(f"env = {env}") + r = update_state_from_meta( + meta, + env, + state, + const, + const_state, + deps, + post_deps, + prehook_deps, + posthook_deps, + new_env_keys_from_meta, + new_state_keys_from_meta, + run_state, + i) + if r['return'] > 0: + return r - script_item_state = meta.get('state', {}) - utils.merge_dicts({'dict1': state, - 'dict2': script_item_state, - 'append_lists': True, - 'append_unique': True}) + # taking from meta or else deps with same names will be ignored + deps = meta.get('deps', []) + post_deps = meta.get('post_deps', []) + prehook_deps = meta.get('prehook_deps', []) + posthook_deps = meta.get('posthook_deps', []) # Store the default_version in run_state -> may be overridden by # variations @@ -5604,7 +5613,7 @@ def convert_env_to_script(env, os_info, start_script=None): os_info['env_var'].replace( 'env_var', key)}""" - env_quote = os_info['env_quote'] + env_quote = os_info.get('env_quote', '"') # Replace placeholders in the platform-specific environment command # and escapes any quote in the env value env_command = os_info['set_env'].replace( diff --git a/script/app-mlperf-inference-nvidia/README.md b/script/app-mlperf-inference-nvidia/README.md index 261cf91ca..31001c13a 100644 --- a/script/app-mlperf-inference-nvidia/README.md +++ b/script/app-mlperf-inference-nvidia/README.md @@ -234,3 +234,4 @@ mlcr reproduce,mlcommons,mlperf,inference,harness,nvidia-harness,nvidia - `v4.1` (base: pre5.0) - `v4.1-dev` (base: pre5.0) (default) - `v5.0` (base: v5.0+) +- `v5.1` (base: v5.0+) diff --git a/script/app-mlperf-inference-nvidia/customize.py b/script/app-mlperf-inference-nvidia/customize.py index 1055c42ce..7431b40d2 100644 
--- a/script/app-mlperf-inference-nvidia/customize.py +++ b/script/app-mlperf-inference-nvidia/customize.py @@ -346,7 +346,7 @@ def preprocess(i): cmds.append( f"mkdir -p {os.path.dirname(preprocessed_data_for_accuracy_checker)}") cmds.append( - f"ln -sf {env['MLC_DATASET_OPENORCA_PREPROCESSED_PATH']} {preprocessed_data_for_accuracy_checker}") + f"cp {env['MLC_DATASET_OPENORCA_PREPROCESSED_PATH']} {preprocessed_data_for_accuracy_checker}") model_name = "llama2-70b" model_path = fp8_model_path @@ -361,7 +361,7 @@ def preprocess(i): else: cmds.append(f"make download_model BENCHMARKS='{model_name}'") elif "stable-diffusion" in env['MLC_MODEL']: - if env.get('MLC_MLPERF_INFERENCE_CODE_VERSION') == 'v5.0': + if is_true(env.get('MLC_MLPERF_INFERENCE_POST_5_0')): # Define folder mappings for each model type model_folders = { 'onnx_models': ["clip1", "clip2", "unetxl", "vae"], @@ -440,7 +440,7 @@ def preprocess(i): f"mkdir -p {os.path.dirname(target_preprocessed_data_path)}") if env.get('MLC_DATASET_OPENORCA_PREPROCESSED_PATH'): cmds.append( - f"ln -sf {env['MLC_DATASET_OPENORCA_PREPROCESSED_PATH']} {os.path.join(env['MLPERF_SCRATCH_PATH'], "preprocessed_data", "open_orca")}" + f"cp -r {env['MLC_DATASET_OPENORCA_NVIDIA_PREPROCESSED_PATH']}/* {os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'open_orca')}" ) else: cmds.append( @@ -697,7 +697,8 @@ def preprocess(i): if "llama2" in env["MLC_MODEL"]: run_config += f" --checkpoint_dir={fp8_model_path}" - if env.get('MLC_MLPERF_INFERENCE_POST_5_0'): + run_config += f" --tensor_path={os.path.join(env['MLPERF_SCRATCH_PATH'], 'preprocessed_data', 'open_orca')}" + if is_true(env.get('MLC_MLPERF_INFERENCE_POST_5_0')): run_config += f" --trtllm_build_flags=tensor_parallelism:{tmp_tp_size},pipeline_parallelism:{tmp_pp_size}" else: run_config += f" --tensor_parallelism={tmp_tp_size}" diff --git a/script/app-mlperf-inference-nvidia/meta.yaml b/script/app-mlperf-inference-nvidia/meta.yaml index 479020929..c2f9b3bdc 100644 --- 
a/script/app-mlperf-inference-nvidia/meta.yaml +++ b/script/app-mlperf-inference-nvidia/meta.yaml @@ -289,6 +289,8 @@ deps: - 'yes' - tags: get,nvidia,mitten + names: + - nvidia-mitten enable_if_env: MLC_NVIDIA_MITTEN_FROM_SRC: - 'yes' @@ -357,6 +359,16 @@ variations: group: batchsize-format-change env: MLC_MLPERF_INFERENCE_POST_5_0: "yes" + v5.1: + base: + - v5.0+ + group: version + env: + MLC_MLPERF_INFERENCE_CODE_VERSION: "v5.1" + MLC_NVIDIA_MITTEN_FROM_SRC: "yes" + adr: + pycuda: + version: "2024.1" v5.0: base: - v5.0+ @@ -786,6 +798,7 @@ variations: - tags: get,generic-python-lib,_package.rouge-score names: - rouge-score + - tags: get,generic-python-lib,_package.typeguard env: MLC_ML_MODEL_STARTING_WEIGHTS_FILENAME: "https://github.com/mlcommons/cm4mlops/blob/b18ff890ff559e21d2e27a3b54cd26467ac1fd9e/script/get-ml-model-llama2/_cm.json#L51" MLC_ML_MODEL_INPUTS_DATA_TYPE: int32 diff --git a/script/app-mlperf-inference/meta.yaml b/script/app-mlperf-inference/meta.yaml index 894bffe9e..7d5dd3fbf 100644 --- a/script/app-mlperf-inference/meta.yaml +++ b/script/app-mlperf-inference/meta.yaml @@ -463,6 +463,35 @@ variations: docker: base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v5.0-cuda12.8-pytorch25.01-ubuntu24.04-aarch64-Grace-release + nvidia-original,r5.1_default: + env: + MLC_NVIDIA_MITTEN_FROM_SRC: 'yes' + docker: + os_version: "24.04" + user: 'ubuntu' + build_deps: + - tags: detect,os + image_name: mlperf-inference-nvidia-v5.1-common + build_env: + ENV: release + deps: + - names: + - numpy + tags: get,generic-python-lib,_package.numpy + version_max: "1.26.999" + version_max_usable: "1.26.4" + update_meta_if_env: + - enable_if_env: + MLC_HOST_PLATFORM_FLAVOR: + - x86_64 + docker: + base_image: nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v5.1-cuda12.9-pytorch25.05-ubuntu24.04-x86_64 + - skip_if_env: + MLC_HOST_PLATFORM_FLAVOR: + - x86_64 + docker: + base_image: 
nvcr.io/nvidia/mlperf/mlperf-inference:mlpinf-v5.1-cuda12.9-pytorch25.05-ubuntu24.04-aarch64-Grace + nvidia-original,gptj_: env: BUILD_TRTLLM: 1 @@ -578,6 +607,24 @@ variations: - tags: get,dataset,preprocessed,openorca,_validation,_mlcommons,_nvidia env: BUILD_TRTLLM: 1 + + nvidia-original,r5.1_default,llama2-70b_: + default_variations: + precision: float8 + docker: + deps: + - tags: get,ml-model,llama2-70b,_nvidia,_fp8,_v5.1 + names: + - llama2-model + update_tags_from_env_with_prefix: + _tp-size.: + - MLC_NVIDIA_TP_SIZE + _pp-size.: + - MLC_NVIDIA_PP_SIZE + - tags: get,dataset,preprocessed,openorca,_calibration,_mlcommons,_nvidia + - tags: get,dataset,preprocessed,openorca,_validation,_mlcommons,_nvidia + env: + BUILD_TRTLLM: 1 nvidia-original: docker: @@ -1825,7 +1872,7 @@ variations: mlperf-inference-implementation: tags: _cuda deps: - - tags: get,cuda-devices,_with-pycuda + - tags: get,cuda-devices skip_if_env: MLC_CUDA_DEVICE_PROP_GLOBAL_MEMORY: - "yes" @@ -2227,6 +2274,32 @@ variations: MLC_REGENERATE_MEASURE_FILES: 'yes' MLC_MLPERF_INFERENCE_VERSION: '5.1' + r5.1_default: + group: + reproducibility + add_deps_recursive: + nvidia-inference-common-code: + tags: _mlcommons,_v5.1 + nvidia-inference-server: + version: r5.1 + tags: _mlcommons + nvidia-harness: + tags: _v5.1 + intel-harness: + tags: _v4.1 + nvidia-scratch-space: + tags: _version.5.1 + nvidia-mitten-git-src: + tags: _sha.69fc0e5042ff1acdd0862e13d834d5b1d12e917b + nvidia-mitten: + tags: _default + pycuda: + version_min: "2024.1" + default_env: + MLC_SKIP_SYS_UTILS: 'yes' + MLC_REGENERATE_MEASURE_FILES: 'yes' + MLC_MLPERF_INFERENCE_VERSION: '5.1' + invalid_variation_combinations: - - retinanet @@ -2252,6 +2325,9 @@ invalid_variation_combinations: - - gptj - tf + - + - gptj + - r5.1_default input_description: scenario: @@ -2367,6 +2443,7 @@ docker: - "${{ LLAMA2_CHECKPOINT_PATH }}:${{ LLAMA2_CHECKPOINT_PATH }}" - "${{ LLAMA2_PRE_QUANTIZED_CHECKPOINT_PATH }}:${{ LLAMA2_PRE_QUANTIZED_CHECKPOINT_PATH 
}}" - "${{ MLC_DATASET_OPENORCA_PREPROCESSED_PATH }}:${{ MLC_DATASET_OPENORCA_PREPROCESSED_PATH }}" + - "${{ MLC_DATASET_OPENORCA_NVIDIA_PREPROCESSED_PATH }}:${{ MLC_DATASET_OPENORCA_NVIDIA_PREPROCESSED_PATH }}" - "${{ MLC_DATASET_OPENORCA_CALIBRATION_PATH }}:${{ MLC_DATASET_OPENORCA_CALIBRATION_PATH }}" - "${{ MLC_NVIDIA_LLAMA_DATASET_FILE_PATH }}:${{ MLC_NVIDIA_LLAMA_DATASET_FILE_PATH }}" - "${{ SDXL_CHECKPOINT_PATH }}:${{ SDXL_CHECKPOINT_PATH }}" diff --git a/script/build-mlperf-inference-server-nvidia/meta.yaml b/script/build-mlperf-inference-server-nvidia/meta.yaml index 8b1a2a0e5..fc814926e 100644 --- a/script/build-mlperf-inference-server-nvidia/meta.yaml +++ b/script/build-mlperf-inference-server-nvidia/meta.yaml @@ -78,7 +78,7 @@ deps: # Detect MLCake - tags: get,cmake - version: "3.26.4" + version: "3.27" # Detect Google Logger - tags: get,generic,sys-util,_glog-dev @@ -238,6 +238,14 @@ variations: numpy: version_max: "1.26.999" version_max_usable: "1.26.4" + + r5.1: + add_deps_recursive: + pycuda: + version: "2024.1" + numpy: + version_max: "1.26.999" + version_max_usable: "1.26.4" versions: diff --git a/script/download-and-extract/meta.yaml b/script/download-and-extract/meta.yaml index 8a3b17d07..85fcd25ac 100644 --- a/script/download-and-extract/meta.yaml +++ b/script/download-and-extract/meta.yaml @@ -62,12 +62,14 @@ tags: tags_help: download-and-extract file uid: c67e81a4ce2649f5 variations: - cmutil: + mlcutil: add_deps: download-script: - tags: _cmutil + tags: _mlcutil default: true group: download-tool + cmutil: + alias: mlcutil curl: add_deps: download-script: diff --git a/script/download-file/customize.py b/script/download-file/customize.py index 6a2eb01e9..dbcde353e 100644 --- a/script/download-file/customize.py +++ b/script/download-file/customize.py @@ -204,6 +204,8 @@ def preprocess(i): logger.info(f"{env['MLC_DOWNLOAD_CMD']}") elif tool == "r2-downloader": + if is_true(env.get('MLC_AUTH_USING_SERVICE_ACCOUNT')): + extra_download_options += 
" -s " env['MLC_DOWNLOAD_CMD'] = f"bash <(curl -s https://raw.githubusercontent.com/mlcommons/r2-downloader/refs/heads/main/mlc-r2-downloader.sh) " if env["MLC_HOST_OS_TYPE"] == "windows": # have to modify the variable from url to temp_url if it is diff --git a/script/extract-file/customize.py b/script/extract-file/customize.py index f6b185fe0..692deef9e 100644 --- a/script/extract-file/customize.py +++ b/script/extract-file/customize.py @@ -54,6 +54,7 @@ def preprocess(i): if filename.endswith(".zip") or filename.endswith(".pth"): env['MLC_EXTRACT_TOOL'] = "unzip" + elif filename.endswith(".tar.gz"): if windows: x = '"' if ' ' in filename else '' @@ -67,6 +68,7 @@ def preprocess(i): else: env['MLC_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvzf ' env['MLC_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".tar.xz"): if windows: x = '"' if ' ' in filename else '' @@ -77,6 +79,7 @@ def preprocess(i): else: env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvJf' env['MLC_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".tar.bz2"): if windows: x = '"' if ' ' in filename else '' @@ -90,9 +93,11 @@ def preprocess(i): else: env['MLC_EXTRACT_TOOL_OPTIONS'] = ' --skip-old-files -xvjf ' env['MLC_EXTRACT_TOOL'] = 'tar ' - elif filename.endswith(".tar"): + + elif filename.endswith(".tar") or filename.endswith(".tgz"): env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf' env['MLC_EXTRACT_TOOL'] = 'tar ' + elif filename.endswith(".7z"): if windows: env['MLC_EXTRACT_TOOL'] = '7z' @@ -110,6 +115,7 @@ def preprocess(i): # unrar or unar may be available on Unix-like systems env['MLC_EXTRACT_TOOL'] = 'unrar' env['MLC_EXTRACT_TOOL_OPTIONS'] = ' x -y ' + elif filename.endswith(".gz"): # Check target filename extracted_filename = env.get('MLC_EXTRACT_EXTRACTED_FILENAME', '') @@ -123,15 +129,19 @@ def preprocess(i): ' > ' + q + extracted_filename + q + ' < ' env['MLC_EXTRACT_TOOL'] = 'gzip ' + elif is_true(env.get('MLC_EXTRACT_UNZIP', '')): env['MLC_EXTRACT_TOOL'] = 'unzip ' + elif 
is_true(env.get('MLC_EXTRACT_UNTAR', '')): env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -xvf' env['MLC_EXTRACT_TOOL'] = 'tar ' + elif is_true(env.get('MLC_EXTRACT_GZIP', '')): env['MLC_EXTRACT_CMD'] = 'gzip ' env['MLC_EXTRACT_TOOL_OPTIONS'] = ' -d ' + \ ('-k ' if not remove_extracted else '') + else: return {'return': 1, 'error': 'Neither MLC_EXTRACT_UNZIP nor MLC_EXTRACT_UNTAR is yes'} @@ -229,8 +239,6 @@ def postprocess(i): if not os.path.exists(filepath): return { 'return': 1, 'error': 'Path {} was not created or doesn\'t exist'.format(filepath)} -# return {'return':1, 'error': 'MLC_EXTRACT_EXTRACTED_FILENAME and -# MLC_EXTRACT_TO_FOLDER are not set'} env['MLC_EXTRACT_EXTRACTED_PATH'] = filepath diff --git a/script/extract-file/meta.yaml b/script/extract-file/meta.yaml index b4cbc9131..91bdc24f9 100644 --- a/script/extract-file/meta.yaml +++ b/script/extract-file/meta.yaml @@ -33,6 +33,7 @@ prehook_deps: [] tags: - extract - file +- extract-file tags_help: extract file uid: 3f0b76219d004817 variations: diff --git a/script/get-cuda-devices/README.md b/script/get-cuda-devices/README.md new file mode 100644 index 000000000..f3672bc05 --- /dev/null +++ b/script/get-cuda-devices/README.md @@ -0,0 +1,62 @@ +# README for get-cuda-devices +This README is automatically generated. Add custom content in [info.md](info.md). Please follow the [script execution document](https://docs.mlcommons.org/mlcflow/targets/script/execution-flow/) to understand more about the MLC script execution. + +`mlcflow` stores all local data under `$HOME/MLC` by default. So, if there is space constraint on the home directory and you have more space on say `/mnt/$USER`, you can do +``` +mkdir /mnt/$USER/MLC +ln -s /mnt/$USER/MLC $HOME/MLC +``` +You can also use the `ENV` variable `MLC_REPOS` to control this location but this will need a set after every system reboot. 
+ +## Setup + +If you are not on a Python development environment please refer to the [official docs](https://docs.mlcommons.org/mlcflow/install/) for the installation. + +```bash +python3 -m venv mlcflow +. mlcflow/bin/activate +pip install mlcflow +``` + +- Using a virtual environment is recommended (per `pip` best practices), but you may skip it or use `--break-system-packages` if needed. + +### Pull mlperf-automations + +Once `mlcflow` is installed: + +```bash +mlc pull repo mlcommons@mlperf-automations --pat= +``` +- `--pat` or `--ssh` is only needed if the repo is PRIVATE +- If `--pat` is avoided, you'll be asked to enter the password where you can enter your Private Access Token +- `--ssh` option can be used instead of `--pat=<>` option if you prefer to use SSH for accessing the github repository. +## Run Commands + +```bash +mlcr get,cuda-devices +``` + +No script specific inputs +### Generic Script Inputs + +| Name | Description | Choices | Default | +|------|-------------|---------|------| +| `--input` | Input to the script passed using the env key `MLC_INPUT` | | `` | +| `--output` | Output from the script passed using the env key `MLC_OUTPUT` | | `` | +| `--outdirname` | The directory to store the script output | | `cache directory ($HOME/MLC/repos/local/cache/<>) if the script is cacheable or else the current directory` | +| `--outbasename` | The output file/folder name | | `` | +| `--name` | | | `` | +| `--extra_cache_tags` | Extra cache tags to be added to the cached entry when the script results are saved | | `` | +| `--skip_compile` | Skip compilation | | `False` | +| `--skip_run` | Skip run | | `False` | +| `--accept_license` | Accept the required license requirement to run the script | | `False` | +| `--skip_system_deps` | Skip installing any system dependencies | | `False` | +| `--git_ssh` | Use SSH for git repos | | `False` | +| `--gh_token` | Github Token | | `` | +| `--hf_token` | Huggingface Token | | `` | +| `--verify_ssl` | Verify SSL | | 
`False` | +## Variations + +### Ungrouped + +- `with-pycuda` diff --git a/script/get-cuda-devices/meta.yaml b/script/get-cuda-devices/meta.yaml index 2dac1175a..2468f9a1b 100644 --- a/script/get-cuda-devices/meta.yaml +++ b/script/get-cuda-devices/meta.yaml @@ -8,7 +8,8 @@ tags: - get - cuda-devices -cache: false +cache: true +cache_expiration: 1d can_force_cache: true diff --git a/script/get-cuda-devices/run.bat b/script/get-cuda-devices/run.bat index 2b2c03d5c..b2f8c1f5f 100644 --- a/script/get-cuda-devices/run.bat +++ b/script/get-cuda-devices/run.bat @@ -16,9 +16,7 @@ echo. echo Compiling program ... echo. -cd %MLC_TMP_CURRENT_SCRIPT_PATH% - -"%MLC_NVCC_BIN_WITH_PATH%" print_cuda_devices.cu -allow-unsupported-compiler -DWINDOWS +"%MLC_NVCC_BIN_WITH_PATH%" %MLC_TMP_CURRENT_SCRIPT_PATH%\print_cuda_devices.cu -allow-unsupported-compiler -DWINDOWS IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% rem Return to the original path obtained in CM @@ -27,7 +25,5 @@ echo. echo Running program ... echo. -cd %MLC_TMP_CURRENT_PATH% - -%MLC_TMP_CURRENT_SCRIPT_PATH%\a.exe > tmp-run.out +.\a.exe > tmp-run.out IF %ERRORLEVEL% NEQ 0 EXIT %ERRORLEVEL% diff --git a/script/get-cuda-devices/run.sh b/script/get-cuda-devices/run.sh index c802ec930..ac2097058 100644 --- a/script/get-cuda-devices/run.sh +++ b/script/get-cuda-devices/run.sh @@ -18,9 +18,7 @@ echo "" echo "Compiling program ..." echo "" -cd ${MLC_TMP_CURRENT_SCRIPT_PATH} - -${MLC_NVCC_BIN_WITH_PATH} -allow-unsupported-compiler print_cuda_devices.cu +${MLC_NVCC_BIN_WITH_PATH} -allow-unsupported-compiler ${MLC_TMP_CURRENT_SCRIPT_PATH}/print_cuda_devices.cu test $? -eq 0 || exit 1 # Return to the original path obtained in MLC @@ -29,7 +27,5 @@ echo "" echo "Running program ..." echo "" -cd ${MLC_TMP_CURRENT_PATH} - -${MLC_TMP_CURRENT_SCRIPT_PATH}/a.out > tmp-run.out +./a.out > tmp-run.out test $? 
-eq 0 || exit 1 diff --git a/script/get-generic-python-lib/README.md b/script/get-generic-python-lib/README.md new file mode 100644 index 000000000..589d62eab --- /dev/null +++ b/script/get-generic-python-lib/README.md @@ -0,0 +1,174 @@ +# README for get-generic-python-lib +This README is automatically generated. Add custom content in [info.md](info.md). Please follow the [script execution document](https://docs.mlcommons.org/mlcflow/targets/script/execution-flow/) to understand more about the MLC script execution. + +`mlcflow` stores all local data under `$HOME/MLC` by default. So, if there is space constraint on the home directory and you have more space on say `/mnt/$USER`, you can do +``` +mkdir /mnt/$USER/MLC +ln -s /mnt/$USER/MLC $HOME/MLC +``` +You can also use the `ENV` variable `MLC_REPOS` to control this location but this will need a set after every system reboot. + +## Setup + +If you are not on a Python development environment please refer to the [official docs](https://docs.mlcommons.org/mlcflow/install/) for the installation. + +```bash +python3 -m venv mlcflow +. mlcflow/bin/activate +pip install mlcflow +``` + +- Using a virtual environment is recommended (per `pip` best practices), but you may skip it or use `--break-system-packages` if needed. + +### Pull mlperf-automations + +Once `mlcflow` is installed: + +```bash +mlc pull repo mlcommons@mlperf-automations --pat= +``` +- `--pat` or `--ssh` is only needed if the repo is PRIVATE +- If `--pat` is avoided, you'll be asked to enter the password where you can enter your Private Access Token +- `--ssh` option can be used instead of `--pat=<>` option if you prefer to use SSH for accessing the github repository. 
+## Run Commands + +```bash +mlcr get generic-python-lib +``` + +### Script Inputs + +| Name | Description | Choices | Default | +|------|-------------|---------|------| +| `--extra_index_url` | | | `` | +| `--force_install` | | | `` | +| `--index_url` | | | `` | +### Generic Script Inputs + +| Name | Description | Choices | Default | +|------|-------------|---------|------| +| `--input` | Input to the script passed using the env key `MLC_INPUT` | | `` | +| `--output` | Output from the script passed using the env key `MLC_OUTPUT` | | `` | +| `--outdirname` | The directory to store the script output | | `cache directory ($HOME/MLC/repos/local/cache/<>) if the script is cacheable or else the current directory` | +| `--outbasename` | The output file/folder name | | `` | +| `--name` | | | `` | +| `--extra_cache_tags` | Extra cache tags to be added to the cached entry when the script results are saved | | `` | +| `--skip_compile` | Skip compilation | | `False` | +| `--skip_run` | Skip run | | `False` | +| `--accept_license` | Accept the required license requirement to run the script | | `False` | +| `--skip_system_deps` | Skip installing any system dependencies | | `False` | +| `--git_ssh` | Use SSH for git repos | | `False` | +| `--gh_token` | Github Token | | `` | +| `--hf_token` | Huggingface Token | | `` | +| `--verify_ssl` | Verify SSL | | `False` | +## Variations + +### Ungrouped + +- `Pillow` +- `anthropic` +- `apache-tvm` +- `apex` +- `async_timeout` +- `attr` +- `attrs` +- `boto3` +- `cloudpickle` +- `cmind` +- `colored` +- `conda.#` _(# can be substituted dynamically)_ +- `cupy` +- `custom-python` +- `cxx11-abi` +- `datasets` +- `decorator` +- `deepsparse` +- `dllogger` +- `extra-index-url.#` _(# can be substituted dynamically)_ +- `fiftyone` +- `find_links_url.#` _(# can be substituted dynamically)_ +- `google-api-python-client` +- `google-auth-oauthlib` +- `google-generativeai` +- `groq` +- `huggingface_hub` +- `index-url.#` _(# can be substituted 
dynamically)_ +- `inflect` +- `jax` +- `jax_cuda` +- `librosa` +- `matplotlib` +- `mlperf_loadgen` +- `mlperf_logging` +- `mpld3` +- `mxeval` +- `nibabel` +- `no-deps` +- `numpy` +- `nvidia-apex` +- `nvidia-apex-from-src` +- `nvidia-dali` +- `nvidia-pycocotools` (base: pycocotools) +- `nvidia-pyindex` +- `nvidia-tensorrt` +- `onnx` +- `onnx-graphsurgeon` +- `onnxruntime` +- `onnxruntime_gpu` +- `openai` +- `opencv-python` +- `package.#` _(# can be substituted dynamically)_ +- `pandas` +- `path.#` _(# can be substituted dynamically)_ +- `pdfplumber` +- `pillow` +- `pip` +- `polygraphy` +- `pre` +- `protobuf` +- `psutil` +- `pycocotools` +- `pycuda` +- `python-dotenv` +- `quark-amd` +- `ray` +- `requests` +- `rocm` +- `safetensors` +- `scikit-learn` +- `scipy` +- `scons` +- `setfit` +- `setuptools` +- `six` +- `sklearn` +- `sox` +- `sparsezoo` +- `streamlit` +- `streamlit_option_menu` +- `tensorboard` +- `tensorflow` +- `tensorrt` +- `tflite` +- `tflite-runtime` +- `tokenization` +- `toml` +- `torch` +- `torch_cuda` +- `torch_tensorrt` +- `torchaudio` +- `torchaudio_cuda` +- `torchvision` +- `torchvision_cuda` +- `tornado` +- `tqdm` +- `transformers` +- `typing_extensions` +- `ujson` +- `unidecode` +- `url.#` _(# can be substituted dynamically)_ +- `wandb` +- `west` +- `whl-url.#` _(# can be substituted dynamically)_ +- `xgboost` +- `xlsxwriter` diff --git a/script/get-generic-python-lib/meta.yaml b/script/get-generic-python-lib/meta.yaml index 6eb0b0a78..13d38c0e5 100644 --- a/script/get-generic-python-lib/meta.yaml +++ b/script/get-generic-python-lib/meta.yaml @@ -166,6 +166,16 @@ variations: MLC_GENERIC_PYTHON_PACKAGE_NAME: google_auth_oauthlib new_env_keys: - MLC_GOOGLE_AUTH_OAUTHLIB_VERSION + google-generativeai: + env: + MLC_GENERIC_PYTHON_PACKAGE_NAME: google-generativeai + new_env_keys: + - MLC_GOOGLE_GENERATIVEAI_VERSION + groq: + env: + MLC_GENERIC_PYTHON_PACKAGE_NAME: groq + new_env_keys: + - MLC_GROQ_VERSION huggingface_hub: env: 
MLC_GENERIC_PYTHON_PACKAGE_NAME: huggingface_hub @@ -381,6 +391,11 @@ variations: MLC_GENERIC_PYTHON_PIP_EXTRA_INDEX_URL: https://pypi.ngc.nvidia.com new_env_keys: - MLC_POLYGRAPHY_VERSION + pdfplumber: + env: + MLC_GENERIC_PYTHON_PACKAGE_NAME: pdfplumber + new_env_keys: + - MLC_PDFPLUMBER_VERSION pre: env: MLC_GENERIC_PYTHON_DEV_VERSION: 'yes' @@ -394,6 +409,11 @@ variations: MLC_GENERIC_PYTHON_PACKAGE_NAME: psutil new_env_keys: - MLC_PSUTIL_VERSION + python-dotenv: + env: + MLC_GENERIC_PYTHON_PACKAGE_NAME: python-dotenv + new_env_keys: + - MLC_PYTHON_DOTENV_VERSION pycocotools: env: MLC_GENERIC_PYTHON_PACKAGE_NAME: pycocotools diff --git a/script/get-generic-python-lib/validate_cache.sh b/script/get-generic-python-lib/validate_cache.sh index 9a98e610e..e6223df5f 100644 --- a/script/get-generic-python-lib/validate_cache.sh +++ b/script/get-generic-python-lib/validate_cache.sh @@ -1,7 +1,8 @@ #!/bin/bash MLC_TMP_CURRENT_SCRIPT_PATH="${MLC_TMP_CURRENT_SCRIPT_PATH:-$PWD}" - -"${MLC_PYTHON_BIN_WITH_PATH}" "${MLC_TMP_CURRENT_SCRIPT_PATH}/detect-version.py" +cmd="${MLC_PYTHON_BIN_WITH_PATH} ${MLC_TMP_CURRENT_SCRIPT_PATH}/detect-version.py" +echo $cmd +eval $cmd test $? -eq 0 || exit $? 
exit 0 diff --git a/script/get-generic-sys-util/README.md b/script/get-generic-sys-util/README.md index 659340b99..1150067ac 100644 --- a/script/get-generic-sys-util/README.md +++ b/script/get-generic-sys-util/README.md @@ -84,6 +84,8 @@ mlcr get,sys-util,generic,generic-sys-util - `gcc-11` - `gcc-9` - `gflags-dev` +- `gfortran-12-aarch64-linux-gnu` +- `gfortran-aarch64-linux-gnu` - `git-lfs` - `glog-dev` - `ipmitool` diff --git a/script/get-generic-sys-util/meta.yaml b/script/get-generic-sys-util/meta.yaml index 4862a0d35..fd8565674 100644 --- a/script/get-generic-sys-util/meta.yaml +++ b/script/get-generic-sys-util/meta.yaml @@ -171,12 +171,24 @@ variations: new_env_keys: - MLC_SOX_VERSION state: - ipmitool: # tbd: complete for other flavours of linux + sox: # tbd: complete for other flavours of linux apt: sox brew: sox dnf: sox yum: '' - + gfortran-12-aarch64-linux-gnu: + env: + MLC_SYS_UTIL_NAME: gfortran-12-aarch64 + state: + gfortran-12-aarch64: # tbd: complete for other flavours of linux + apt: gfortran-12-aarch64-linux-gnu + + gfortran-aarch64-linux-gnu: + env: + MLC_SYS_UTIL_NAME: gfortran-aarch64 + state: + gfortran-aarch64: # tbd: complete for other flavours of linux + apt: gfortran-aarch64-linux-gnu libgl: env: MLC_SYS_UTIL_NAME: libgl # tbd: regular expression for version as well as whether its installed? diff --git a/script/get-lib-armpl/README.md b/script/get-lib-armpl/README.md new file mode 100644 index 000000000..644095bd4 --- /dev/null +++ b/script/get-lib-armpl/README.md @@ -0,0 +1,62 @@ +# README for get-lib-armpl +This README is automatically generated. Add custom content in [info.md](info.md). Please follow the [script execution document](https://docs.mlcommons.org/mlcflow/targets/script/execution-flow/) to understand more about the MLC script execution. + +`mlcflow` stores all local data under `$HOME/MLC` by default. 
So, if there is space constraint on the home directory and you have more space on say `/mnt/$USER`, you can do +``` +mkdir /mnt/$USER/MLC +ln -s /mnt/$USER/MLC $HOME/MLC +``` +You can also use the `ENV` variable `MLC_REPOS` to control this location but this will need a set after every system reboot. + +## Setup + +If you are not on a Python development environment please refer to the [official docs](https://docs.mlcommons.org/mlcflow/install/) for the installation. + +```bash +python3 -m venv mlcflow +. mlcflow/bin/activate +pip install mlcflow +``` + +- Using a virtual environment is recommended (per `pip` best practices), but you may skip it or use `--break-system-packages` if needed. + +### Pull mlperf-automations + +Once `mlcflow` is installed: + +```bash +mlc pull repo mlcommons@mlperf-automations --pat= +``` +- `--pat` or `--ssh` is only needed if the repo is PRIVATE +- If `--pat` is avoided, you'll be asked to enter the password where you can enter your Private Access Token +- `--ssh` option can be used instead of `--pat=<>` option if you prefer to use SSH for accessing the github repository. 
+## Run Commands + +```bash +mlcr armpl,lib,get,arm +``` + +No script specific inputs +### Generic Script Inputs + +| Name | Description | Choices | Default | +|------|-------------|---------|------| +| `--input` | Input to the script passed using the env key `MLC_INPUT` | | `` | +| `--output` | Output from the script passed using the env key `MLC_OUTPUT` | | `` | +| `--outdirname` | The directory to store the script output | | `cache directory ($HOME/MLC/repos/local/cache/<>) if the script is cacheable or else the current directory` | +| `--outbasename` | The output file/folder name | | `` | +| `--name` | | | `` | +| `--extra_cache_tags` | Extra cache tags to be added to the cached entry when the script results are saved | | `` | +| `--skip_compile` | Skip compilation | | `False` | +| `--skip_run` | Skip run | | `False` | +| `--accept_license` | Accept the required license requirement to run the script | | `False` | +| `--skip_system_deps` | Skip installing any system dependencies | | `False` | +| `--git_ssh` | Use SSH for git repos | | `False` | +| `--gh_token` | Github Token | | `` | +| `--hf_token` | Huggingface Token | | `` | +| `--verify_ssl` | Verify SSL | | `False` | +## Variations + +### Version + +- `version.25.07` (default) diff --git a/script/get-lib-armpl/customize.py b/script/get-lib-armpl/customize.py new file mode 100644 index 000000000..1a51b32cc --- /dev/null +++ b/script/get-lib-armpl/customize.py @@ -0,0 +1,44 @@ +from mlc import utils +import os + + +def preprocess(i): + os_info = i['os_info'] + if os_info['platform'] == 'windows': + return {'return': 1, 'error': 'Windows is not supported in this script yet'} + + env = i['env'] + return {'return': 0} + + +def postprocess(i): + + env = i['env'] + + paths = [ + "+C_INCLUDE_PATH", + "+CPLUS_INCLUDE_PATH", + "+LD_LIBRARY_PATH", + "+DYLD_FALLBACK_LIBRARY_PATH" + ] + + for key in paths: + env[key] = [] + + armpl_install_path = env.get( + 'MLC_EXTRACT_EXTRACTED_SUBDIR_PATH', + 
env['MLC_ARMPL_INSTALL_PATH']) + + inc_path = os.path.join(armpl_install_path, 'include') + + env['+C_INCLUDE_PATH'].append(inc_path) + env['+CPLUS_INCLUDE_PATH'].append(inc_path) + + lib_path = os.path.join(armpl_install_path, 'lib') + env['+LD_LIBRARY_PATH'].append(lib_path) + env['+DYLD_FALLBACK_LIBRARY_PATH'].append(lib_path) + + env['MLC_ARMPL_INCLUDE_PATH'] = inc_path + env['MLC_ARMPL_LIB_PATH'] = lib_path + + return {'return': 0} diff --git a/script/get-lib-armpl/meta.yaml b/script/get-lib-armpl/meta.yaml new file mode 100644 index 000000000..8ee295a27 --- /dev/null +++ b/script/get-lib-armpl/meta.yaml @@ -0,0 +1,65 @@ +alias: get-lib-armpl +automation_alias: script +automation_uid: 5b4e0237da074764 +cache: true +category: Detection or installation of tools and artifacts +deps: +- tags: detect,os +new_env_keys: +- MLC_ARMPL_SRC_PATH +- MLC_ARMPL_INSTALL_PATH +- MLC_ARMPL_INCLUDE_PATH +- MLC_ARMPL_LIB_PATH +- MLC_ARMPL_VERSION +- +C_INCLUDE_PATH +- +CPLUS_INCLUDE_PATH +- +DYLD_FALLBACK_LIBRARY_PATH +- +LD_LIBRARY_PATH +prehook_deps: + - force_cache: true + extra_cache_tags: armpl,armpl-src + env: + MLC_DAE_FINAL_ENV_NAME: MLC_ARMPL_SRC_PATH + names: + - dae + - dae-armpl + tags: download-and-extract,_extract + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _url.: + - MLC_DOWNLOAD_URL + + - tags: extract,file + env: + MLC_EXTRACT_TO_FOLDER1: armpl-install + MLC_EXTRACT_FINAL_ENV_NAME: MLC_ARMPL_INSTALL_PATH + MLC_EXTRACT_FILE_PATH: <<>>/<<>> + names: + - extract + - extract-armpl + extra_cache_tags: armpl,armpl-lib + force_cache: true + force_env_keys: + - MLC_OUTDIRNAME + update_tags_from_env_with_prefix: + _path.: + - MLC_EXTRACT_FILE_PATH + +tags: +- armpl +- lib +- get +- arm +tests: + run_inputs: + - {} +uid: 01ebcd701d8f480f +variations: + version.25.07: + default: true + group: version + env: + MLC_ARMPL_VERSION: '25.07.0' + MLC_ARMPL_TAR_FILENAME: armpl-25.07.0.tgz + MLC_DOWNLOAD_URL: 
https://developer.arm.com/-/cdn-downloads/permalink/Arm-Performance-Libraries/Version_25.07/arm-performance-libraries_25.07_acfl2410.tar diff --git a/script/get-ml-model-gptj/meta.yaml b/script/get-ml-model-gptj/meta.yaml index e20408f11..3c7e86cb8 100644 --- a/script/get-ml-model-gptj/meta.yaml +++ b/script/get-ml-model-gptj/meta.yaml @@ -154,7 +154,7 @@ variations: - names: - cuda tags: get,cuda - - tags: get,cuda-devices,_with-pycuda + - tags: get,cuda-devices enable_if_env: MLC_HOST_OS_FLAVOR: - ubuntu diff --git a/script/get-ml-model-llama2/meta.yaml b/script/get-ml-model-llama2/meta.yaml index 97ef5a13e..54397aff9 100644 --- a/script/get-ml-model-llama2/meta.yaml +++ b/script/get-ml-model-llama2/meta.yaml @@ -10,6 +10,9 @@ env: MLC_ML_MODEL_WEIGHT_TRANSFORMATIONS: 'no' input_mapping: checkpoint: LLAMA2_CHECKPOINT_PATH + use_service_account: MLC_AUTH_USING_SERVICE_ACCOUNT + client_id: CF_ACCESS_CLIENT_ID + client_secret: CF_ACCESS_CLIENT_SECRET new_env_keys: - MLC_ML_MODEL_* - LLAMA2_CHECKPOINT_PATH @@ -290,7 +293,7 @@ variations: - names: - cuda tags: get,cuda - - tags: get,cuda-devices,_with-pycuda + - tags: get,cuda-devices - names: - nvidia-inference-common-code tags: get,nvidia,inference,common-code diff --git a/script/get-mlperf-inference-nvidia-common-code/meta.yaml b/script/get-mlperf-inference-nvidia-common-code/meta.yaml index 76fa3b666..f9ad12872 100644 --- a/script/get-mlperf-inference-nvidia-common-code/meta.yaml +++ b/script/get-mlperf-inference-nvidia-common-code/meta.yaml @@ -53,6 +53,10 @@ variations: add_deps_recursive: mlperf-inference-results: tags: _code-only-for-v5.1,_v5.1-dev + v5.1: + add_deps_recursive: + mlperf-inference-results: + tags: _code-only,_v5.1 versions: r2.1: add_deps_recursive: diff --git a/script/get-mlperf-inference-results/meta.yaml b/script/get-mlperf-inference-results/meta.yaml index f669e7477..ae1dda633 100644 --- a/script/get-mlperf-inference-results/meta.yaml +++ b/script/get-mlperf-inference-results/meta.yaml @@ 
-87,6 +87,12 @@ variations: MLC_GIT_URL: https://github.com/<<>>/inference_results_v5.0.git MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v5.1-dev MLC_VERSION: "v5.0" + v5.1: + group: version + env: + MLC_GIT_URL: https://github.com/<<>>/inference_results_v5.1.git + MLC_MLPERF_INFERENCE_RESULTS_VERSION_NAME: v5.1 + MLC_VERSION: "v5.1" versions: v2.1: env: diff --git a/script/get-mlperf-inference-sut-description/meta.yaml b/script/get-mlperf-inference-sut-description/meta.yaml index 573a6c842..3ba393335 100644 --- a/script/get-mlperf-inference-sut-description/meta.yaml +++ b/script/get-mlperf-inference-sut-description/meta.yaml @@ -22,7 +22,7 @@ deps: MLC_MLPERF_DEVICE: - gpu - cuda - tags: get,cuda-devices,_with-pycuda + tags: get,cuda-devices - enable_if_env: MLC_DETERMINE_MEMORY_CONFIGURATION: - 'yes' diff --git a/script/get-preprocessed-dataset-openorca/customize.py b/script/get-preprocessed-dataset-openorca/customize.py index 96b0bb883..4d6af7503 100644 --- a/script/get-preprocessed-dataset-openorca/customize.py +++ b/script/get-preprocessed-dataset-openorca/customize.py @@ -71,7 +71,7 @@ def postprocess(i): '') == "nvidia": env['MLC_DATASET_OPENORCA_NVIDIA_PREPROCESSED_PATH'] = os.path.join( env['MLC_OPENORCA_PREPROCESSED_ROOT'], - "preprocessed") + "preprocessed_data") env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_OPENORCA_NVIDIA_PREPROCESSED_PATH'] else: env['MLC_GET_DEPENDENT_CACHED_PATH'] = env['MLC_DATASET_PREPROCESSED_PATH'] diff --git a/script/install-llvm-src/README.md b/script/install-llvm-src/README.md new file mode 100644 index 000000000..7a596672b --- /dev/null +++ b/script/install-llvm-src/README.md @@ -0,0 +1,114 @@ +# README for install-llvm-src +This README is automatically generated. Add custom content in [info.md](info.md). Please follow the [script execution document](https://docs.mlcommons.org/mlcflow/targets/script/execution-flow/) to understand more about the MLC script execution. 
+
+`mlcflow` stores all local data under `$HOME/MLC` by default. So, if there is a space constraint on the home directory and you have more space on, say, `/mnt/$USER`, you can do
+```
+mkdir /mnt/$USER/MLC
+ln -s /mnt/$USER/MLC $HOME/MLC
+```
+You can also use the `ENV` variable `MLC_REPOS` to control this location, but this will need to be set after every system reboot.
+
+## Setup
+
+If you are not in a Python development environment, please refer to the [official docs](https://docs.mlcommons.org/mlcflow/install/) for the installation.
+
+```bash
+python3 -m venv mlcflow
+. mlcflow/bin/activate
+pip install mlcflow
+```
+
+- Using a virtual environment is recommended (per `pip` best practices), but you may skip it or use `--break-system-packages` if needed.
+
+### Pull mlperf-automations
+
+Once `mlcflow` is installed:
+
+```bash
+mlc pull repo mlcommons@mlperf-automations --pat=
+```
+- `--pat` or `--ssh` is only needed if the repo is PRIVATE
+- If `--pat` is omitted, you'll be prompted for a password, where you can enter your Private Access Token
+- The `--ssh` option can be used instead of the `--pat=<>` option if you prefer to use SSH for accessing the GitHub repository. 
+## Run Commands + +```bash +mlcr install,src,llvm,from.src,src-llvm +``` + +### Script Inputs + +| Name | Description | Choices | Default | +|------|-------------|---------|------| +| `--targets` | | | `` | +| `--target_triple` | | | `` | +| `--extra_options` | | | `` | +### Generic Script Inputs + +| Name | Description | Choices | Default | +|------|-------------|---------|------| +| `--input` | Input to the script passed using the env key `MLC_INPUT` | | `` | +| `--output` | Output from the script passed using the env key `MLC_OUTPUT` | | `` | +| `--outdirname` | The directory to store the script output | | `cache directory ($HOME/MLC/repos/local/cache/<>) if the script is cacheable or else the current directory` | +| `--outbasename` | The output file/folder name | | `` | +| `--name` | | | `` | +| `--extra_cache_tags` | Extra cache tags to be added to the cached entry when the script results are saved | | `` | +| `--skip_compile` | Skip compilation | | `False` | +| `--skip_run` | Skip run | | `False` | +| `--accept_license` | Accept the required license requirement to run the script | | `False` | +| `--skip_system_deps` | Skip installing any system dependencies | | `False` | +| `--git_ssh` | Use SSH for git repos | | `False` | +| `--gh_token` | Github Token | | `` | +| `--hf_token` | Huggingface Token | | `` | +| `--verify_ssl` | Verify SSL | | `False` | +## Variations + +### Build-type + +- `debug` +- `release` (default) + +### Clang + +- `clang` (default) +- `no-clang` + +### Compiler-rt + +- `compiler-rt` (default) +- `no-compiler-rt` + +### Flang + +- `flang` + +### Libcxx + +- `libcxx` (default) +- `no-libcxx` + +### Lld + +- `lld` (default) +- `no-lld` + +### Repo + +- `repo.#` _(# can be substituted dynamically)_ + +### Ungrouped + +- `branch.#` _(# can be substituted dynamically)_ +- `cross-compile-x86-aarch64` +- `for-intel-mlperf-inference-v3.1-bert` (base: tag.llvmorg-15.0.7, clang, release) +- `for-intel-mlperf-inference-v3.1-gptj` (base: 
tag.llvmorg-16.0.6, clang, release) +- `full-history` +- `path.#` _(# can be substituted dynamically)_ +- `runtimes.#` _(# can be substituted dynamically)_ +- `sha.#` _(# can be substituted dynamically)_ +- `tag.#` _(# can be substituted dynamically)_ (base: full-history) + +### Version + +- `version.#` _(# can be substituted dynamically)_ +- `version.main` (base: branch.main) diff --git a/script/install-llvm-src/meta.yaml b/script/install-llvm-src/meta.yaml index 3928ff868..d145e44c6 100644 --- a/script/install-llvm-src/meta.yaml +++ b/script/install-llvm-src/meta.yaml @@ -93,6 +93,24 @@ variations: +MLC_LLVM_ENABLE_RUNTIMES: - flang-rt group: flang + compiler-rt: + group: compiler-rt + default: true + env: + +MLC_LLVM_ENABLE_PROJECTS: + - compiler-rt + no-compiler-rt: + group: compiler-rt + libcxx: + group: libcxx + default: true + env: + +MLC_LLVM_ENABLE_RUNTIMES: + - libcxx + - libcxxabi + - libunwind + no-libcxx: + group: libcxx lld: default: true env: @@ -114,8 +132,10 @@ variations: cross-compile-x86-aarch64: deps: - tags: get,generic-sys-util,_crossbuild-essential-arm64 + - tags: get,generic-sys-util,_gfortran-12-aarch64-linux-gnu + env: - MLC_LLVM_CROSS_COMPILE_FLAGS: "-DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_PROCESSOR=aarch64 -DLLVM_DEFAULT_TARGET_TRIPLE=aarch64-linux-gnu -DCMAKE_Fortran_COMPILER=aarch64-linux-gnu-gfortran -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++" + MLC_LLVM_CROSS_COMPILE_FLAGS: "-DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_PROCESSOR=aarch64 -DLLVM_DEFAULT_TARGET_TRIPLE=aarch64-linux-gnu -DCMAKE_Fortran_COMPILER=aarch64-linux-gnu-gfortran-12 -DCMAKE_C_COMPILER=aarch64-linux-gnu-gcc -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++" for-intel-mlperf-inference-v3.1-bert: adr: conda-package: diff --git a/script/run-docker-container/customize.py b/script/run-docker-container/customize.py index aa27dcfb6..ee763ed6d 100644 --- a/script/run-docker-container/customize.py +++ 
b/script/run-docker-container/customize.py @@ -288,7 +288,7 @@ def postprocess(i): stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) - logger.info("Command Output:", result.stdout) + logger.info(f"Command Output: {result.stdout}") except subprocess.CalledProcessError as e: logger.error("Error Occurred!") logger.info(f"Command: {e.cmd}") diff --git a/script/run-mlperf-inference-app/README.md b/script/run-mlperf-inference-app/README.md index f00a4cda1..8f6f6ff27 100644 --- a/script/run-mlperf-inference-app/README.md +++ b/script/run-mlperf-inference-app/README.md @@ -57,6 +57,8 @@ mlcr run-mlperf,inference | `--adr.mlperf-power-client.power_server` | MLPerf Power server IP address | | `192.168.0.15` | | `--adr.mlperf-power-client.port` | MLPerf Power server port | | `4950` | | `--results_dir` | Alias for output_dir | | `` | +| `--use_dataset_from_host` | Run the dataset download script on the host machine and mount the dataset into the Docker container to avoid repeated downloads. | [True, False] | `no` | +| `--use_model_from_host` | Run the model download script on the host machine and mount the model files into the Docker container to avoid repeated downloads. 
| [True, False] | `no` | | `--adr.compiler.tags` | Compiler for loadgen and any C/C++ part of implementation | | `` | | `--adr.inference-src-loadgen.env.MLC_GIT_URL` | Git URL for MLPerf inference sources to build LoadGen (to enable non-reference implementations) | | `` | | `--adr.inference-src.env.MLC_GIT_URL` | Git URL for MLPerf inference sources to run benchmarks (to enable non-reference implementations) | | `` | @@ -133,13 +135,14 @@ mlcr run-mlperf,inference | `--max_test_duration` | | | `` | | `--all_models` | | | `` | | `--criteo_day23_raw_data_path` | | | `` | -| `--use_dataset_from_host` | | | `` | -| `--use_model_from_host` | | | `` | | `--rgat_checkpoint_path` | | | `` | | `--pointpainting_checkpoint_path` | | | `` | | `--deeplab_resnet50_path` | | | `` | | `--waymo_path` | | | `` | | `--nm_model_zoo_stub` | | | `` | +| `--use_service_account` | | | `` | +| `--client_id` | | | `` | +| `--client_secret` | | | `` | ### Generic Script Inputs | Name | Description | Choices | Default | @@ -171,6 +174,7 @@ mlcr run-mlperf,inference - `r4.1-dev` - `r5.0` - `r5.0-dev` +- `r5.1` - `r5.1-dev` (default) ### Mode diff --git a/script/run-mlperf-inference-app/meta.yaml b/script/run-mlperf-inference-app/meta.yaml index fd503d090..3457f4c06 100644 --- a/script/run-mlperf-inference-app/meta.yaml +++ b/script/run-mlperf-inference-app/meta.yaml @@ -127,6 +127,9 @@ input_mapping: deeplab_resnet50_path: MLC_ML_MODEL_DPLAB_RESNET50_PATH waymo_path: MLC_DATASET_WAYMO_PATH nm_model_zoo_stub: MLC_MLPERF_NEURALMAGIC_MODEL_ZOO_STUB + use_service_account: MLC_AUTH_USING_SERVICE_ACCOUNT + client_id: CF_ACCESS_CLIENT_ID + client_secret: CF_ACCESS_CLIENT_SECRET new_state_keys: - app_mlperf_inference_* @@ -392,6 +395,22 @@ variations: mlperf-inference-nvidia-scratch-space: tags: _version.r5.1-dev + r5.1: + env: + MLC_MLPERF_INFERENCE_VERSION: '5.1' + MLC_RUN_MLPERF_INFERENCE_APP_DEFAULTS: r5.1_default + MLC_MLPERF_SUBMISSION_CHECKER_VERSION: v5.1 + MLC_CUSTOM_SYSTEM_NVIDIA: no # 
prevents build-mlperf-inference-server-nvidia from calling add-custom-system-nvidia + SYSTEM_NAME: '<<>>' + group: benchmark-version + adr: + get-mlperf-inference-results-dir: + tags: _version.r5.1 + get-mlperf-inference-submission-dir: + tags: _version.r5.1 + mlperf-inference-nvidia-scratch-space: + tags: _version.r5.1 + short: add_deps_recursive: submission-checker: @@ -549,6 +568,18 @@ input_description: submission_dir: desc: Folder path to store MLPerf submission tree sort: 1000 + use_dataset_from_host: + desc: Run the dataset download script on the host machine and mount the dataset into the Docker container to avoid repeated downloads. + choices: + - yes + - no + default: 'no' + use_model_from_host: + desc: Run the model download script on the host machine and mount the model files into the Docker container to avoid repeated downloads. + choices: + - yes + - no + default: 'no' adr.compiler.tags: desc: Compiler for loadgen and any C/C++ part of implementation