From b70e9b93f564ef8d2b94c6d9bca57832a6ff521d Mon Sep 17 00:00:00 2001
From: Jiafu Zhang
Date: Fri, 21 Jun 2024 19:46:00 +0800
Subject: [PATCH] fixed benchmark error after removing HF token from build log

Signed-off-by: Jiafu Zhang
---
 .github/workflows/workflow_test_benchmark.yml | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/.github/workflows/workflow_test_benchmark.yml b/.github/workflows/workflow_test_benchmark.yml
index ee8f7f43..d737675c 100644
--- a/.github/workflows/workflow_test_benchmark.yml
+++ b/.github/workflows/workflow_test_benchmark.yml
@@ -92,24 +92,6 @@ jobs:
           TARGET=${{steps.target.outputs.target}}
           # Additional libraries required for pytest
           docker exec "${TARGET}" bash -c "pip install -r tests/requirements.txt"
-          CMD=$(cat << EOF
-          import yaml
-          conf_path = "llm_on_ray/inference/models/llama-2-7b-chat-hf.yaml"
-          with open(conf_path, encoding="utf-8") as reader:
-              result = yaml.load(reader, Loader=yaml.FullLoader)
-          result['model_description']["config"]["use_auth_token"] = "${{ env.HF_ACCESS_TOKEN }}"
-          with open(conf_path, 'w') as output:
-              yaml.dump(result, output, sort_keys=False)
-          conf_path = "llm_on_ray/inference/models/vllm/llama-2-7b-chat-hf-vllm.yaml"
-          with open(conf_path, encoding="utf-8") as reader:
-              result = yaml.load(reader, Loader=yaml.FullLoader)
-          result['model_description']["config"]["use_auth_token"] = "${{ env.HF_ACCESS_TOKEN }}"
-          with open(conf_path, 'w') as output:
-              yaml.dump(result, output, sort_keys=False)
-          EOF
-          )
-          docker exec "${TARGET}" python -c "$CMD"
-          docker exec "${TARGET}" bash -c "huggingface-cli login --token ${{ env.HF_ACCESS_TOKEN }}"
           docker exec "${TARGET}" bash -c "./tests/run-tests-benchmark.sh"
       - name: Stop Ray
         run: |