diff --git a/.azure-pipelines/compatibility-test.yml b/.azure-pipelines/compatibility-test.yml
new file mode 100644
index 000000000..3cdb8ace6
--- /dev/null
+++ b/.azure-pipelines/compatibility-test.yml
@@ -0,0 +1,79 @@
+trigger: none
+
+pr:
+  autoCancel: true
+  drafts: false
+  branches:
+    include:
+      - main
+  paths:
+    include:
+      - auto_round
+      - auto_round_extension
+      - setup.py
+      - setup.cfg
+      - requirements.txt
+      - requirements-cpu.txt
+      - .azure-pipelines/compatibility-test.yml
+    exclude:
+      - "*.md"
+      - "**/*.md"
+
+stages:
+  - stage:
+    displayName: Compatibility Test
+    dependsOn: []
+    jobs:
+      - job:
+        timeoutInMinutes: 20
+        strategy:
+          matrix:
+            Python310_Linux:
+              python_version: '3.10'
+              vmImage: 'ubuntu-latest'
+            Python311_Linux:
+              python_version: '3.11'
+              vmImage: 'ubuntu-latest'
+            Python312_Linux:
+              python_version: '3.12'
+              vmImage: 'ubuntu-latest'
+            Python313_Linux:
+              python_version: '3.13'
+              vmImage: 'ubuntu-latest'
+
+            Python310_Windows:
+              python_version: '3.10'
+              vmImage: 'windows-latest'
+            Python311_Windows:
+              python_version: '3.11'
+              vmImage: 'windows-latest'
+            Python312_Windows:
+              python_version: '3.12'
+              vmImage: 'windows-latest'
+            Python313_Windows:
+              python_version: '3.13'
+              vmImage: 'windows-latest'
+
+        pool:
+          vmImage: $(vmImage)
+
+        steps:
+          - task: UsePythonVersion@0
+            inputs:
+              versionSpec: '$(python_version)'
+            displayName: 'Use Python $(python_version)'
+
+          - script: |
+              python -m pip install --upgrade pip uv
+              uv pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu
+              uv pip install .
+              pip list
+            env:
+              PYTHONUNBUFFERED: '1'
+              UV_NO_PROGRESS: '1'
+              UV_SYSTEM_PYTHON: '1'
+            displayName: 'Install dependencies'
+
+          - script: |
+              python -c "import auto_round"
+            displayName: 'Run compatibility test'
diff --git a/.azure-pipelines/scripts/ut/run_ut.sh b/.azure-pipelines/scripts/ut/run_ut.sh
index cf3997357..50f8238bd 100644
--- a/.azure-pipelines/scripts/ut/run_ut.sh
+++ b/.azure-pipelines/scripts/ut/run_ut.sh
@@ -27,12 +27,20 @@ mkdir -p ${LOG_DIR}
 ut_log_name=${LOG_DIR}/ut.log
 
 # Split test files into 5 parts
-find . -name "test*.py" ! -name "*hpu_only*.py" | sort > all_tests.txt
+find . -name "test*.py" | sort > all_tests.txt
 total_lines=$(wc -l < all_tests.txt)
 NUM_CHUNKS=5
-chunk_size=$(( (total_lines + NUM_CHUNKS - 1) / NUM_CHUNKS ))
-start_line=$(( (test_part - 1) * chunk_size + 1 ))
-selected_files=$(tail -n +$start_line all_tests.txt | head -n $chunk_size)
+q=$(( total_lines / NUM_CHUNKS ))
+r=$(( total_lines % NUM_CHUNKS ))
+if [ "$test_part" -le "$r" ]; then
+    chunk_size=$(( q + 1 ))
+    start_line=$(( (test_part - 1) * chunk_size + 1 ))
+else
+    chunk_size=$q
+    start_line=$(( r * (q + 1) + (test_part - r - 1) * q + 1 ))
+fi
+end_line=$(( start_line + chunk_size - 1 ))
+selected_files=$(sed -n "${start_line},${end_line}p" all_tests.txt)
 printf '%s\n' "${selected_files}" | sed "s,\.\/,python -m pytest --cov=\"${auto_round_path}\" --cov-report term --html=report.html --self-contained-html --cov-report xml:coverage.xml --cov-append -vs --disable-warnings ,g" > run.sh
 cat run.sh
 bash run.sh 2>&1 | tee "${ut_log_name}"
diff --git a/.azure-pipelines/scripts/ut/run_ut_cuda.sh b/.azure-pipelines/scripts/ut/run_ut_cuda.sh
index 92fb15490..a7b6eddd7 100644
--- a/.azure-pipelines/scripts/ut/run_ut_cuda.sh
+++ b/.azure-pipelines/scripts/ut/run_ut_cuda.sh
@@ -5,6 +5,10 @@ CONDA_ENV_NAME="unittest_cuda"
 PYTHON_VERSION="3.10"
 REPO_PATH=$(git rev-parse --show-toplevel)
 LOG_DIR=${REPO_PATH}/ut_log_dir
+SUMMARY_LOG=${LOG_DIR}/results_summary.log
+
+rm -rf ${LOG_DIR} && mkdir -p ${LOG_DIR}
+touch ${SUMMARY_LOG}
 
 [[ -z "$CUDA_VISIBLE_DEVICES" ]] && export CUDA_VISIBLE_DEVICES=0
 function create_conda_env() {
@@ -34,6 +38,59 @@ function create_conda_env() {
     uv pip install pytest-cov pytest-html cmake==4.0.2
 }
 
+function print_test_results_table() {
+    local log_pattern=$1
+    local test_type=$2
+
+    echo ""
+    echo "==========================================" >> ${SUMMARY_LOG}
+    echo "Test Results Summary - ${test_type}" >> ${SUMMARY_LOG}
+    echo "==========================================" >> ${SUMMARY_LOG}
+    printf "%-30s %-10s %-50s\n" "Test Case" "Result" "Log File" >> ${SUMMARY_LOG}
+    printf "%-30s %-10s %-50s\n" "----------" "------" "--------" >> ${SUMMARY_LOG}
+
+    local total_tests=0
+    local passed_tests=0
+    local failed_tests=0
+
+    for log_file in ${LOG_DIR}/${log_pattern}; do
+        if [ -f "${log_file}" ]; then
+            local test_name=$(basename "${log_file}" .log)
+            # Remove prefix to get clean test case name
+            test_name=${test_name#unittest_cuda_}
+            test_name=${test_name#unittest_cuda_vlm_}
+
+            local result="UNKNOWN"
+            # grep -c already prints "0" on no match (it only exits nonzero), so "|| echo 0" would emit a second "0" and break the numeric tests below.
+            local failure_count=$(grep -c '== FAILURES ==' "${log_file}" 2>/dev/null || true)
+            local error_count=$(grep -c '== ERRORS ==' "${log_file}" 2>/dev/null || true)
+            local passed_count=$(grep -c ' passed' "${log_file}" 2>/dev/null || true)
+
+            if [ ${failure_count} -gt 0 ] || [ ${error_count} -gt 0 ]; then
+                result="FAILED"
+                failed_tests=$((failed_tests + 1))
+            elif [ ${passed_count} -gt 0 ]; then
+                result="PASSED"
+                passed_tests=$((passed_tests + 1))
+            else
+                result="NO_TESTS"
+            fi
+
+            total_tests=$((total_tests + 1))
+            local log_filename=$(basename "${log_file}")
+            printf "%-30s %-10s %-50s\n" "${test_name}" "${result}" "${log_filename}" >> ${SUMMARY_LOG}
+        fi
+    done
+
+    echo "==========================================" >> ${SUMMARY_LOG}
+    printf "Total: %d, Passed: %d, Failed: %d\n" ${total_tests} ${passed_tests} ${failed_tests} >> ${SUMMARY_LOG}
+    echo "==========================================" >> ${SUMMARY_LOG}
+    echo "" >> ${SUMMARY_LOG}
+
+    # Exit nonzero when any test failed so "if ! print_test_results_table" at the call sites can actually detect failures.
+    [ "${failed_tests}" -eq 0 ]
+}
+
 function run_unit_test() {
     # install unit test dependencies
     create_conda_env
@@ -49,24 +106,25 @@ function run_unit_test() {
     uv pip install -r requirements.txt
     uv pip install -r requirements_diffusion.txt
-    uv pip list
+    pip list > ${LOG_DIR}/ut_pip_list.txt
 
     export COVERAGE_RCFILE=${REPO_PATH}/.azure-pipelines/scripts/ut/.coverage
     local auto_round_path=$(python -c 'import auto_round; print(auto_round.__path__[0])')
-
-    # setup test env
-    mkdir -p ${LOG_DIR}
-    local ut_log_name=${LOG_DIR}/unittest_cuda.log
-    find . -name "test_*.py" | sed "s,\.\/,python -m pytest --cov=\"${auto_round_path}\" --cov-report term --html=report.html --self-contained-html --cov-report xml:coverage.xml --cov-append -vs --disable-warnings ,g" >run.sh
-    cat run.sh
-
-    # run unit test
-    bash run.sh 2>&1 | tee ${ut_log_name}
-
-    cp report.html ${LOG_DIR}/
-    cp coverage.xml ${LOG_DIR}/
-
-    if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed' ${ut_log_name}) == 0 ]; then
-        echo "Find errors in pytest case, please check the output..."
+
+    # run unit tests individually with separate logs
+    for test_file in $(find . -name "test_*.py"); do
+        local test_basename=$(basename ${test_file} .py)
+        local ut_log_name=${LOG_DIR}/unittest_cuda_${test_basename}.log
+        echo "Running ${test_file}..."
+
+        python -m pytest --cov="${auto_round_path}" --cov-report term --html=report.html --self-contained-html --cov-report xml:coverage.xml --cov-append -vs --disable-warnings ${test_file} 2>&1 | tee ${ut_log_name}
+    done
+
+    mv report.html ${LOG_DIR}/
+    mv coverage.xml ${LOG_DIR}/
+
+    # Print test results table and check for failures
+    if ! print_test_results_table "unittest_cuda_test_*.log" "CUDA Unit Tests"; then
+        echo "Some CUDA unit tests failed. Please check the individual log files for details."
     fi
 }
 
@@ -85,30 +143,32 @@ function run_unit_test_vlm() {
     uv pip install flash-attn==2.7.4.post1 --no-build-isolation
     uv pip install -r requirements_vlm.txt
-    uv pip list
+    pip list > ${LOG_DIR}/vlm_ut_pip_list.txt
 
     export COVERAGE_RCFILE=${REPO_PATH}/.azure-pipelines/scripts/ut/.coverage
     local auto_round_path=$(python -c 'import auto_round; print(auto_round.__path__[0])')
 
-    # setup test env
-    mkdir -p ${LOG_DIR}
-    local ut_log_name=${LOG_DIR}/unittest_cuda_vlm.log
-    find . -name "test*vlms.py" | sed "s,\.\/,python -m pytest --cov=\"${auto_round_path}\" --cov-report term --html=report_vlms.html --self-contained-html --cov-report xml:coverage_vlms.xml --cov-append -vs --disable-warnings ,g" >run_vlms.sh
-    cat run_vlms.sh
+    # run VLM unit tests individually with separate logs
+    for test_file in $(find . -name "test*vlms.py"); do
+        local test_basename=$(basename ${test_file} .py)
+        local ut_log_name=${LOG_DIR}/unittest_cuda_vlm_${test_basename}.log
+        echo "Running ${test_file}..."
 
-    # run unit test
-    bash run_vlms.sh 2>&1 | tee ${ut_log_name}
+        python -m pytest --cov="${auto_round_path}" --cov-report term --html=report_vlms.html --self-contained-html --cov-report xml:coverage_vlms.xml --cov-append -vs --disable-warnings ${test_file} 2>&1 | tee ${ut_log_name}
+    done
 
-    cp report_vlms.html ${LOG_DIR}/
-    cp coverage_vlms.xml ${LOG_DIR}/
+    mv report_vlms.html ${LOG_DIR}/
+    mv coverage_vlms.xml ${LOG_DIR}/
 
-    if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed' ${ut_log_name}) == 0 ]; then
-        echo "Find errors in pytest case, please check the output..."
+    # Print test results table and check for failures
+    if ! print_test_results_table "unittest_cuda_vlm_test*.log" "CUDA VLM Tests"; then
+        echo "Some CUDA VLM tests failed. Please check the individual log files for details."
     fi
 }
 
 function main() {
     run_unit_test_vlm
     run_unit_test
+    cat ${SUMMARY_LOG}
 }
 
 main
diff --git a/.azure-pipelines/template/ut-template.yml b/.azure-pipelines/template/ut-template.yml
index 3962f072d..e5a081b0a 100644
--- a/.azure-pipelines/template/ut-template.yml
+++ b/.azure-pipelines/template/ut-template.yml
@@ -55,7 +55,7 @@ steps:
     displayName: "Run UT"
 
   - task: PublishPipelineArtifact@1
-    condition: succeededOrFailed()
+    condition: succeeded()
     inputs:
       targetPath: ${{ parameters.uploadPath }}
       artifact: ${{ parameters.utArtifact }}_coverage