diff --git a/.ci/caffe2/test.sh b/.ci/caffe2/test.sh
index f224608729320..a8adfc1fa0c74 100755
--- a/.ci/caffe2/test.sh
+++ b/.ci/caffe2/test.sh
@@ -5,7 +5,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/common.sh"
 
 if [[ ${BUILD_ENVIRONMENT} == *onnx* ]]; then
   pip install click mock tabulate networkx==2.0
-  pip -q install --user "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
+  pip -q install "file:///var/lib/jenkins/workspace/third_party/onnx#egg=onnx"
 fi
 
 # Skip tests in environments where they are not built/applicable
@@ -151,8 +151,8 @@ export DNNL_MAX_CPU_ISA=AVX2
 if [[ "${SHARD_NUMBER:-1}" == "1" ]]; then
   # TODO(sdym@meta.com) remove this when the linked issue resolved.
   # py is temporary until https://github.com/Teemu/pytest-sugar/issues/241 is fixed
-  pip install --user py==1.11.0
-  pip install --user pytest-sugar
+  pip install py==1.11.0
+  pip install pytest-sugar
   # NB: Warnings are disabled because they make it harder to see what
   # the actual erroring test is
   "$PYTHON" \
diff --git a/.ci/docker/requirements-ci.txt b/.ci/docker/requirements-ci.txt
index eca516fd4e937..4ebe4dbd3c5cb 100644
--- a/.ci/docker/requirements-ci.txt
+++ b/.ci/docker/requirements-ci.txt
@@ -263,7 +263,7 @@ tb-nightly==2.13.0a20230426
 #Pinned versions:
 #test that import:
 
-tlparse==0.3.25
+tlparse==0.3.30
 #Description: parse logs produced by torch.compile
 #Pinned versions:
 #test that import: dynamo/test_structured_trace.py
diff --git a/.ci/onnx/test.sh b/.ci/onnx/test.sh
index a7d3b72c62a7e..d42ca2c218dec 100755
--- a/.ci/onnx/test.sh
+++ b/.ci/onnx/test.sh
@@ -19,7 +19,7 @@ git config --global --add safe.directory /var/lib/jenkins/workspace
 
 if [[ "$BUILD_ENVIRONMENT" == *onnx* ]]; then
   # TODO: This can be removed later once vision is also part of the Docker image
-  pip install -q --user --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
+  pip install -q --no-use-pep517 "git+https://github.com/pytorch/vision.git@$(cat .github/ci_commit_pins/vision.txt)"
   # JIT C++ extensions require ninja, so put it into PATH.
   export PATH="/var/lib/jenkins/.local/bin:$PATH"
   # NB: ONNX test is fast (~15m) so it's ok to retry it few more times to avoid any flaky issue, we
diff --git a/.ci/pytorch/common_utils.sh b/.ci/pytorch/common_utils.sh
index 138cece4ab151..0c8b56fdc3633 100644
--- a/.ci/pytorch/common_utils.sh
+++ b/.ci/pytorch/common_utils.sh
@@ -127,9 +127,9 @@ function install_torchaudio() {
   if [[ "$1" == "cuda" ]]; then
     # TODO: This is better to be passed as a parameter from _linux-test workflow
     # so that it can be consistent with what is set in build
-    TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}"
+    TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install --no-use-pep517 "git+https://github.com/pytorch/audio.git@${commit}"
   else
-    pip_install --no-use-pep517 --user "git+https://github.com/pytorch/audio.git@${commit}"
+    pip_install --no-use-pep517 "git+https://github.com/pytorch/audio.git@${commit}"
   fi
 }
 
@@ -139,8 +139,8 @@ function install_torchtext() {
   local text_commit
   data_commit=$(get_pinned_commit data)
   text_commit=$(get_pinned_commit text)
-  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/data.git@${data_commit}"
-  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/text.git@${text_commit}"
+  pip_install --no-use-pep517 "git+https://github.com/pytorch/data.git@${data_commit}"
+  pip_install --no-use-pep517 "git+https://github.com/pytorch/text.git@${text_commit}"
 }
 
 function install_torchvision() {
@@ -153,16 +153,12 @@ function install_torchvision() {
     echo 'char* dlerror(void) { return "";}'|gcc -fpic -shared -o "${HOME}/dlerror.so" -x c -
     LD_PRELOAD=${orig_preload}:${HOME}/dlerror.so
   fi
-  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/vision.git@${commit}"
+  pip_install --no-use-pep517 "git+https://github.com/pytorch/vision.git@${commit}"
   if [ -n "${LD_PRELOAD}" ]; then
     LD_PRELOAD=${orig_preload}
   fi
 }
 
-function install_tlparse() {
-  pip_install --user "tlparse==0.3.30"
-  PATH="$(python -m site --user-base)/bin:$PATH"
-}
 
 function install_torchrec_and_fbgemm() {
   local torchrec_commit
@@ -178,7 +174,7 @@ function install_torchrec_and_fbgemm() {
 
   if [[ "$BUILD_ENVIRONMENT" == *rocm* ]] ; then
     # install torchrec first because it installs fbgemm nightly on top of rocm fbgemm
-    pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
+    pip_install --no-use-pep517 "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
     pip_uninstall fbgemm-gpu-nightly
 
     pip_install tabulate  # needed for newer fbgemm
@@ -195,8 +191,8 @@ function install_torchrec_and_fbgemm() {
     rm -rf fbgemm
   else
     # See https://github.com/pytorch/pytorch/issues/106971
-    CUDA_PATH=/usr/local/cuda-12.1 pip_install --no-use-pep517 --user "git+https://github.com/pytorch/FBGEMM.git@${fbgemm_commit}#egg=fbgemm-gpu&subdirectory=fbgemm_gpu"
-    pip_install --no-use-pep517 --user "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
+    CUDA_PATH=/usr/local/cuda-12.1 pip_install --no-use-pep517 "git+https://github.com/pytorch/FBGEMM.git@${fbgemm_commit}#egg=fbgemm-gpu&subdirectory=fbgemm_gpu"
+    pip_install --no-use-pep517 "git+https://github.com/pytorch/torchrec.git@${torchrec_commit}"
   fi
 }
 
@@ -239,7 +235,7 @@ function checkout_install_torchbench() {
 function install_torchao() {
   local commit
   commit=$(get_pinned_commit torchao)
-  pip_install --no-use-pep517 --user "git+https://github.com/pytorch/ao.git@${commit}"
+  pip_install --no-use-pep517 "git+https://github.com/pytorch/ao.git@${commit}"
 }
 
 function print_sccache_stats() {
diff --git a/.ci/pytorch/macos-test.sh b/.ci/pytorch/macos-test.sh
index 0d10382605d1c..179556cc59d05 100755
--- a/.ci/pytorch/macos-test.sh
+++ b/.ci/pytorch/macos-test.sh
@@ -277,8 +277,6 @@ test_timm_perf() {
   echo "timm benchmark on mps device completed"
 }
 
-install_tlparse
-
 if [[ $TEST_CONFIG == *"perf_all"* ]]; then
   test_torchbench_perf
   test_hf_perf
diff --git a/.ci/pytorch/test.sh b/.ci/pytorch/test.sh
index 1e6b50f04f26d..4159973f884fa 100755
--- a/.ci/pytorch/test.sh
+++ b/.ci/pytorch/test.sh
@@ -197,7 +197,7 @@ fi
 
 if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]] ; then
   # JIT C++ extensions require ninja.
-  pip_install --user "ninja==1.10.2"
+  pip_install "ninja==1.10.2"
   # ninja is installed in $HOME/.local/bin, e.g., /var/lib/jenkins/.local/bin for CI user jenkins
   # but this script should be runnable by any user, including root
   export PATH="$HOME/.local/bin:$PATH"
@@ -208,8 +208,6 @@ if [[ "$BUILD_ENVIRONMENT" == *aarch64* ]]; then
   export VALGRIND=OFF
 fi
 
-install_tlparse
-
 # DANGER WILL ROBINSON. The LD_PRELOAD here could cause you problems
 # if you're not careful. Check this if you made some changes and the
 # ASAN test is not working
@@ -464,7 +462,7 @@ DYNAMO_BENCHMARK_FLAGS=()
 
 
 pr_time_benchmarks() {
-  pip_install --user "fbscribelogger"
+  pip_install "fbscribelogger"
 
   TEST_REPORTS_DIR=$(pwd)/test/test-reports
   mkdir -p "$TEST_REPORTS_DIR"
@@ -1423,8 +1421,8 @@ test_bazel() {
 
 test_benchmarks() {
   if [[ "$BUILD_ENVIRONMENT" == *cuda* && $TEST_CONFIG != *nogpu* ]]; then
-    pip_install --user "pytest-benchmark==3.2.3"
-    pip_install --user "requests"
+    pip_install "pytest-benchmark==3.2.3"
+    pip_install "requests"
     BENCHMARK_DATA="benchmarks/.data"
     mkdir -p ${BENCHMARK_DATA}
     pytest benchmarks/fastrnns/test_bench.py --benchmark-sort=Name --benchmark-json=${BENCHMARK_DATA}/fastrnns_default.json --fuser=default --executor=default