build(deps): bump Lightning-AI/utilities from 0.11.0 to 0.11.2 #1103

Workflow file for this run

name: CI testing
# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows
on: # Trigger the workflow on push or pull request, but only for the master branch
  push:
    branches: [master]
  pull_request: {}
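# cancel superseded runs of the same workflow and ref, but keep every run on master and release/* branches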
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
  cancel-in-progress: ${{ ! (github.ref == 'refs/heads/master' || startsWith(github.ref, 'refs/heads/release/')) }}
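# every `run:` step below defaults to bash, including on the windows-2022 runner
# (individual steps may still override this, e.g. the inline Python step further down)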
defaults:
  run:
    shell: bash
jobs:
  pytester:
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: ["ubuntu-20.04", "macOS-11"]
        python-version: ["3.8", "3.9", "3.10"]
        requires: ["latest"] # + "oldest"
        include:
          - { os: 'windows-2022', python-version: "3.9" }
          - { os: 'ubuntu-20.04', python-version: "3.8", requires: "oldest" }
          - { os: 'macOS-11', python-version: "3.8", requires: "oldest" }
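    # FREEZE_REQUIREMENTS is presumably read by the package setup to keep the requirement pins untouched;
    # TORCH_URL serves CPU-only torch wheels; TRANSFORMERS_CACHE points HuggingFace to a local, cacheable directory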
    env:
      FREEZE_REQUIREMENTS: 1
      TORCH_URL: "https://download.pytorch.org/whl/cpu/torch_stable.html"
      TRANSFORMERS_CACHE: _hf_cache
    # Timeout: https://stackoverflow.com/a/59076067/4521646
    # the high value accounts for the much slower tests on macOS and Python 3.8
    timeout-minutes: 50
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
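      # for the "oldest" matrix entries, rewrite every ">=" lower bound to "==" so the
      # oldest supported versions of the requirements get installed and tested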
      - name: Set min. dependencies
        if: matrix.requires == 'oldest'
        run: |
          import os, glob
          ls = ['requirements.txt'] + glob.glob(os.path.join("requirements", "*.txt"))
          for fpath in ls:
              req = open(fpath).read().replace('>=', '==')
              open(fpath, 'w').write(req)
        shell: python
      - name: Get pip cache dir
        id: pip-cache
        run: echo "dir=$(pip cache dir)" >> $GITHUB_OUTPUT
      - name: Restore pip cache
        uses: actions/cache/restore@v4
        id: restore-cache
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: pip-dependencies
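      # the pip cache is restored on every run but saved back only on master
      # (see the "Save pip cache" step below), so PR runs reuse it without overwriting it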
      - name: Install package
        run: |
          python -m pip install "pip==22.3.1" # todo: drop after resolving extras
          pip install -e . -U --prefer-binary -f ${TORCH_URL}
          pip list
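      # the next step runs pytest over the package sources in ./src before the full dev
      # requirements are installed; subpackages that need extra dependencies are ignored for now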
      - name: Test Package [only]
        working-directory: ./src
        run: |
          pip install "coverage[toml]" pytest -q
          # TODO: the package should eventually run the full suite without any ignores
          python -m pytest . \
            --ignore=pl_bolts/datamodules \
            --ignore=pl_bolts/datasets \
            --ignore=pl_bolts/models/rl
      - name: Install dependencies
        run: |
          pip install -r requirements/devel.txt -U -q -f ${TORCH_URL}
          pip list
      - name: Save pip cache
        if: github.ref == 'refs/heads/master'
        uses: actions/cache/save@v4
        with:
          path: ${{ steps.pip-cache.outputs.dir }}
          key: pip-dependencies
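      # the dataset cache is shared across the Linux, macOS and Windows runners
      # (enableCrossOsArchive) and keyed by a fixed date, so it only refreshes when that date is bumped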
      - name: Cache datasets
        id: cache-datasets
        uses: actions/cache@v4
        with:
          path: ./_datasets
          enableCrossOsArchive: true
          # bump this date if you need to update the cache
          key: datasets-20230630
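      # on a cache miss, the following steps install an unrar tool per OS, download the Atari ROMs
      # into ./_datasets, and register them with atari_py so the RL tests can build their environments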
      - name: Setup macOS
        if: runner.os == 'macOS' && steps.cache-datasets.outputs.cache-hit != 'true'
        run: |
          brew update
          brew install rar
      - name: Setup Ubuntu
        if: runner.os == 'Linux' && steps.cache-datasets.outputs.cache-hit != 'true'
        run: |
          sudo apt-get update
          sudo apt-get install -y unrar
      - name: Setup Windows
        if: runner.os == 'Windows' && steps.cache-datasets.outputs.cache-hit != 'true'
        run: |
          choco install unrar
      - name: Download ROMs
        if: steps.cache-datasets.outputs.cache-hit != 'true'
        run: |
          mkdir -p _datasets
          cd _datasets
          curl http://www.atarimania.com/roms/Roms.rar -o Roms.rar
          unrar x -y Roms.rar
          rm Roms.rar
      - name: Init ROMs
        working-directory: _datasets/
        run: python -m atari_py.import_roms ROMS
      - name: Restore HF cache
        uses: actions/cache/restore@v4
        with:
          path: ${{ env.TRANSFORMERS_CACHE }}
          key: cache-transformers
      - name: Testing
        run: python -m pytest tests -v --cov=pl_bolts --timeout=200
      - name: Save HF cache
        if: github.ref == 'refs/heads/master'
        uses: actions/cache/save@v4
        with:
          path: ${{ env.TRANSFORMERS_CACHE }}
          key: cache-transformers
      - name: Statistics
        if: success()
        run: |
          coverage report
          coverage xml
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v4
        if: always()
        # see: https://github.com/actions/toolkit/issues/399
        continue-on-error: true
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          file: coverage.xml
          flags: cpu,pytest
          env_vars: OS,PYTHON
          name: codecov-umbrella
          fail_ci_if_error: false
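  # "testing-guardian" folds the whole pytester matrix into a single aggregate status
  # (handy as the one required check): it fails when any matrix job failed, and the
  # 1-minute timeout on the 90-second sleep also turns a cancelled or skipped matrix into a failure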
  testing-guardian:
    runs-on: ubuntu-latest
    needs: pytester
    if: always()
    steps:
      - run: echo "${{ needs.pytester.result }}"
      - name: failing...
        if: needs.pytester.result == 'failure'
        run: exit 1
      - name: cancelled or skipped...
        if: contains(fromJSON('["cancelled", "skipped"]'), needs.pytester.result)
        timeout-minutes: 1
        run: sleep 90