diff --git a/.github/workflows/pr_master_test.yml b/.github/workflows/pr_master_test.yml
new file mode 100644
index 00000000..d6211b09
--- /dev/null
+++ b/.github/workflows/pr_master_test.yml
@@ -0,0 +1,208 @@
+name: GitHub CI
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    branches:
+      - master
+
+defaults:
+  run:
+    shell: bash -l {0}
+
+env:
+  PYTHONWARNINGS: ignore::UserWarning
+  PYTHON_BASE_PKGS: >
+    coverage xlrd
+
+jobs:
+  pyomo-tests:
+    name: ${{ matrix.TARGET }}/${{ matrix.python }}${{ matrix.NAME }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+        python: [2.7, 3.5, 3.6, 3.7, 3.8, pypy2, pypy3]
+        include:
+          - os: ubuntu-latest
+            TARGET: linux
+            PYENV: pip
+
+          - os: macos-latest
+            TARGET: osx
+            PYENV: pip
+
+          - os: windows-latest
+            TARGET: win
+            PYENV: conda
+            PACKAGES: setuptools coverage nose xlrd
+
+        exclude:
+          - {os: macos-latest, python: pypy2}
+          - {os: macos-latest, python: pypy3}
+          - {os: windows-latest, python: pypy2}
+          - {os: windows-latest, python: pypy3}
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Pip package cache
+        uses: actions/cache@v2
+        if: matrix.PYENV == 'pip'
+        id: pip-cache
+        with:
+          path: cache/pip
+          key: pip-v2-${{runner.os}}-${{matrix.python}}
+
+      - name: Download cache
+        uses: actions/cache@v2
+        id: download-cache
+        with:
+          path: cache/download
+          key: download-v4-${{runner.os}}
+
+      - name: Configure curl
+        run: |
+          CURLRC="$(cat <<EOF
+             retry = 0
+             max-time = 30
+          EOF
+          )"
+          echo "$CURLRC" > ${GITHUB_WORKSPACE}/.curlrc
+          echo "$CURLRC" > ${GITHUB_WORKSPACE}/_curlrc
+          echo "::set-env name=CURL_HOME::$GITHUB_WORKSPACE"
+
+      - name: Set up Python ${{ matrix.python }}
+        if: matrix.PYENV == 'pip'
+        uses: actions/setup-python@v1
+        with:
+          python-version: ${{ matrix.python }}
+
+      - name: Set up Miniconda Python ${{ matrix.python }}
+        if: matrix.PYENV == 'conda'
+        uses: goanpeca/setup-miniconda@v1
+        with:
+          auto-update-conda: true
+          python-version: ${{ matrix.python }}
+
+      # GitHub Actions is very fragile when it comes to setting up various
+      # Python interpreters, especially the setup-miniconda interface.
+      # Per the setup-miniconda documentation, it is important to always
+      # invoke bash as a login shell ('shell: bash -l {0}') so that the
+      # conda environment is properly activated.  However, running within
+      # a login shell appears to foul up the link to python from
+      # setup-python.  Further, we have anecdotal evidence that
+      # subprocesses invoked through $(python -c ...) and `python -c ...`
+      # will not pick up the python activated by setup-python on OSX.
+      #
+      # Our solution is to define a PYTHON_EXE environment variable that
+      # can be explicitly called within subprocess calls to reach the
+      # correct interpreter.  Note that we must explicitly run in a *non*
+      # login shell to set up the environment variable for the
+      # setup-python environments.
+
+      - name: Install Python Packages (pip)
+        if: matrix.PYENV == 'pip'
+        shell: bash
+        run: |
+          python -m pip install --cache-dir cache/pip --upgrade pip
+          pip install --cache-dir cache/pip ${PYTHON_BASE_PKGS}
+          if [[ ${{matrix.python}} == pypy ]] || [[ ${{matrix.python}} == 2.7 ]]; then
+              pip install --cache-dir cache/pip pyro
+          else
+              pip install --cache-dir cache/pip pyro4
+          fi
+          if [[ ${{matrix.python}} == 3.4 ]]; then
+              pip install --cache-dir cache/pip 'pyyaml<=5.2'
+          elif [[ ${{matrix.python}} != 3.6 ]]; then
+              pip install --cache-dir cache/pip pyyaml
+          fi
+          python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \
+              % (sys.executable,))'
+
+      - name: Install Python packages (conda)
+        if: matrix.PYENV == 'conda'
+        run: |
+          mkdir -p $GITHUB_WORKSPACE/cache/conda
+          conda config --set always_yes yes
+          conda config --set auto_update_conda false
+          conda config --prepend pkgs_dirs $GITHUB_WORKSPACE/cache/conda
+          conda info
+          conda config --show-sources
+          conda list --show-channel-urls
+          conda install -q -y -c conda-forge -c anaconda ${{matrix.PACKAGES}}
+          python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \
+              % (sys.executable,))'
+
+      - name: Install Pyutilib
+        run: |
+          $PYTHON_EXE setup.py develop
+
+      - name: Set up coverage tracking
+        run: |
+          if test "${{matrix.TARGET}}" == win; then
+              COVERAGE_BASE=${GITHUB_WORKSPACE}\\.cover
+          else
+              COVERAGE_BASE=${GITHUB_WORKSPACE}/.cover
+          fi
+          COVERAGE_RC=${COVERAGE_BASE}_rc
+          echo "::set-env name=COVERAGE_RCFILE::$COVERAGE_RC"
+          echo "::set-env name=COVERAGE_PROCESS_START::$COVERAGE_RC"
+          cp ${GITHUB_WORKSPACE}/.coveragerc ${COVERAGE_RC}
+          echo "[run]" >> ${COVERAGE_RC}
+          echo "data_file=${COVERAGE_BASE}age" >> ${COVERAGE_RC}
+          SITE_PACKAGES=$($PYTHON_EXE -c "from distutils.sysconfig import \
+              get_python_lib; print(get_python_lib())")
+          echo "Python site-packages: $SITE_PACKAGES"
+          echo 'import coverage; coverage.process_startup()' \
+              > ${SITE_PACKAGES}/run_coverage_at_startup.pth
+          cat ${COVERAGE_RC}
+
+      - name: Run Pyutilib tests
+        run: |
+          if [ ${{matrix.TARGET}} == 'osx' ]; then
+              PYTHON_DIR=`dirname $PYTHON_EXE`
+              export PATH=$PYTHON_DIR:$PATH
+          fi
+          test.pyutilib --cat=all -v
+
+      - name: Process code coverage report
+        env:
+          CODECOV_NAME: ${{matrix.TARGET}}/${{matrix.python}}${{matrix.NAME}}
+        run: |
+          [ ! -d ${GITHUB_WORKSPACE}/cache/download ] && mkdir -pv ${GITHUB_WORKSPACE}/cache/download
+          coverage report -i
+          coverage xml -i
+          ls -la
+          set +e
+          # Always attempt to update the codecov script, but fall back on
+          # the previously cached script if it fails
+          CODECOV="${GITHUB_WORKSPACE}/cache/download/codecov.sh"
+          for i in `seq 3`; do
+              echo "Downloading current codecov script (attempt ${i})"
+              curl -L https://codecov.io/bash -o $CODECOV
+              if test $? == 0; then
+                  break
+              fi
+              DELAY=$(( RANDOM % 30 + 30))
+              echo "Pausing $DELAY seconds before re-attempting download"
+              sleep $DELAY
+          done
+          ls -la ${GITHUB_WORKSPACE}/cache/
+          i=0
+          while : ; do
+              ((i+=1))
+              echo "Uploading coverage to codecov (attempt ${i})"
+              bash $CODECOV -Z -X gcov -X s3 -f coverage.xml
+              if test $? == 0; then
+                  break
+              elif test $i -ge 5; then
+                  exit 1
+              fi
+              DELAY=$(( RANDOM % 30 + 30))
+              echo "Pausing $DELAY seconds before re-attempting upload"
+              sleep $DELAY
+          done
diff --git a/doc/workflow/examples/test_example.py b/doc/workflow/examples/test_example.py
index 18b7ee16..a6834ff7 100644
--- a/doc/workflow/examples/test_example.py
+++ b/doc/workflow/examples/test_example.py
@@ -12,7 +12,8 @@ def filter(line):
     return 'Running' in line or "IGNORE" in line or line.startswith('usage:') or 'Sub-commands' in line
 
 # Declare an empty TestCase class
-class Test(unittest.TestCase): pass
+class Test(unittest.TestCase):
+    pass
 
 # Find all example*.py files, and use them to define baseline tests
 for file in glob.glob(datadir+'example*.py'):
@@ -32,8 +33,9 @@ class Test(unittest.TestCase): pass
 for file in glob.glob(datadir+'*.sh'):
     bname = basename(file)
     name=bname.split('.')[0]
-    Test.add_baseline_test(cmd='cd %s; /usr/bin/env sh %s' % (datadir, file), baseline=datadir+name+'.txt', name=name, filter=filter)
+    Test.add_baseline_test(cmd='cd %s; /usr/bin/env bash %s' % (datadir, file), baseline=datadir+name+'.txt', name=name, filter=filter)
 
 # Execute the tests
 if __name__ == '__main__':
+    unittest.main()
diff --git a/pyutilib/workflow/tests/test_doc.py b/pyutilib/workflow/tests/test_doc.py
index 8767e4e8..3e58267e 100644
--- a/pyutilib/workflow/tests/test_doc.py
+++ b/pyutilib/workflow/tests/test_doc.py
@@ -1,7 +1,7 @@
 # Imports
 import sys
 import os
-from os.path import dirname, abspath, abspath
+from os.path import dirname, abspath
 
 import pyutilib.th as unittest