From 2eefb88957dec212fc92134fda4ec61ab16168c3 Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Sat, 16 Nov 2019 09:52:41 +0100
Subject: [PATCH 1/2] Enable CI on Github

Add a GitHub workflow `.github/workflows/ci.yml` that will trigger on
push and pull requests. It will run the five actions it defines:

 * `conda`: installs conda and creates the environment
 * `docs`: builds the documentation with nitpick warnings enabled
 * `pre-commit`: runs pre-commit on all files
 * `tests`: runs `verdi devel tests` and the standalone test files
 * `verdi`: runs the tests that check that the `verdi` load time is
   acceptable

All tests are performed for python 3.7, except for the `tests` action,
which is run for python 3.5 as well. Both python versions are run for
both database backends in a matrix strategy. Even though we support 3.6
as well, we do not explicitly test it, since that would require another
two builds, and testing 3.5 and 3.7 should give decent guarantees.

Finally, the argument for multiple individual actions, instead of
joining them, is that there does not seem to be a limit on the number
of concurrent actions on GitHub as of this writing. Spreading the tests
out therefore allows running them in parallel, which should reduce the
overall runtime of the continuous integration workflow.
---
 .ci/test_daemon.py                            | 157 ++++++++++--------
 .github/config/doubler.yaml                   |   9 +
 .github/config/localhost-config.yaml          |   2 +
 .github/config/localhost.yaml                 |  12 ++
 .github/config/profile.yaml                   |  14 ++
 .github/workflows/ci.yml                      | 147 ++++++++++++++++
 .github/workflows/conda.sh                    |  16 ++
 .github/workflows/setup.sh                    |  26 +++
 .github/workflows/tests.sh                    |  17 ++
 .github/workflows/verdi.sh                    |  36 ++++
 .pre-commit-config.yaml                       |   1 -
 .pylintrc                                     |   2 +-
 aiida/backends/djsite/manager.py              |   4 +-
 aiida/backends/sqlalchemy/manager.py          |   2 +-
 .../versions/041a79fc615f_dblog_cleaning.py   |   2 +-
 .../tests/cmdline/commands/test_database.py   |   2 +-
 aiida/backends/tests/orm/data/test_upf.py     |   4 +-
 .../importexport/migration/test_v02_to_v03.py |   1 +
 aiida/orm/convert.py                          |   1 -
 aiida/orm/implementation/django/computers.py  |   2 +-
 .../implementation/sqlalchemy/computers.py    |   2 +-
 aiida/orm/utils/links.py                      |   4 +-
 setup.json                                    |   2 +-
 23 files changed, 381 insertions(+), 84 deletions(-)
 create mode 100644 .github/config/doubler.yaml
 create mode 100644 .github/config/localhost-config.yaml
 create mode 100644 .github/config/localhost.yaml
 create mode 100644 .github/config/profile.yaml
 create mode 100644 .github/workflows/ci.yml
 create mode 100755 .github/workflows/conda.sh
 create mode 100755 .github/workflows/setup.sh
 create mode 100755 .github/workflows/tests.sh
 create mode 100755 .github/workflows/verdi.sh

diff --git a/.ci/test_daemon.py b/.ci/test_daemon.py
index 4b24647dd..26d9b89b4 100644
--- a/.ci/test_daemon.py
+++ b/.ci/test_daemon.py
@@ -7,47 +7,47 @@
 # For further information on the license, see the LICENSE.txt file        #
 # For further information please visit http://www.aiida.net               #
 ###########################################################################
-
+# pylint: disable=no-name-in-module
+"""Tests to run with a running daemon."""
 import subprocess
 import sys
 import time
-
 from aiida.common import exceptions
 from aiida.engine import run_get_node, submit
 from aiida.engine.daemon.client import get_daemon_client
 from aiida.engine.persistence import ObjectLoader
 from aiida.manage.caching import enable_caching
-from aiida.orm import CalcJobNode, Code, load_node, Int, Str, List
-from aiida.plugins import CalculationFactory, DataFactory
+from aiida.orm import CalcJobNode, 
load_node, Int, Str, List, Dict, load_code +from aiida.plugins import CalculationFactory from workchains import ( NestedWorkChain, DynamicNonDbInput, DynamicDbInput, DynamicMixedInput, ListEcho, CalcFunctionRunnerWorkChain, WorkFunctionRunnerWorkChain, NestedInputNamespace, SerializeWorkChain ) - -Dict = DataFactory('dict') - -codename = 'doubler@torquessh' -timeout_secs = 4 * 60 # 4 minutes -number_calculations = 15 # Number of calculations to submit -number_workchains = 8 # Number of workchains to submit +CODENAME = 'doubler' +TIMEOUTSECS = 4 * 60 # 4 minutes +NUMBER_CALCULATIONS = 15 # Number of calculations to submit +NUMBER_WORKCHAINS = 8 # Number of workchains to submit def print_daemon_log(): + """Print daemon log.""" daemon_client = get_daemon_client() daemon_log = daemon_client.daemon_log_file print("Output of 'cat {}':".format(daemon_log)) try: print(subprocess.check_output( - ['cat', '{}'.format(daemon_log)], stderr=subprocess.STDOUT, + ['cat', '{}'.format(daemon_log)], + stderr=subprocess.STDOUT, )) - except subprocess.CalledProcessError as e: - print('Note: the command failed, message: {}'.format(e)) + except subprocess.CalledProcessError as exception: + print('Note: the command failed, message: {}'.format(exception)) def jobs_have_finished(pks): + """Check if jobs with given pks have finished.""" finished_list = [load_node(pk).is_terminated for pk in pks] node_list = [load_node(pk) for pk in pks] num_finished = len([_ for _ in finished_list if _]) @@ -56,10 +56,11 @@ def jobs_have_finished(pks): if not node.is_terminated: print('not terminated: {} [{}]'.format(node.pk, node.process_state)) print('{}/{} finished'.format(num_finished, len(finished_list))) - return not (False in finished_list) + return False not in finished_list def print_report(pk): + """Print the process report for given pk.""" print("Output of 'verdi process report {}':".format(pk)) try: print(subprocess.check_output( @@ -71,13 +72,17 @@ def print_report(pk): def validate_calculations(expected_results): + """Validate the calculations.""" valid = True actual_dict = {} for pk, expected_dict in expected_results.items(): calc = load_node(pk) if not calc.is_finished_ok: - print('Calculation<{}> not finished ok: process_state<{}> exit_status<{}>' - .format(pk, calc.process_state, calc.exit_status)) + print( + 'Calculation<{}> not finished ok: process_state<{}> exit_status<{}>'.format( + pk, calc.process_state, calc.exit_status + ) + ) print_report(pk) valid = False @@ -95,14 +100,14 @@ def validate_calculations(expected_results): pass if actual_dict != expected_dict: - print('* UNEXPECTED VALUE {} for calc pk={}: I expected {}' - .format(actual_dict, pk, expected_dict)) + print('* UNEXPECTED VALUE {} for calc pk={}: I expected {}'.format(actual_dict, pk, expected_dict)) valid = False return valid def validate_workchains(expected_results): + """Validate the workchains.""" valid = True for pk, expected_value in expected_results.items(): this_valid = True @@ -110,24 +115,33 @@ def validate_workchains(expected_results): calc = load_node(pk) actual_value = calc.outputs.output except (exceptions.NotExistent, AttributeError) as exception: - print('* UNABLE TO RETRIEVE VALUE for workchain pk={}: I expected {}, I got {}: {}' - .format(pk, expected_value, type(exception), exception)) + print( + '* UNABLE TO RETRIEVE VALUE for workchain pk={}: I expected {}, I got {}: {}'.format( + pk, expected_value, type(exception), exception + ) + ) valid = False this_valid = False actual_value = None # I check only if this_valid, 
otherwise calc could not exist if this_valid and not calc.is_finished_ok: - print('Calculation<{}> not finished ok: process_state<{}> exit_status<{}>' - .format(pk, calc.process_state, calc.exit_status)) + print( + 'Calculation<{}> not finished ok: process_state<{}> exit_status<{}>'.format( + pk, calc.process_state, calc.exit_status + ) + ) print_report(pk) valid = False this_valid = False # I check only if this_valid, otherwise actual_value could be unset if this_valid and actual_value != expected_value: - print('* UNEXPECTED VALUE {}, type {} for workchain pk={}: I expected {}, type {}' - .format(actual_value, type(actual_value), pk, expected_value, type(expected_value))) + print( + '* UNEXPECTED VALUE {}, type {} for workchain pk={}: I expected {}, type {}'.format( + actual_value, type(actual_value), pk, expected_value, type(expected_value) + ) + ) valid = False this_valid = False @@ -142,8 +156,11 @@ def validate_cached(cached_calcs): for calc in cached_calcs: if not calc.is_finished_ok: - print('Cached calculation<{}> not finished ok: process_state<{}> exit_status<{}>' - .format(calc.pk, calc.process_state, calc.exit_status)) + print( + 'Cached calculation<{}> not finished ok: process_state<{}> exit_status<{}>'.format( + calc.pk, calc.process_state, calc.exit_status + ) + ) print_report(calc.pk) valid = False @@ -162,13 +179,19 @@ def validate_cached(cached_calcs): print_report(calc.pk) valid = False if not files_original: - print('Original calculation <{}> does not have any raw inputs files after being cached from.' - .format(original_calc.pk)) + print( + 'Original calculation <{}> does not have any raw inputs files after being cached from.'.format( + original_calc.pk + ) + ) valid = False if set(files_original) != set(files_cached): - print('different raw input files [{}] vs [{}] for original<{}> and cached<{}> calculation'.format( - set(files_original), set(files_cached), original_calc.pk, calc.pk)) + print( + 'different raw input files [{}] vs [{}] for original<{}> and cached<{}> calculation'.format( + set(files_original), set(files_cached), original_calc.pk, calc.pk + ) + ) valid = False return valid @@ -189,7 +212,7 @@ def run_calculation(code, counter, inputval): Run a calculation through the Process layer. """ process, inputs, expected_result = create_calculation_process(code=code, inputval=inputval) - result, calc = run_get_node(process, **inputs) + _, calc = run_get_node(process, **inputs) print('[{}] ran calculation {}, pk={}'.format(counter, calc.uuid, calc.pk)) return calc, expected_result @@ -200,19 +223,21 @@ def create_calculation_process(code, inputval): """ TemplatereplacerCalculation = CalculationFactory('templatereplacer') parameters = Dict(dict={'value': inputval}) - template = Dict(dict={ - # The following line adds a significant sleep time. - # I set it to 1 second to speed up tests - # I keep it to a non-zero value because I want - # To test the case when AiiDA finds some calcs - # in a queued state - # 'cmdline_params': ["{}".format(counter % 3)], # Sleep time - 'cmdline_params': ['1'], - 'input_file_template': '{value}', # File just contains the value to double - 'input_file_name': 'value_to_double.txt', - 'output_file_name': 'output.txt', - 'retrieve_temporary_files': ['triple_value.tmp'] - }) + template = Dict( + dict={ + # The following line adds a significant sleep time. 
+ # I set it to 1 second to speed up tests + # I keep it to a non-zero value because I want + # To test the case when AiiDA finds some calcs + # in a queued state + # 'cmdline_params': ["{}".format(counter % 3)], # Sleep time + 'cmdline_params': ['1'], + 'input_file_template': '{value}', # File just contains the value to double + 'input_file_name': 'value_to_double.txt', + 'output_file_name': 'output.txt', + 'retrieve_temporary_files': ['triple_value.tmp'] + } + ) options = { 'resources': { 'num_machines': 1 @@ -222,12 +247,7 @@ def create_calculation_process(code, inputval): 'parser_name': 'templatereplacer.doubler', } - expected_result = { - 'value': 2 * inputval, - 'retrieved_temporary_files': { - 'triple_value.tmp': str(inputval * 3) - } - } + expected_result = {'value': 2 * inputval, 'retrieved_temporary_files': {'triple_value.tmp': str(inputval * 3)}} inputs = { 'code': code, @@ -241,24 +261,24 @@ def create_calculation_process(code, inputval): def main(): + """Launch a bunch of calculation jobs and workchains.""" + # pylint: disable=too-many-locals,too-many-statements expected_results_calculations = {} expected_results_workchains = {} - code = Code.get_from_string(codename) + code = load_code(CODENAME) # Submitting the Calculations the new way directly through the launchers - print('Submitting {} calculations to the daemon'.format(number_calculations)) - for counter in range(1, number_calculations + 1): + print('Submitting {} calculations to the daemon'.format(NUMBER_CALCULATIONS)) + for counter in range(1, NUMBER_CALCULATIONS + 1): inputval = counter - calc, expected_result = launch_calculation( - code=code, counter=counter, inputval=inputval - ) + calc, expected_result = launch_calculation(code=code, counter=counter, inputval=inputval) expected_results_calculations[calc.pk] = expected_result # Submitting the Workchains - print('Submitting {} workchains to the daemon'.format(number_workchains)) - for index in range(number_workchains): + print('Submitting {} workchains to the daemon'.format(NUMBER_WORKCHAINS)) + for index in range(NUMBER_WORKCHAINS): inp = Int(index) - result, node = run_get_node(NestedWorkChain, inp=inp) + _, node = run_get_node(NestedWorkChain, inp=inp) expected_results_workchains[node.pk] = index print("Submitting a workchain with 'submit'.") @@ -315,7 +335,7 @@ def main(): print('Wating for end of execution...') start_time = time.time() exited_with_timeout = True - while time.time() - start_time < timeout_secs: + while time.time() - start_time < TIMEOUTSECS: time.sleep(15) # Wait a few seconds # Print some debug info, both for debugging reasons and to avoid @@ -330,8 +350,8 @@ def main(): ['verdi', 'process', 'list', '-a'], stderr=subprocess.STDOUT, )) - except subprocess.CalledProcessError as e: - print('Note: the command failed, message: {}'.format(e)) + except subprocess.CalledProcessError as exception: + print('Note: the command failed, message: {}'.format(exception)) print("Output of 'verdi daemon status':") try: @@ -339,8 +359,8 @@ def main(): ['verdi', 'daemon', 'status'], stderr=subprocess.STDOUT, )) - except subprocess.CalledProcessError as e: - print('Note: the command failed, message: {}'.format(e)) + except subprocess.CalledProcessError as exception: + print('Note: the command failed, message: {}'.format(exception)) if jobs_have_finished(pks): print('Calculation terminated its execution') @@ -350,13 +370,13 @@ def main(): if exited_with_timeout: print_daemon_log() print('') - print('Timeout!! 
Calculation did not complete after {} seconds'.format(timeout_secs)) + print('Timeout!! Calculation did not complete after {} seconds'.format(TIMEOUTSECS)) sys.exit(2) else: # Launch the same calculations but with caching enabled -- these should be FINISHED immediately cached_calcs = [] with enable_caching(identifier='aiida.calculations:templatereplacer'): - for counter in range(1, number_calculations + 1): + for counter in range(1, NUMBER_CALCULATIONS + 1): inputval = counter calc, expected_result = run_calculation(code=code, counter=counter, inputval=inputval) cached_calcs.append(calc) @@ -364,8 +384,7 @@ def main(): if ( validate_calculations(expected_results_calculations) and - validate_workchains(expected_results_workchains) and - validate_cached(cached_calcs) + validate_workchains(expected_results_workchains) and validate_cached(cached_calcs) ): print_daemon_log() print('') diff --git a/.github/config/doubler.yaml b/.github/config/doubler.yaml new file mode 100644 index 000000000..e20033691 --- /dev/null +++ b/.github/config/doubler.yaml @@ -0,0 +1,9 @@ +--- +label: doubler +description: doubler +input_plugin: templatereplacer +on_computer: true +computer: localhost +remote_abs_path: PLACEHOLDER_REMOTE_ABS_PATH +prepend_text: ' ' +append_text: ' ' diff --git a/.github/config/localhost-config.yaml b/.github/config/localhost-config.yaml new file mode 100644 index 000000000..fbc1bd07d --- /dev/null +++ b/.github/config/localhost-config.yaml @@ -0,0 +1,2 @@ +--- +safe_interval: 0 \ No newline at end of file diff --git a/.github/config/localhost.yaml b/.github/config/localhost.yaml new file mode 100644 index 000000000..1d218786d --- /dev/null +++ b/.github/config/localhost.yaml @@ -0,0 +1,12 @@ +--- +label: localhost +description: localhost +hostname: localhost +transport: local +scheduler: direct +shebang: '#!/usr/bin/env bash' +work_dir: PLACEHOLDER_WORK_DIR +mpirun_command: ' ' +mpiprocs_per_machine: 1 +prepend_text: ' ' +append_text: ' ' \ No newline at end of file diff --git a/.github/config/profile.yaml b/.github/config/profile.yaml new file mode 100644 index 000000000..c5e0dfa3c --- /dev/null +++ b/.github/config/profile.yaml @@ -0,0 +1,14 @@ +--- +profile: PLACEHOLDER_PROFILE +email: aiida@localhost +first_name: Giuseppe +last_name: Verdi +institution: Khedivial +db_backend: PLACEHOLDER_BACKEND +db_engine: postgresql_psycopg2 +db_host: localhost +db_port: 5432 +db_name: PLACEHOLDER_DATABASE_NAME +db_username: postgres +db_password: '' +repository: PLACEHOLDER_REPOSITORY \ No newline at end of file diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..d26c6cd21 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,147 @@ +name: aiida-core + +on: [push, pull_request] + +jobs: + + conda: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v1 + + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Conda install + env: + PYTHON_VERSION: 3.7 + run: + .github/workflows/conda.sh + + docs: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v1 + + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Install system dependencies + run: | + sudo apt update + sudo apt install texlive-base texlive-generic-recommended texlive-fonts-recommended texlive-latex-base texlive-latex-recommended texlive-latex-extra dvipng dvidvi + + - name: Install python dependencies + run: + pip install -e .[docs,testing] + + - name: Build 
documentation + env: + READTHEDOCS: 'True' + run: + SPHINXOPTS='-nW' make -C docs html + + pre-commit: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v1 + + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Install system dependencies + run: | + sudo apt update + sudo apt install libkrb5-dev ruby ruby-dev + + - name: Install python dependencies + run: + pip install -e .[all] + + - name: Run pre-commit + run: + pre-commit run --all-files || ( git status --short ; git diff ; exit 1 ) + + tests: + + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + python-version: [3.5, 3.7] + backend: ['django', 'sqlalchemy'] + + steps: + - uses: actions/checkout@v1 + - uses: harmon758/postgresql-action@v1 + with: + postgresql version: '11' + postgresql db: test_${{ matrix.backend }} + postgresql user: 'postgres' + postgresql password: '' + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v1 + with: + python-version: ${{ matrix.python-version }} + + - name: Install system dependencies + run: | + wget -O - "https://github.com/rabbitmq/signing-keys/releases/download/2.0/rabbitmq-release-signing-key.asc" | sudo apt-key add - + echo 'deb https://dl.bintray.com/rabbitmq-erlang/debian bionic erlang' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list + echo 'deb https://dl.bintray.com/rabbitmq/debian bionic main' | sudo tee -a /etc/apt/sources.list.d/bintray.rabbitmq.list + sudo apt update + sudo apt install postgresql postgresql-server-dev-all postgresql-client rabbitmq-server graphviz + sudo systemctl status rabbitmq-server.service + + - name: Install python dependencies + run: | + pip install --upgrade pip + pip install -e .[atomic_tools,docs,notebook,rest,testing] + reentry scan + + - name: Setup environment + env: + AIIDA_TEST_BACKEND: ${{ matrix.backend }} + run: + .github/workflows/setup.sh + + - name: Run test suite + env: + AIIDA_TEST_BACKEND: ${{ matrix.backend }} + run: + .github/workflows/tests.sh + + verdi: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v1 + + - name: Set up Python 3.7 + uses: actions/setup-python@v1 + with: + python-version: 3.7 + + - name: Install python dependencies + run: + pip install -e . 
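+
+      # Descriptive note: `verdi devel check-load-time` looks for indicators that typically slow
+      # down the `verdi` command, while `verdi.sh` times bare `verdi` invocations against the
+      # load limit defined in that script.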
+      - name: Run verdi
+        run: |
+          verdi devel check-load-time
+          .github/workflows/verdi.sh
diff --git a/.github/workflows/conda.sh b/.github/workflows/conda.sh
new file mode 100755
index 000000000..608fe81c0
--- /dev/null
+++ b/.github/workflows/conda.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+set -ev
+
+wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
+bash miniconda.sh -b -p $HOME/miniconda
+export PATH="$HOME/miniconda/bin:$PATH"
+hash -r
+conda config --set always_yes yes --set changeps1 no
+
+# Workaround for https://github.com/conda/conda/issues/9337
+pip uninstall -y setuptools
+conda install setuptools
+
+conda update -q conda
+conda info -a
+conda env create -f environment.yml -n test-environment python=$PYTHON_VERSION
diff --git a/.github/workflows/setup.sh b/.github/workflows/setup.sh
new file mode 100755
index 000000000..615570650
--- /dev/null
+++ b/.github/workflows/setup.sh
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+set -ev
+
+ssh-keygen -q -t rsa -b 4096 -m PEM -N "" -f "${HOME}/.ssh/id_rsa"
+ssh-keygen -y -f "${HOME}/.ssh/id_rsa" >> "${HOME}/.ssh/authorized_keys"
+ssh-keyscan -H localhost >> "${HOME}/.ssh/known_hosts"
+
+# The permissions on the GitHub runner are 777, which will cause SSH to refuse the keys and cause authentication to fail
+chmod 755 "${HOME}"
+
+# Replace the placeholders in configuration files with actual values
+CONFIG="${GITHUB_WORKSPACE}/.github/config"
+sed -i "s|PLACEHOLDER_BACKEND|${AIIDA_TEST_BACKEND}|" "${CONFIG}/profile.yaml"
+sed -i "s|PLACEHOLDER_PROFILE|test_${AIIDA_TEST_BACKEND}|" "${CONFIG}/profile.yaml"
+sed -i "s|PLACEHOLDER_DATABASE_NAME|test_${AIIDA_TEST_BACKEND}|" "${CONFIG}/profile.yaml"
+sed -i "s|PLACEHOLDER_REPOSITORY|/tmp/test_repository_test_${AIIDA_TEST_BACKEND}/|" "${CONFIG}/profile.yaml"
+sed -i "s|PLACEHOLDER_WORK_DIR|${GITHUB_WORKSPACE}|" "${CONFIG}/localhost.yaml"
+sed -i "s|PLACEHOLDER_REMOTE_ABS_PATH|${GITHUB_WORKSPACE}/.ci/doubler.sh|" "${CONFIG}/doubler.yaml"
+
+verdi setup --config "${CONFIG}/profile.yaml"
+verdi computer setup --config "${CONFIG}/localhost.yaml"
+verdi computer configure local localhost --config "${CONFIG}/localhost-config.yaml"
+verdi code setup --config "${CONFIG}/doubler.yaml"
+
+verdi profile setdefault test_${AIIDA_TEST_BACKEND}
+verdi config runner.poll.interval 0
\ No newline at end of file
diff --git a/.github/workflows/tests.sh b/.github/workflows/tests.sh
new file mode 100755
index 000000000..2a775f1bb
--- /dev/null
+++ b/.github/workflows/tests.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -ev
+
+# Make sure the folder containing the workchains is in the python path before the daemon is started
+export PYTHONPATH="${PYTHONPATH}:${GITHUB_WORKSPACE}/.ci"
+
+verdi daemon start 4
+verdi -p test_${AIIDA_TEST_BACKEND} run .ci/test_daemon.py
+verdi daemon stop
+
+verdi -p test_$AIIDA_TEST_BACKEND devel tests
+pytest aiida/sphinxext/tests
+pytest .ci/test_test_manager.py
+pytest .ci/test_profile_manager.py
+python .ci/test_plugin_testcase.py
+pytest .ci/pytest
+AIIDA_TEST_PROFILE=test_$AIIDA_TEST_BACKEND pytest .ci/pytest
diff --git a/.github/workflows/verdi.sh b/.github/workflows/verdi.sh
new file mode 100755
index 000000000..f2871bae9
--- /dev/null
+++ b/.github/workflows/verdi.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+
+# Test the loading time of `verdi`. This is an attempt to catch changes to the imports in `aiida.cmdline` that will
+# indirectly load the `aiida.orm` module, which will trigger loading of the backend environment. This slows down `verdi`
+# significantly, making tab-completion unusable.
+VERDI=`which verdi`
+
+# Typically, the loading time of `verdi` should be around ~0.2 seconds. When loading the database environment this
+# tends to go towards ~0.8 seconds. Since these timings are obviously machine and environment dependent, these types
+# of tests are typically fragile. But exceeding a load limit of more than twice the ideal loading time should give
+# a reasonably sure indication that the loading of `verdi` is unacceptably slowed down.
+LOAD_LIMIT=0.4
+MAX_NUMBER_ATTEMPTS=5
+
+iteration=0
+
+while true; do
+
+    iteration=$((iteration+1))
+    load_time=$(/usr/bin/time -q -f "%e" $VERDI 2>&1 > /dev/null)
+
+    if (( $(echo "$load_time < $LOAD_LIMIT" | bc -l) )); then
+        echo "SUCCESS: loading time $load_time at iteration $iteration below $LOAD_LIMIT"
+        break
+    else
+        echo "WARNING: loading time $load_time at iteration $iteration above $LOAD_LIMIT"
+
+        if [ $iteration -eq $MAX_NUMBER_ATTEMPTS ]; then
+            echo "ERROR: loading time exceeded the load limit $iteration consecutive times."
+            echo "ERROR: please check that 'aiida.cmdline' does not import 'aiida.orm' at module level, even indirectly"
+            echo "ERROR: also, the database backend environment should not be loaded."
+            exit 2
+        fi
+    fi
+
+done
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 9ea9d8dff..15a5f506e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -155,7 +155,6 @@
             aiida/transports/plugins/test_all_plugins.py|
             aiida/transports/plugins/test_local.py|
             aiida/transports/plugins/test_ssh.py|
-            .ci/test_daemon.py|
             .ci/workchains.py|
         )$
diff --git a/.pylintrc b/.pylintrc
index 7a9c565f2..d6b53cd59 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -136,7 +136,7 @@ function-name-hint=(([a-z][a-z0-9_]{2,40})|(_[a-z0-9_]*))$
 function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$
 
 # Good variable names which should always be accepted, separated by a comma
-good-names=i,j,k,ex,Run,_,pk
+good-names=i,j,k,ex,Run,_,pk,TemplatereplacerCalculation
 
 # Include a hint for the correct naming format with invalid-name
 include-naming-hint=no
diff --git a/aiida/backends/djsite/manager.py b/aiida/backends/djsite/manager.py
index a08022b82..cde8f4e67 100644
--- a/aiida/backends/djsite/manager.py
+++ b/aiida/backends/djsite/manager.py
@@ -108,7 +108,7 @@ def _migrate_database_generation(self):
         """
         # pylint: disable=cyclic-import
         from aiida.manage.manager import get_manager
-        super(DjangoBackendManager, self)._migrate_database_generation()
+        super()._migrate_database_generation()
 
         backend = get_manager()._load_backend(schema_check=False)  # pylint: disable=protected-access
         backend.execute_raw(r"""DELETE FROM django_migrations WHERE app = 'db';""")
@@ -118,7 +118,7 @@ def _migrate_database_generation(self):
 
     def _migrate_database_version(self):
         """Migrate the database to the current schema version."""
-        super(DjangoBackendManager, self)._migrate_database_version()
+        super()._migrate_database_version()
         from django.core.management import call_command  # pylint: disable=no-name-in-module,import-error
         call_command('migrate')
diff --git a/aiida/backends/sqlalchemy/manager.py b/aiida/backends/sqlalchemy/manager.py
index 662a29ecf..be6d5bf1b 100644
--- a/aiida/backends/sqlalchemy/manager.py
+++ b/aiida/backends/sqlalchemy/manager.py
@@ -137,7 +137,7 @@ def set_schema_version_database(self, version):
 
     def _migrate_database_version(self):
         """Migrate the database to the current schema version."""
-        super(SqlaBackendManager, 
self)._migrate_database_version() + super()._migrate_database_version() with self.alembic_config() as config: command.upgrade(config, 'head') diff --git a/aiida/backends/sqlalchemy/migrations/versions/041a79fc615f_dblog_cleaning.py b/aiida/backends/sqlalchemy/migrations/versions/041a79fc615f_dblog_cleaning.py index 8a4d9d7a5..b5b46f1ba 100644 --- a/aiida/backends/sqlalchemy/migrations/versions/041a79fc615f_dblog_cleaning.py +++ b/aiida/backends/sqlalchemy/migrations/versions/041a79fc615f_dblog_cleaning.py @@ -22,9 +22,9 @@ import click +from alembic import op import sqlalchemy as sa from sqlalchemy.sql import text -from alembic import op from aiida.backends.general.migrations.utils import dumps_json from aiida.manage import configuration diff --git a/aiida/backends/tests/cmdline/commands/test_database.py b/aiida/backends/tests/cmdline/commands/test_database.py index 7681d4b96..131a2b7c6 100644 --- a/aiida/backends/tests/cmdline/commands/test_database.py +++ b/aiida/backends/tests/cmdline/commands/test_database.py @@ -26,7 +26,7 @@ class TestVerdiDatabasaIntegrity(AiidaTestCase): @classmethod def setUpClass(cls, *args, **kwargs): """Create a basic valid graph that should help detect false positives.""" - super(TestVerdiDatabasaIntegrity, cls).setUpClass(*args, **kwargs) + super().setUpClass(*args, **kwargs) data_input = Data().store() data_output = Data().store() calculation = CalculationNode() diff --git a/aiida/backends/tests/orm/data/test_upf.py b/aiida/backends/tests/orm/data/test_upf.py index 94a4357c3..228f8d9b7 100644 --- a/aiida/backends/tests/orm/data/test_upf.py +++ b/aiida/backends/tests/orm/data/test_upf.py @@ -329,7 +329,7 @@ def test_upf1_to_json_carbon(self): json_string, _ = self.pseudo_carbon._prepare_json() filepath_base = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, os.pardir, 'fixtures', 'pseudos')) reference_dict = json.load(open(os.path.join(filepath_base, 'C.json'), 'r')) - pp_dict = json.loads(json_string) + pp_dict = json.loads(json_string.decode('utf-8')) # remove path information pp_dict['pseudo_potential']['header']['original_upf_file'] = '' reference_dict['pseudo_potential']['header']['original_upf_file'] = '' @@ -341,7 +341,7 @@ def test_upf2_to_json_barium(self): json_string, _ = self.pseudo_barium._prepare_json() filepath_base = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, os.pardir, 'fixtures', 'pseudos')) reference_dict = json.load(open(os.path.join(filepath_base, 'Ba.json'), 'r')) - pp_dict = json.loads(json_string) + pp_dict = json.loads(json_string.decode('utf-8')) # remove path information pp_dict['pseudo_potential']['header']['original_upf_file'] = '' reference_dict['pseudo_potential']['header']['original_upf_file'] = '' diff --git a/aiida/backends/tests/tools/importexport/migration/test_v02_to_v03.py b/aiida/backends/tests/tools/importexport/migration/test_v02_to_v03.py index 26e6f3780..5912e22e1 100644 --- a/aiida/backends/tests/tools/importexport/migration/test_v02_to_v03.py +++ b/aiida/backends/tests/tools/importexport/migration/test_v02_to_v03.py @@ -154,6 +154,7 @@ def test_compare_migration_with_aiida_made(self): 'made': [] } } # User is special, see below + add = None for entity, details in entities.items(): for node in data_v2['export_data'][entity].values(): if entity == 'Node': # Node diff --git a/aiida/orm/convert.py b/aiida/orm/convert.py index 2aa287b42..2413d082a 100644 --- a/aiida/orm/convert.py +++ b/aiida/orm/convert.py @@ -9,7 +9,6 @@ 
###########################################################################
 # pylint: disable=cyclic-import
 """Module for converting backend entities into frontend, ORM, entities"""
-
 from collections.abc import Mapping, Iterator, Sized
 from functools import singledispatch
diff --git a/aiida/orm/implementation/django/computers.py b/aiida/orm/implementation/django/computers.py
index cf52380f4..1a4bb41ac 100644
--- a/aiida/orm/implementation/django/computers.py
+++ b/aiida/orm/implementation/django/computers.py
@@ -43,7 +43,7 @@ def copy(self):
         dbomputer = models.DbComputer.objects.get(pk=self.pk)
         dbomputer.pk = None
-        newobject = self.__class__.from_dbmodel(dbomputer)
+        newobject = self.__class__.from_dbmodel(dbomputer)  # pylint: disable=no-value-for-parameter
 
         return newobject
diff --git a/aiida/orm/implementation/sqlalchemy/computers.py b/aiida/orm/implementation/sqlalchemy/computers.py
index 8c33d2318..54ff93b04 100644
--- a/aiida/orm/implementation/sqlalchemy/computers.py
+++ b/aiida/orm/implementation/sqlalchemy/computers.py
@@ -62,7 +62,7 @@ def copy(self):
         make_transient(dbcomputer)
         session.add(dbcomputer)
 
-        newobject = self.__class__.from_dbmodel(dbcomputer)
+        newobject = self.__class__.from_dbmodel(dbcomputer)  # pylint: disable=no-value-for-parameter
 
         return newobject
diff --git a/aiida/orm/utils/links.py b/aiida/orm/utils/links.py
index ae1e06f26..b8802be97 100644
--- a/aiida/orm/utils/links.py
+++ b/aiida/orm/utils/links.py
@@ -8,8 +8,8 @@
 # For further information please visit http://www.aiida.net               #
 ###########################################################################
 """Utilities for dealing with links between nodes."""
-
-from collections import namedtuple, OrderedDict, Mapping
+from collections import namedtuple, OrderedDict
+from collections.abc import Mapping
 
 from aiida.common import exceptions
 from aiida.common.lang import type_check
diff --git a/setup.json b/setup.json
index f7c812204..639eab7b7 100644
--- a/setup.json
+++ b/setup.json
@@ -109,7 +109,7 @@
     ],
     "dev_precommit": [
         "astroid==2.2.5",
-        "pep8-naming==0.8.2",
+        "pep8-naming==0.4.1",
         "pre-commit==1.18.3",
         "prospector==1.1.7",
         "pylint==2.3.1",

From fab1c56ee5b1a59fa68fc3f4b191e8daf37c9c5a Mon Sep 17 00:00:00 2001
From: Sebastiaan Huber
Date: Tue, 26 Nov 2019 12:32:06 +0100
Subject: [PATCH 2/2] Remove Travis configuration and simplify Jenkins setup

The majority of tests are now run through a GitHub CI workflow. This
means it is no longer necessary to also run those on Jenkins, especially
since Jenkins was originally configured to run just the additional tests
that were more computationally intensive. Therefore, we only keep the
RPN test to run on Jenkins for now.
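
For reference, each remaining Jenkins backend stage now reduces to two
script invocations (a sketch, assuming the stage environment still
provides `AIIDA_TEST_BACKEND` as configured in the Jenkinsfile):

    .ci/setup.sh     # create the database and profile, start the daemon, set up the localhost computer and code
    .ci/test_rpn.sh  # run the reverse polish notation workchain tests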
--- .ci/Jenkinsfile | 39 +------- .ci/README | 2 - .ci/before_install.sh | 24 ----- .ci/before_script.sh | 68 ------------- .ci/coverage/.gitignore | 2 - .ci/create_coverage_info.sh | 20 ---- .ci/install_conda.sh | 19 ---- .ci/prep_ssh.sh | 16 --- .ci/setup.sh | 34 +++++++ .ci/setup_profiles.sh | 36 ------- .ci/test_rpn.sh | 4 +- .ci/test_script.sh | 78 --------------- .ci/torquessh-doubler/Dockerfile | 10 -- .ci/torquessh-doubler/README.md | 10 -- .ci/torquessh-doubler/doubler.sh | 14 --- .ci/wait-for-it.sh | 161 ------------------------------- .travis.yml | 117 ---------------------- open_source_licenses.txt | 28 ------ 18 files changed, 39 insertions(+), 643 deletions(-) delete mode 100644 .ci/README delete mode 100755 .ci/before_install.sh delete mode 100755 .ci/before_script.sh delete mode 100644 .ci/coverage/.gitignore delete mode 100755 .ci/create_coverage_info.sh delete mode 100755 .ci/install_conda.sh delete mode 100755 .ci/prep_ssh.sh create mode 100755 .ci/setup.sh delete mode 100755 .ci/setup_profiles.sh delete mode 100755 .ci/test_script.sh delete mode 100644 .ci/torquessh-doubler/Dockerfile delete mode 100644 .ci/torquessh-doubler/README.md delete mode 100755 .ci/torquessh-doubler/doubler.sh delete mode 100755 .ci/wait-for-it.sh delete mode 100644 .travis.yml diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 9c42f03b7..e1d3ee5cf 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -29,11 +29,7 @@ pipeline { } } environment { - // I define some environment variables that are used - // internally by the travis scripts that I call - TEST_TYPE="tests" - // To mock what TRAVIS WOULD USE - TRAVIS_BUILD_DIR="." + WORKSPACE_PATH="." COMPUTER_SETUP_TYPE="jenkins" // The following two variables allow to run selectively tests only for one backend RUN_ALSO_DJANGO="true" @@ -70,10 +66,6 @@ pipeline { // Debug: check that I can connect without password sh 'echo "SELECT datname FROM pg_database" | psql -h localhost -U postgres -w' - // We skip the creation of a docker image - // to ssh into, as it is done in travis. Here, in Jenkins, - // we instead ssh to localhost to investigate. 
- // Add the line to the .bashrc, but before it stops when non-interactive // So it can find the location of 'verdi' sh "sed -i '/interactively/iexport PATH=\${PATH}:~/.local/bin' ~/.bashrc" @@ -109,18 +101,13 @@ pipeline { // they might get at the point of writing the config.json at the // same time and one of the two would crash AIIDA_PATH="/tmp/aiida-django-folder" - // To collect coverage info for each backend in a different file - // and avoiding potential problems - COVERAGE_FILE=".coverage.django" } when { // This allows to selectively run only one backend environment name: 'RUN_ALSO_DJANGO', value: 'true' } steps { - sh '.ci/setup_profiles.sh' - sh '.ci/before_script.sh' - sh '.ci/test_script.sh' + sh '.ci/setup.sh' sh '.ci/test_rpn.sh' } } @@ -128,38 +115,18 @@ pipeline { environment { AIIDA_TEST_BACKEND="sqlalchemy" AIIDA_PATH="/tmp/aiida-sqla-folder" - COVERAGE_FILE=".coverage.sqlalchemy" } when { // This allows to selectively run only one backend environment name: 'RUN_ALSO_SQLALCHEMY', value: 'true' } steps { - sh '.ci/setup_profiles.sh' - sh '.ci/before_script.sh' - sh '.ci/test_script.sh' + sh '.ci/setup.sh' sh '.ci/test_rpn.sh' } } } } - stage('Final') { - steps { - // create the final coverage info, summed over the various backends - sh '.ci/create_coverage_info.sh' - } - // post { - // always { - // // note: junit does not like the XML output, it says - // // 'None of the test reports contained any result' - // // (maybe because it's coverage and not testing?) - // // For now I'm not doing it as it's ~3 MB every time - // // NOTE MOREOVER that one should run 'zip -r html.zip html' in the - // // coverage folder, first - // archiveArtifacts artifacts: '.ci/coverage/html.zip', fingerprint: true - // } - // } - } } post { always { diff --git a/.ci/README b/.ci/README deleted file mode 100644 index 9ddb974a2..000000000 --- a/.ci/README +++ /dev/null @@ -1,2 +0,0 @@ -Files for continuous integration tests, -both on Travis and on Jenkins diff --git a/.ci/before_install.sh b/.ci/before_install.sh deleted file mode 100755 index 674985135..000000000 --- a/.ci/before_install.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash - -# Be verbose, and stop with error as soon there's one -set -ev - -if [[ "$TEST_TYPE" == "tests" ]] -then - THEKEY=`ssh-keygen -y -f "${HOME}/.ssh/id_rsa"` - echo 'AUTHORIZED_KEY='"$THEKEY" > "${TRAVIS_BUILD_DIR}/torquessh.env" - docker build -t torquessh "${TRAVIS_BUILD_DIR}/.ci/torquessh-doubler" - # Run it in the background, mapping port 22 of the container - # to port 10022 outside, and passing the environment variable - docker run -d --privileged -p=10022:22 --name="torquesshmachine" --env-file "${TRAVIS_BUILD_DIR}/torquessh.env" torquessh - # Docker ps to see what is going on - echo "Running docker ps to see if the 'torquessh' docker image is up..." 
- docker ps - # Wait for SSH to be up - "${TRAVIS_BUILD_DIR}"/.ci/wait-for-it.sh localhost:10022 -t 0 - - # I will add the key to the known_hosts later, to give the time to ssh - # to be really up - see the before_script script - #ssh-keyscan -p 10022 localhost >> ${HOME}/.ssh/known_hosts - -fi diff --git a/.ci/before_script.sh b/.ci/before_script.sh deleted file mode 100755 index 0b5fc2934..000000000 --- a/.ci/before_script.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash - -# Be verbose, and stop with error as soon there's one -set -ev - -# The following is needed on jenkins, for some reason -# bashrc is not reloaded automatically -if [ -e ~/.bashrc ] ; then source ~/.bashrc ; fi - -if [[ "$TEST_TYPE" == "tests" ]] -then - # Add the .ci and the polish folder to the python path such that defined workchains can be found by the daemon - export PYTHONPATH="${PYTHONPATH}:${TRAVIS_BUILD_DIR}/.ci" - export PYTHONPATH="${PYTHONPATH}:${TRAVIS_BUILD_DIR}/.ci/polish" - - # Start the daemon for the correct profile and add four additional workers to prevent deadlock with integration tests - verdi -p $AIIDA_TEST_BACKEND daemon start - verdi -p $AIIDA_TEST_BACKEND daemon incr 4 - - if [[ "$COMPUTER_SETUP_TYPE" != "jenkins" ]] - then - # Setup the torquessh computer - verdi -p $AIIDA_TEST_BACKEND computer setup --non-interactive --label=torquessh --hostname=localhost --transport=ssh --scheduler=torque --mpiprocs-per-machine=1 --prepend-text="" --append-text="" - - # Configure the torquessh computer - verdi -p $AIIDA_TEST_BACKEND computer configure ssh torquessh --non-interactive --safe-interval=1 --username=app --port=10022 --key-filename=~/.ssh/id_rsa --timeout=60 --compress --gss-host=localhost --load-system-host-keys --key-policy=RejectPolicy - - # Configure the 'doubler' code inside torquessh - verdi -p $AIIDA_TEST_BACKEND code setup -n -L doubler \ - -D "simple script that doubles a number and sleeps for a given number of seconds" \ - --on-computer -P templatereplacer -Y torquessh \ - --remote-abs-path=/usr/local/bin/d\"o\'ub\ ler.sh - - # Make sure that the torquessh (localhost:10022) key is hashed - # in the known_hosts file - echo "'ssh-keyscan -p 10022 -t rsa localhost' output:" - ssh-keyscan -p 10022 -t rsa localhost > /tmp/localhost10022key.txt - cat /tmp/localhost10022key.txt - - # Patch for OpenSSH 6, that does not write the port number in the - # known_hosts file. 
OpenSSH 7 would work, instead - if grep -e '^localhost' /tmp/localhost10022key.txt > /dev/null 2>&1 ; then cat /tmp/localhost10022key.txt | sed 's/^localhost/[localhost]:10022/' >> ${HOME}/.ssh/known_hosts ; else cat /tmp/localhost10022key.txt >> ${HOME}/.ssh/known_hosts; fi - - echo "Content of the known_hosts file:" - cat ${HOME}/.ssh/known_hosts - else - # Computer configuration on Jenkins - - # Setup the torquessh computer - this one is custom, using direct scheduler - verdi -p $AIIDA_TEST_BACKEND computer setup --non-interactive --label=torquessh --hostname=localhost --transport=ssh --scheduler=direct --mpiprocs-per-machine=1 --prepend-text="" --append-text="" - - # Configure the torquessh computer - this one is custom, using port 22 - verdi -p $AIIDA_TEST_BACKEND computer configure ssh torquessh --non-interactive --safe-interval=1 --username=jenkins --port=22 --key-filename=~/.ssh/id_rsa --timeout=60 --compress --gss-host=localhost --load-system-host-keys --key-policy=RejectPolicy - - # Configure the 'doubler' code inside torquessh - verdi -p $AIIDA_TEST_BACKEND code setup -n -L doubler \ - -D "simple script that doubles a number and sleeps for a given number of seconds" \ - --on-computer -P templatereplacer -Y torquessh \ - --remote-abs-path=/usr/local/bin/d\"o\'ub\ ler.sh - - # Configure the 'add' code inside torquessh, which is only required for the integrations test on Jenkins - verdi -p $AIIDA_TEST_BACKEND code setup -n -L add \ - -D "simple script that adds two numbers" --on-computer -P arithmetic.add \ - -Y torquessh --remote-abs-path=/usr/local/bin/add.sh - - ## The key of localhost should be already set in the Jenkinsfile - fi -fi diff --git a/.ci/coverage/.gitignore b/.ci/coverage/.gitignore deleted file mode 100644 index fb0b0fc00..000000000 --- a/.ci/coverage/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# created by coverage -html/ diff --git a/.ci/create_coverage_info.sh b/.ci/create_coverage_info.sh deleted file mode 100755 index e65b0b4ca..000000000 --- a/.ci/create_coverage_info.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -set -e - -# Needed on Jenkins -if [ -e ~/.bashrc ] ; then source ~/.bashrc ; fi - -# Collect all coverage files of different backends, see -# http://coverage.readthedocs.io/en/latest/cmd.html#combining-data-files -coverage combine - -# Create XML file -# coverage xml -o coverage.xml - -# Create HTML file(s) -# location set in the .coveragerc config file -# coverage html - -# Create text report -coverage report diff --git a/.ci/install_conda.sh b/.ci/install_conda.sh deleted file mode 100755 index 66aaf3126..000000000 --- a/.ci/install_conda.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# See https://conda.io/docs/user-guide/tasks/use-conda-with-travis-ci.html#the-travis-yml-file -if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then - wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh; -else - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh; -fi -bash miniconda.sh -b -p $HOME/miniconda -export PATH="$HOME/miniconda/bin:$PATH" -hash -r -conda config --set always_yes yes --set changeps1 no - -# workaround for https://github.com/conda/conda/issues/9337 -pip uninstall -y setuptools -conda install setuptools - -conda update -q conda -# Useful for debugging any issues with conda -conda info -a diff --git a/.ci/prep_ssh.sh b/.ci/prep_ssh.sh deleted file mode 100755 index ae27c92d9..000000000 --- a/.ci/prep_ssh.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# Be verbose, 
and stop with error as soon there's one -set -ev - -# This is needed for the SSH tests (being able to ssh to localhost) -# And will also be used for the docker test. - -# Make sure we don't overwrite an existing SSH key (seems to be the case for private Travis instances) -[ -f "${HOME}/.ssh/id_rsa" ] || ssh-keygen -q -t rsa -N "" -f "${HOME}/.ssh/id_rsa" - -# Extract the public key directly from the private key without requiring .pub file (again: private Travis instances) -ssh-keygen -y -f "${HOME}/.ssh/id_rsa" >> "${HOME}/.ssh/authorized_keys" - -# Register the current hosts fingerprint -ssh-keyscan -H localhost >> "${HOME}/.ssh/known_hosts" diff --git a/.ci/setup.sh b/.ci/setup.sh new file mode 100755 index 000000000..d29f1e8f1 --- /dev/null +++ b/.ci/setup.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -ev + +# The following is needed on jenkins, for some reason bashrc is not reloaded automatically +if [ -e ~/.bashrc ] ; then source ~/.bashrc ; fi + +# Add the .ci and the polish folder to the python path such that defined workchains can be found by the daemon +export PYTHONPATH="${PYTHONPATH}:${WORKSPACE_PATH}/.ci" +export PYTHONPATH="${PYTHONPATH}:${WORKSPACE_PATH}/.ci/polish" + +PSQL_COMMAND="CREATE DATABASE $AIIDA_TEST_BACKEND ENCODING \"UTF8\" LC_COLLATE=\"en_US.UTF-8\" LC_CTYPE=\"en_US.UTF-8\" TEMPLATE=template0;" +psql -h localhost -c "${PSQL_COMMAND}" -U postgres -w + +verdi setup --profile $AIIDA_TEST_BACKEND \ + --email="aiida@localhost" --first-name=AiiDA --last-name=test --institution="AiiDA Team" \ + --db-engine 'postgresql_psycopg2' --db-backend=$AIIDA_TEST_BACKEND --db-host="localhost" --db-port=5432 \ + --db-name="$AIIDA_TEST_BACKEND" --db-username=postgres --db-password='' \ + --repository="/tmp/repository_${AIIDA_TEST_BACKEND}/" --non-interactive + +verdi profile setdefault $AIIDA_TEST_BACKEND +verdi config runner.poll.interval 0 + +# Start the daemon for the correct profile and add four additional workers to prevent deadlock with integration tests +verdi -p $AIIDA_TEST_BACKEND daemon start +verdi -p $AIIDA_TEST_BACKEND daemon incr 4 + +verdi -p $AIIDA_TEST_BACKEND computer setup --non-interactive --label=localhost --hostname=localhost --transport=local \ + --scheduler=direct --mpiprocs-per-machine=1 --prepend-text="" --append-text="" +verdi -p $AIIDA_TEST_BACKEND computer configure local localhost --non-interactive --safe-interval=0 + +# Configure the 'add' code inside localhost +verdi -p $AIIDA_TEST_BACKEND code setup -n -L add \ + -D "simple script that adds two numbers" --on-computer -P arithmetic.add \ + -Y localhost --remote-abs-path=/usr/local/bin/add.sh diff --git a/.ci/setup_profiles.sh b/.ci/setup_profiles.sh deleted file mode 100755 index 28526f825..000000000 --- a/.ci/setup_profiles.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -set -ev - -# Needed on Jenkins -if [ -e ~/.bashrc ] ; then source ~/.bashrc ; fi - -if [[ "$TEST_TYPE" == "tests" || "$TEST_TYPE" == "docs" ]] -then - # Create the main database - PSQL_COMMAND="CREATE DATABASE $AIIDA_TEST_BACKEND ENCODING \"UTF8\" LC_COLLATE=\"en_US.UTF-8\" LC_CTYPE=\"en_US.UTF-8\" TEMPLATE=template0;" - psql -h localhost -c "${PSQL_COMMAND}" -U postgres -w - - # Create the test database - PSQL_COMMAND="CREATE DATABASE test_$AIIDA_TEST_BACKEND ENCODING \"UTF8\" LC_COLLATE=\"en_US.UTF-8\" LC_CTYPE=\"en_US.UTF-8\" TEMPLATE=template0;" - psql -h localhost -c "${PSQL_COMMAND}" -U postgres -w - - # Setup the main profile - verdi setup --profile $AIIDA_TEST_BACKEND \ - --email="aiida@localhost" --first-name=AiiDA 
--last-name=test --institution="AiiDA Team" \ - --db-engine 'postgresql_psycopg2' --db-backend=$AIIDA_TEST_BACKEND --db-host="localhost" --db-port=5432 \ - --db-name="$AIIDA_TEST_BACKEND" --db-username=postgres --db-password='' \ - --repository="/tmp/repository_${AIIDA_TEST_BACKEND}/" --non-interactive - - # Setup the test profile - verdi setup --profile test_$AIIDA_TEST_BACKEND \ - --email="aiida@localhost" --first-name=AiiDA --last-name=test --institution="AiiDA Team" \ - --db-engine 'postgresql_psycopg2' --db-backend=$AIIDA_TEST_BACKEND --db-host="localhost" --db-port=5432 \ - --db-name="test_$AIIDA_TEST_BACKEND" --db-username=postgres --db-password='' \ - --repository="/tmp/test_repository_test_${AIIDA_TEST_BACKEND}/" --non-interactive - - # Set the main profile as the default - verdi profile setdefault $AIIDA_TEST_BACKEND - - # Set the polling interval to 0 otherwise the tests take too long - verdi config runner.poll.interval 0 -fi diff --git a/.ci/test_rpn.sh b/.ci/test_rpn.sh index 92d36b51c..6f96ac4ed 100755 --- a/.ci/test_rpn.sh +++ b/.ci/test_rpn.sh @@ -6,13 +6,13 @@ set -ev declare -a EXPRESSIONS=("1 -2 -1 4 -5 -5 * * * * +" "2 1 3 3 -1 + ^ ^ +" "3 -5 -1 -4 + * ^" "2 4 2 -4 * * +" "3 1 1 5 ^ ^ ^" "3 1 3 4 -4 2 * + + ^ ^") NUMBER_WORKCHAINS=5 TIMEOUT=600 -CODE='add@torquessh' +CODE='add!' # Note the exclamation point is necessary to force the value to be interpreted as LABEL type identifier # Needed on Jenkins if [ -e ~/.bashrc ] ; then source ~/.bashrc ; fi # Define the absolute path to the RPN cli script -DATA_DIR="${TRAVIS_BUILD_DIR}/.ci" +DATA_DIR="${WORKSPACE_PATH}/.ci" CLI_SCRIPT="${DATA_DIR}/polish/cli.py" # Export the polish module to the python path so generated workchains can be imported diff --git a/.ci/test_script.sh b/.ci/test_script.sh deleted file mode 100755 index aea2f5fde..000000000 --- a/.ci/test_script.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -# Be verbose, and stop with error as soon there's one -set -ev - -# Needed on Jenkins -if [ -e ~/.bashrc ] ; then source ~/.bashrc ; fi - -CI_DIR="${TRAVIS_BUILD_DIR}/.ci" - -case "$TEST_TYPE" in - docs) - # Compile the docs (HTML format); - # -C change to 'docs' directory before doing anything - # -n to warn about all missing references - # -W to convert warnings in errors - SPHINXOPTS="-nW" make -C docs - ;; - tests) - VERDI=`which verdi` - - # The `check-load-time` command will check for indicators that typically slow down the `verdi` command - coverage run -a $VERDI devel check-load-time - - # Test the loading time of `verdi` to ensure the database environment is not loaded - set +e - "${CI_DIR}/test_verdi_load_time.sh" - - # If the load time test failed, only exit if we are on travis - if [ $? 
-gt 0 ] && [ -n "${TRAVIS}" ]; then - exit 2 - fi - set -e - - # Add the .ci folder to the python path so workchains within it can be found by the daemon - export PYTHONPATH="${PYTHONPATH}:${CI_DIR}" - - # Clean up coverage file (there shouldn't be any, but just in case) - coverage erase - - # Run preliminary tests - coverage run -a "${CI_DIR}/test_test_manager.py" - coverage run -a "${CI_DIR}/test_profile_manager.py" - coverage run -a "${CI_DIR}/test_plugin_testcase.py" - # append to coverage file, do not create final report - pytest --cov=aiida --cov-append --cov-report= "${CI_DIR}/pytest" - # rerun tests with existing profile - TEST_AIIDA_PROFILE=test_${AIIDA_TEST_BACKEND} pytest --cov=aiida --cov-append --cov-report= "${CI_DIR}/pytest" - - # Run verdi devel tests - coverage run -a $VERDI -p test_${AIIDA_TEST_BACKEND} devel tests -v - - # Run the daemon tests using docker - # Note: This is not a typo, the profile is called ${AIIDA_TEST_BACKEND} - - # In case of error, I do some debugging, but I make sure I anyway exit with an exit error - coverage run -a $VERDI -p ${AIIDA_TEST_BACKEND} run "${CI_DIR}/test_daemon.py" || ( if which docker > /dev/null ; then docker ps -a ; docker exec torquesshmachine cat /var/log/syslog ; fi ; exit 1 ) - - # Run the sphinxext tests, append to coverage file, do not create final report - coverage run --append -m pytest aiida/sphinxext/tests - - # Now, we run all the tests and we manually create the final report - # Note that this is only the partial coverage for this backend - coverage report - ;; - pre-commit) - pre-commit run --all-files || ( git status --short ; git diff ; exit 1 ) - ;; - conda) - # Note: Not added to install in order not to slow down other tests - source ${CI_DIR}/install_conda.sh - - # Replace dep1 dep2 ... with your dependencies - conda env create -f environment.yml -n test-environment python=$TRAVIS_PYTHON_VERSION - source activate test-environment - verdi --help - ;; -esac diff --git a/.ci/torquessh-doubler/Dockerfile b/.ci/torquessh-doubler/Dockerfile deleted file mode 100644 index f7977fd92..000000000 --- a/.ci/torquessh-doubler/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM aiidateam/torquessh_base:1.0 -MAINTAINER AiiDA Team - -# Use baseimage-docker's init system. -CMD ["/sbin/my_init"] - -COPY doubler.sh /usr/local/bin/ - -# Use messed-up filename to test quoting robustness -RUN mv /usr/local/bin/doubler.sh /usr/local/bin/d\"o\'ub\ ler.sh diff --git a/.ci/torquessh-doubler/README.md b/.ci/torquessh-doubler/README.md deleted file mode 100644 index 257482de8..000000000 --- a/.ci/torquessh-doubler/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Content - -This folder contains an example of an extension of the torquessh-base image, -where we add a very basic script to double a number. -This is meant to be a very lightweight 'code' to test daemon functionality. - -# Notes - -Inside the docker image, we use a filename including single quotes, double -quotes and spaces in order to test the robustness of AiiDA's escaping routines. 
diff --git a/.ci/torquessh-doubler/doubler.sh b/.ci/torquessh-doubler/doubler.sh deleted file mode 100755 index a109fa6d2..000000000 --- a/.ci/torquessh-doubler/doubler.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -# Read cmdline parameter and sleep that number of seconds (or zero if not present) -# Then read integer from file 'value_to_double.txt`, multiply by two and echo that value - -set -e - -if [ "$1" != "" ] -then - sleep $1 -fi - -INPUTVALUE=`cat value_to_double.txt` -echo $(( $INPUTVALUE * 2 )) -echo $(( $INPUTVALUE * 3 )) > 'triple_value.tmp' diff --git a/.ci/wait-for-it.sh b/.ci/wait-for-it.sh deleted file mode 100755 index 904d06875..000000000 --- a/.ci/wait-for-it.sh +++ /dev/null @@ -1,161 +0,0 @@ -#!/usr/bin/env bash -# Use this script to test if a given TCP host/port are available - -cmdname=$(basename $0) - -echoerr() { if [[ $QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } - -usage() -{ - cat << USAGE >&2 -Usage: - $cmdname host:port [-s] [-t timeout] [-- command args] - -h HOST | --host=HOST "Host or IP under test" - -p PORT | --port=PORT "TCP port under test" - "Alternatively, you specify the host and port as host:port" - -s | --strict "Only execute subcommand if the test succeeds" - -q | --quiet "Don't output any status messages" - -t TIMEOUT | --timeout=TIMEOUT - "Timeout in seconds, zero for no timeout" - -- COMMAND ARGS "Execute command with args after the test finishes" -USAGE - exit 1 -} - -wait_for() -{ - if [[ $TIMEOUT -gt 0 ]]; then - echoerr "$cmdname: waiting $TIMEOUT seconds for $HOST:$PORT" - else - echoerr "$cmdname: waiting for $HOST:$PORT without a timeout" - fi - start_ts=$(date +%s) - while : - do - (echo > /dev/tcp/$HOST/$PORT) >/dev/null 2>&1 - result=$? - if [[ $result -eq 0 ]]; then - end_ts=$(date +%s) - echoerr "$cmdname: $HOST:$PORT is available after $((end_ts - start_ts)) seconds" - break - fi - sleep 1 - done - return $result -} - -wait_for_wrapper() -{ - # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692 - if [[ $QUIET -eq 1 ]]; then - timeout $TIMEOUT $0 --quiet --child --host=$HOST --port=$PORT --timeout=$TIMEOUT & - else - timeout $TIMEOUT $0 --child --host=$HOST --port=$PORT --timeout=$TIMEOUT & - fi - PID=$! - trap "kill -INT -$PID" INT - wait $PID - RESULT=$? - if [[ $RESULT -ne 0 ]]; then - echoerr "$cmdname: timeout occurred after waiting $TIMEOUT seconds for $HOST:$PORT" - fi - return $RESULT -} - -# process arguments -while [[ $# -gt 0 ]] -do - case "$1" in - *:* ) - hostport=(${1//:/ }) - HOST=${hostport[0]} - PORT=${hostport[1]} - shift 1 - ;; - --child) - CHILD=1 - shift 1 - ;; - -q | --quiet) - QUIET=1 - shift 1 - ;; - -s | --strict) - STRICT=1 - shift 1 - ;; - -h) - HOST="$2" - if [[ $HOST == "" ]]; then break; fi - shift 2 - ;; - --host=*) - HOST="${1#*=}" - shift 1 - ;; - -p) - PORT="$2" - if [[ $PORT == "" ]]; then break; fi - shift 2 - ;; - --port=*) - PORT="${1#*=}" - shift 1 - ;; - -t) - TIMEOUT="$2" - if [[ $TIMEOUT == "" ]]; then break; fi - shift 2 - ;; - --timeout=*) - TIMEOUT="${1#*=}" - shift 1 - ;; - --) - shift - CLI="$@" - break - ;; - --help) - usage - ;; - *) - echoerr "Unknown argument: $1" - usage - ;; - esac -done - -if [[ "$HOST" == "" || "$PORT" == "" ]]; then - echoerr "Error: you need to provide a host and port to test." - usage -fi - -TIMEOUT=${TIMEOUT:-15} -STRICT=${STRICT:-0} -CHILD=${CHILD:-0} -QUIET=${QUIET:-0} - -if [[ $CHILD -gt 0 ]]; then - wait_for - RESULT=$? - exit $RESULT -else - if [[ $TIMEOUT -gt 0 ]]; then - wait_for_wrapper - RESULT=$? 
- else - wait_for - RESULT=$? - fi -fi - -if [[ $CLI != "" ]]; then - if [[ $RESULT -ne 0 && $STRICT -eq 1 ]]; then - echoerr "$cmdname: strict mode, refusing to execute subprocess" - exit $RESULT - fi - exec $CLI -else - exit $RESULT -fi diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index 3cb175d09..000000000 --- a/.travis.yml +++ /dev/null @@ -1,117 +0,0 @@ -dist: trusty -sudo: required - -language: python - -python: - - "3.6" - -cache: pip - -services: - - rabbitmq - - postgresql - - docker - -addons: - # make sure the path in .ci/test_script.sh matches the version requested here - postgresql: "9.6" - - apt: - packages: - - postgresql-server-dev-9.6 - - texlive-base - - texlive-generic-recommended - - texlive-fonts-recommended - - texlive-latex-base - - texlive-latex-recommended - - texlive-latex-extra - - dvipng - - dvidvi - - graphviz - -before_install: - # We need to replace `TRAVIS_HOME` with `HOME` because the former won't be set when SSH'ing to localhost on the - # the Travis machine, causing certain scripts sourced in the `.bashrc` to fail - - sed -i 's/TRAVIS_HOME/HOME/g' /home/travis/.travis/job_stages - # This is needed for the SSH tests (being able to ssh to localhost) - # And will also be used for the docker test - - ssh-keygen -t rsa -N "" -f "${HOME}/.ssh/id_rsa" - - cp "${HOME}/.ssh/id_rsa.pub" "${HOME}/.ssh/authorized_keys" - - ssh-keyscan -H localhost >> "${HOME}/.ssh/known_hosts" - - # Needed to have 'locate' work properly - - sudo updatedb - - .ci/prep_ssh.sh - - # Build the docker image if needed - - .ci/before_install.sh - -install: - - pip install --upgrade pip wheel setuptools coveralls - - if [ "$TEST_TYPE" == "docs" ]; then pip install . && pip install -r docs/requirements_for_rtd.txt; else pip install .[all]; fi - -env: - ## Build matrix to test both backends, and the docs - ## I still let it create the test backend for django - ## also when building the docs - ## because otherwise the code would complain. Also, I need latex. 
- - TEST_TYPE="pre-commit" - - AIIDA_TEST_BACKEND=django TEST_TYPE="docs" - - AIIDA_TEST_BACKEND=django TEST_TYPE="tests" - - AIIDA_TEST_BACKEND=sqlalchemy TEST_TYPE="tests" - - TEST_TYPE="conda" - -before_script: - - .ci/setup_profiles.sh - - .ci/before_script.sh - -script: .ci/test_script.sh - - -after_success: - # upload coverage report to coveralls.io - - if [ "$TEST_TYPE" == "tests" ]; then coveralls; fi - -git: - depth: 3 - -jobs: - include: - - stage: deploy - if: "tag =~ ^v[0-9]+\\.[0-9]+\\.[0-9]+(a|b|rc)?[0-9]*$" - services: ignore - addons: skip - python: 3.6 - before_install: skip - install: skip - before_script: skip - script: skip - env: ignore - before_deploy: - - echo "We'll deploy $TRAVIS_TAG" - after_deploy: - - echo "Deployed $TRAVIS_TAG" - - echo "We'll hook up some things in the future" - deploy: &pypi - provider: pypi - skip_existing: true - username: aiida-bot - password: - secure: NhV11e1kVJaumNMb7YGENngZwa6qJjzoGWzmzqdU5BwFgpXaxbaFAk2fn+Cckrkz4MmNJaW1HaKEAhn07WsbrkjOXK7QVNK48/eagiquKbCQbyNZNjzF+C24EYQHI93y40JQcl2JaCahSqKXbYQCpLlX0Rbtob4psQH29uHRcbq4lm5t3Jmb8WckEzcDLMZX3+uPFwDJxMbbsDW+pONGF1z2/B0j+7m4beTgCepuvIEaXS97rTQj2egKYEJV+V3DbH2o2nr0+3z4lzH2FAdoAnZMlFwHfWoY3WIuYcHcwXDWyLGWQKvncaoh1sLU7gfjjup3dZ4Iq74Zp43x2eXUhbmZBp2cPN3CZpN9baE4NE6MNKeQ/erkg31qdq50OG/rjGgPKyfg5ShO3Kt1CyVbSJX5dqPxtDQblYE8TAGhHfO2O8M1kLhyWvrV5TaQuVB/IAsTkRC+t1NtdWTxaU6wdMz36O5so89oKbFljjk9744m4Ei8DCgc98WH6b96qn0VifjxGuBs6o1aqRoa8O7e29a0TwDVAwxkczgjxbXkP6okRY5IAxsr5Rsbm8urXEo874uQA8n1gGyN6+YKSbjvPbHD9RIRl9IddC6y2wKTv/1JBifNjHcLhadl7RVRPSU7vFSs2cknN+5Gkw3FgZ/jA8SoMxMLFsvHcFxchrurAkE= - on: - repo: aiidateam/aiida-core - all_branches: true - tags: true - - stage: transifex_docs - if: "tag =~ ^v[0-9]+\\.[0-9]+\\.[0-9]+(a|b|rc)?[0-9]*$" - services: ignore - addons: skip - python: 3.6 - before_install: skip - install: skip - before_script: skip - script: skip - env: ignore - after_success: - - .ci/transifex_upload.sh diff --git a/open_source_licenses.txt b/open_source_licenses.txt index 9b4f56f49..40fc90a67 100644 --- a/open_source_licenses.txt +++ b/open_source_licenses.txt @@ -12,9 +12,6 @@ Python: Fastep: * utils/fastentrypoints.py -wait-for-it: - * .ci/wait-for-it.sh - The respective copyright notices are reproduced below. -------------------------------------------------------------------------- @@ -361,28 +358,3 @@ fastentrypoints.py Licence: LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------------------- - -wait-for-it License (https://github.com/vishnubob/wait-for-it/blob/master/LICENSE): - -The MIT License (MIT) -Copyright (c) 2016 Giles Hall - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE.