Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

increase minimal python version to 3.6 #553

Merged
merged 13 commits into from Jan 29, 2019
Merged
8 changes: 4 additions & 4 deletions .ci/appveyor.yml
Expand Up @@ -3,16 +3,16 @@ environment:
# Pre-installed Python versions, which Appveyor may upgrade to
# a later point release.
# See: http://www.appveyor.com/docs/installed-software#python
- PYTHON_VERSION: "3.5" # currently 3.5.1
- PYTHON_VERSION: "3.6" # Appveyor may use a later 3.6.x point release
pmli marked this conversation as resolved.
Show resolved Hide resolved
PYTHON_ARCH: "32"
CONDA: "C:\\Miniconda35"
CONDA: "C:\\Miniconda36"
MARKER: "not grid"
QT_API: "pyqt5"
ALLOW_FAIL: true

- PYTHON_VERSION: "3.5" # currently 3.5.1
- PYTHON_VERSION: "3.6" # Appveyor may use a later 3.6.x point release
pmli marked this conversation as resolved.
Show resolved Hide resolved
PYTHON_ARCH: "32"
CONDA: "C:\\Miniconda35"
CONDA: "C:\\Miniconda36"
MARKER: "grid"
QT_API: "pyqt5"
ALLOW_FAIL: true
Expand Down
4 changes: 2 additions & 2 deletions .ci/travis/deploy.bash
Expand Up @@ -19,7 +19,7 @@ rm -rf ~/.ssh
set -x
mkdir -p ${BUILDER_WHEELHOUSE}
git clone git@github.com:pymor/wheels.pymor.org ${REPODIR}
for py in 3.5 3.6 3.7 ; do
for py in 3.6 3.7 ; do
BUILDER_IMAGE=pymor/wheelbuilder:py${py}
git clean -xdf
docker pull ${BUILDER_IMAGE} 1> /dev/null
Expand All @@ -33,7 +33,7 @@ for os in debian_stable debian_testing centos_7 ; do
docker build --build-arg tag=${os} ${BUILDER_WHEELHOUSE}
done

for py in 3.5 3.6 3.7 ; do
for py in 3.6 3.7 ; do
${REPODIR}/add_wheels.py ${TRAVIS_BRANCH} ${BUILDER_WHEELHOUSE}/pymor*manylinux*.whl
done

Expand Down
2 changes: 1 addition & 1 deletion .ci/travis/run_travis_builders.py
Expand Up @@ -39,7 +39,7 @@ def _run_config(tm, clone_dir, commit):
return True, ''


docker_tags = env.list('PYMOR_DOCKER_TAG', default=['3.5', '3.6', '3.7-rc'])
docker_tags = env.list('PYMOR_DOCKER_TAG', default=['3.6', '3.7'])
pytest_marker = env.list('PYMOR_PYTEST_MARKER', default=["None", 'PIP_ONLY', 'MPI'])
commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().strip()
variations = list(product(docker_tags, pytest_marker))
Expand Down
12 changes: 0 additions & 12 deletions .ci/travis/travis.yml
Expand Up @@ -50,12 +50,6 @@ jobs:
include:
- stage: test_top
env: PYMOR_PYTEST_MARKER="NUMPY" DOCKER_TAG="3.6" PYMOR_TEST_HALF="TOP"
- stage: test_top
env: PYMOR_PYTEST_MARKER="None" DOCKER_TAG="3.5" PYMOR_TEST_HALF="TOP"
- stage: test_top
env: PYMOR_PYTEST_MARKER="PIP_ONLY" DOCKER_TAG="3.5" PYMOR_TEST_HALF="TOP"
- stage: test_top
env: PYMOR_PYTEST_MARKER="MPI" DOCKER_TAG="3.5" PYMOR_TEST_HALF="TOP"
- stage: test_top
env: PYMOR_PYTEST_MARKER="None" DOCKER_TAG="3.6" PYMOR_TEST_HALF="TOP"
- stage: test_top
Expand All @@ -71,12 +65,6 @@ jobs:

- stage: test_bottom
env: PYMOR_PYTEST_MARKER="NUMPY" DOCKER_TAG="3.6" PYMOR_TEST_HALF="BOTTOM"
- stage: test_bottom
env: PYMOR_PYTEST_MARKER="None" DOCKER_TAG="3.5" PYMOR_TEST_HALF="BOTTOM"
- stage: test_bottom
env: PYMOR_PYTEST_MARKER="PIP_ONLY" DOCKER_TAG="3.5" PYMOR_TEST_HALF="BOTTOM"
- stage: test_bottom
env: PYMOR_PYTEST_MARKER="MPI" DOCKER_TAG="3.5" PYMOR_TEST_HALF="BOTTOM"
- stage: test_bottom
env: PYMOR_PYTEST_MARKER="None" DOCKER_TAG="3.6" PYMOR_TEST_HALF="BOTTOM"
- stage: test_bottom
Expand Down
2 changes: 1 addition & 1 deletion .ci/travis/yml_template.py
Expand Up @@ -84,7 +84,7 @@
import sys
from itertools import product
tpl = jinja2.Template(tpl)
pythons = ['3.5', '3.6', '3.7']
pythons = ['3.6', '3.7']
marker = [None, "PIP_ONLY", "MPI"]
with open(os.path.join(os.path.dirname(__file__), 'travis.yml'), 'wt') as yml:
matrix = list(product(pythons, marker))
Expand Down
3 changes: 1 addition & 2 deletions dependencies.py
Expand Up @@ -36,11 +36,10 @@ def _pymess(rev, major, minor, marker=True):
'pytest-cov': 'pytest_cov',
'pytest-flakes': 'pytest_flakes',
'pytest-pep8': 'pytest_pep8',
_pymess('1.0.0', 3, 5, False): 'pymess',
_pymess('1.0.0', 3, 6, False): 'pymess',
_pymess('1.0.0', 3, 7, False): 'pymess',
'pyopengl': 'OpenGL'}
optional_requirements_file_only = [_pymess('1.0.0', 3, 5),_pymess('1.0.0', 3, 6),_pymess('1.0.0', 3, 7),
optional_requirements_file_only = [_pymess('1.0.0', 3, 6),_pymess('1.0.0', 3, 7),
'slycot>=0.3.3', 'mpi4py']


Expand Down
1 change: 0 additions & 1 deletion requirements-optional.txt
Expand Up @@ -3,7 +3,6 @@
PyQt5
docker
envparse
https://pymor.github.io/wheels/pymess-1.0.0-cp35-cp35m-manylinux1_x86_64.whl ; python_version == "3.5" and "linux" in sys_platform
https://pymor.github.io/wheels/pymess-1.0.0-cp36-cp36m-manylinux1_x86_64.whl ; python_version == "3.6" and "linux" in sys_platform
https://pymor.github.io/wheels/pymess-1.0.0-cp37-cp37m-manylinux1_x86_64.whl ; python_version == "3.7" and "linux" in sys_platform
ipyparallel
Expand Down
1 change: 0 additions & 1 deletion setup.py
Expand Up @@ -166,7 +166,6 @@ def setup_package():
extras_require = dependencies.extras(),
classifiers=['Development Status :: 4 - Beta',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Intended Audience :: Science/Research',
Expand Down
10 changes: 5 additions & 5 deletions src/pymor-demo
Expand Up @@ -15,7 +15,7 @@ import functools

if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Launcher script for all available pyMOR demos.',
epilog='Example: {} burgers 1'.format(sys.argv[0]))
epilog=f'Example: {sys.argv[0]} burgers 1')

def _run(module):
# only need to remove the modname from args, rest is automatic
Expand All @@ -31,14 +31,14 @@ if __name__ == '__main__':
shorts.append(short)

def usage():
msg = '''Usage:
{} DEMO_NAME | -h [DEMO_OPTIONS]
msg = f'''Usage:
{sys.argv[0]} DEMO_NAME | -h [DEMO_OPTIONS]

Arguments:
-h this message
DEMO_NAME select one from these: {}
DEMO_NAME select one from these: {",".join(shorts)}
DEMO_OPTIONS any arguments for the demo, including -h for detailed help
'''.format(sys.argv[0], ','.join(shorts))
'''
print(msg)
sys.exit(0)

Expand Down
2 changes: 1 addition & 1 deletion src/pymor/__init__.py
Expand Up @@ -56,7 +56,7 @@ def quit_event_loop():
if not mpi.finished:
mpi.quit()
else:
print('Rank {}: MPI parallel run detected. Launching event loop ...'.format(mpi.rank))
print(f'Rank {mpi.rank}: MPI parallel run detected. Launching event loop ...')
mpi.event_loop()
import sys
sys.exit(0)
29 changes: 14 additions & 15 deletions src/pymor/algorithms/adaptivegreedy.py
Expand Up @@ -111,7 +111,7 @@ def estimate(mus):
if pool is None or pool is dummy_pool:
pool = dummy_pool
else:
logger.info('Using pool of {} workers for parallel greedy search'.format(len(pool)))
logger.info(f'Using pool of {len(pool)} workers for parallel greedy search')

with RemoteObjectManager() as rom:
# Push everything we need during the greedy search to the workers.
Expand All @@ -131,8 +131,7 @@ def estimate(mus):
validation_set = parameter_space.sample_randomly(validation_mus)
if visualize and sample_set.dim not in (2, 3):
raise NotImplementedError
logger.info('Training set size: {}. Validation set size: {}'
.format(len(sample_set.vertex_mus), len(validation_set)))
logger.info(f'Training set size: {len(sample_set.vertex_mus)}. Validation set size: {len(validation_set)}')

extensions = 0
max_errs = []
Expand All @@ -154,14 +153,14 @@ def estimate(mus):
errors = estimate(sample_set.vertex_mus)
max_err_ind = np.argmax(errors)
max_err, max_err_mu = errors[max_err_ind], sample_set.vertex_mus[max_err_ind]
logger.info('Maximum error after {} extensions: {} (mu = {})'.format(extensions, max_err, max_err_mu))
logger.info(f'Maximum error after {extensions} extensions: {max_err} (mu = {max_err_mu})')

# estimate on validation set
val_errors = estimate(validation_set)
max_val_err_ind = np.argmax(val_errors)
max_val_err, max_val_err_mu = val_errors[max_val_err_ind], validation_set[max_val_err_ind]
logger.info('Maximum validation error: {}'.format(max_val_err))
logger.info('Validation error to training error ratio: {:.3e}'.format(max_val_err / max_err))
logger.info(f'Maximum validation error: {max_val_err}')
logger.info(f'Validation error to training error ratio: {max_val_err/max_err:.3e}')

if max_val_err >= max_err * rho: # overfitting?

Expand All @@ -180,7 +179,7 @@ def estimate(mus):
# select elements
sorted_indicators_inds = np.argsort(indicators)[::-1]
refinement_elements = sorted_indicators_inds[:max(int(len(sorted_indicators_inds) * theta), 1)]
logger.info('Refining {} elements: {}'.format(len(refinement_elements), refinement_elements))
logger.info(f'Refining {len(refinement_elements)} elements: {refinement_elements}')

# visualization
if visualize:
Expand Down Expand Up @@ -223,9 +222,9 @@ def estimate(mus):
if validation_mus <= 0:
validation_set = sample_set.center_mus + parameter_space.sample_randomly(-validation_mus)

logger.info('New training set size: {}. New validation set size: {}'
.format(len(sample_set.vertex_mus), len(validation_set)))
logger.info('Number of refinements: {}'.format(sample_set.refinement_count))
logger.info(f'New training set size: {len(sample_set.vertex_mus)}. '
f'New validation set size: {len(validation_set)}')
logger.info(f'Number of refinements: {sample_set.refinement_count}')
logger.info('')
else:
break # no overfitting, leave the refinement loop
Expand All @@ -239,11 +238,11 @@ def estimate(mus):

# break if target error reached
if target_error is not None and max_err <= target_error:
logger.info('Reached maximal error on snapshots of {} <= {}'.format(max_err, target_error))
logger.info(f'Reached maximal error on snapshots of {max_err} <= {target_error}')
break

# basis extension
with logger.block('Computing solution snapshot for mu = {} ...'.format(max_err_mu)):
with logger.block(f'Computing solution snapshot for mu = {max_err_mu} ...'):
U = d.solve(max_err_mu)
with logger.block('Extending basis with solution snapshot ...'):
try:
Expand All @@ -257,13 +256,13 @@ def estimate(mus):

# break if prescribed basis size reached
if max_extensions is not None and extensions >= max_extensions:
logger.info('Maximum number of {} extensions reached.'.format(max_extensions))
logger.info(f'Maximum number of {max_extensions} extensions reached.')
with logger.block('Reducing once more ...'):
rd = reductor.reduce()
break

tictoc = time.time() - tic
logger.info('Greedy search took {} seconds'.format(tictoc))
logger.info(f'Greedy search took {tictoc} seconds')
return {'rd': rd,
'max_errs': max_errs, 'max_err_mus': max_err_mus, 'extensions': extensions,
'max_val_errs': max_val_errs, 'max_val_err_mus': max_val_err_mus,
Expand Down Expand Up @@ -470,7 +469,7 @@ def _add_vertex(self, v):
self._vertex_to_id_map[v] = v_id
return v_id

class Element(object):
class Element:
__slots__ = ['level', 'center', 'vertex_ids', 'children', 'creation_time']

def __init__(self, level, center, sample_set):
Expand Down
42 changes: 19 additions & 23 deletions src/pymor/algorithms/ei.py
Expand Up @@ -102,12 +102,12 @@ def ei_greedy(U, error_norm=None, atol=None, rtol=None, max_interpolation_dofs=N
while True:
if max_interpolation_dofs is not None and len(interpolation_dofs) >= max_interpolation_dofs:
logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
logger.info('Final maximum interpolation error with {} interpolation DOFs: {}'.format(
len(interpolation_dofs), max_err))
logger.info(f'Final maximum interpolation error with'
f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')
break

logger.info('Maximum interpolation error with {} interpolation DOFs: {}'
.format(len(interpolation_dofs), max_err))
logger.info(f'Maximum interpolation error with '
f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')

if atol is not None and max_err <= atol:
logger.info('Absolute error tolerance reached! Stopping extension loop.')
Expand All @@ -121,12 +121,11 @@ def ei_greedy(U, error_norm=None, atol=None, rtol=None, max_interpolation_dofs=N
new_vec = U[max_err_ind].copy()
new_dof = new_vec.amax()[0][0]
if new_dof in interpolation_dofs:
logger.info('DOF {} selected twice for interplation! Stopping extension loop.'.format(new_dof))
logger.info(f'DOF {new_dof} selected twice for interplation! Stopping extension loop.')
break
new_dof_value = new_vec.dofs([new_dof])[0, 0]
if new_dof_value == 0.:
logger.info('DOF {} selected for interpolation has zero maximum error! Stopping extension loop.'
.format(new_dof))
logger.info(f'DOF {new_dof} selected for interpolation has zero maximum error! Stopping extension loop.')
break
new_vec *= 1 / new_dof_value
interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
Expand All @@ -146,8 +145,7 @@ def ei_greedy(U, error_norm=None, atol=None, rtol=None, max_interpolation_dofs=N
triangularity_errs.append(np.max(triangularity_errors[:d, :d]))

if len(triangularity_errs) > 0:
logger.info('Interpolation matrix is not lower triangular with maximum error of {}'
.format(triangularity_errs[-1]))
logger.info(f'Interpolation matrix is not lower triangular with maximum error of {triangularity_errs[-1]}')

data = {'errors': max_errs, 'triangularity_errors': triangularity_errs}

Expand Down Expand Up @@ -202,7 +200,7 @@ def deim(U, modes=None, atol=None, rtol=None, product=None, pod_options={}):
interpolation_matrix = np.zeros((0, 0))

for i in range(len(collateral_basis)):
logger.info('Choosing interpolation point for basis vector {}.'.format(i))
logger.info(f'Choosing interpolation point for basis vector {i}.')

if len(interpolation_dofs) > 0:
coefficients = np.linalg.solve(interpolation_matrix,
Expand All @@ -217,7 +215,7 @@ def deim(U, modes=None, atol=None, rtol=None, product=None, pod_options={}):
new_dof = ERR.amax()[0][0]

if new_dof in interpolation_dofs:
logger.info('DOF {} selected twice for interplation! Stopping extension loop.'.format(new_dof))
logger.info(f'DOF {new_dof} selected twice for interplation! Stopping extension loop.')
break

interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
Expand Down Expand Up @@ -299,7 +297,7 @@ def interpolate_operators(d, operator_names, parameter_sample, error_norm=None,
operators = [d.operators[operator_name] for operator_name in operator_names]
with logger.block('Computing operator evaluations on solution snapshots ...'):
if pool:
logger.info('Using pool of {} workers for parallel evaluation'.format(len(pool)))
logger.info(f'Using pool of {len(pool)} workers for parallel evaluation')
evaluations = rom.manage(pool.push(d.solution_space.empty()))
pool.map(_interpolate_operators_build_evaluations, parameter_sample,
d=d, operators=operators, evaluations=evaluations)
Expand Down Expand Up @@ -333,7 +331,7 @@ def interpolate_operators(d, operator_names, parameter_sample, error_norm=None,
for name, operator in zip(operator_names, operators)}
operators_dict = d.operators.copy()
operators_dict.update(ei_operators)
ei_d = d.with_(operators=operators_dict, name='{}_ei'.format(d.name))
ei_d = d.with_(operators=operators_dict, name=f'{d.name}_ei')

data.update({'dofs': dofs, 'basis': basis})
return ei_d, data
Expand All @@ -351,7 +349,7 @@ def _parallel_ei_greedy(U, pool, error_norm=None, atol=None, rtol=None, max_inte

logger = getLogger('pymor.algorithms.ei.ei_greedy')
logger.info('Generating Interpolation Data ...')
logger.info('Using pool of {} workers for parallel greedy search'.format(len(pool)))
logger.info(f'Using pool of {len(pool)} workers for parallel greedy search')

interpolation_dofs = np.zeros((0,), dtype=np.int32)
collateral_basis = pool.apply_only(_parallel_ei_greedy_get_empty, 0, U=U)
Expand All @@ -369,12 +367,11 @@ def _parallel_ei_greedy(U, pool, error_norm=None, atol=None, rtol=None, max_inte

if max_interpolation_dofs is not None and len(interpolation_dofs) >= max_interpolation_dofs:
logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
logger.info('Final maximum interpolation error with {} interpolation DOFs: {}'
.format(len(interpolation_dofs), max_err))
logger.info(f'Final maximum interpolation error with '
f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')
break

logger.info('Maximum interpolation error with {} interpolation DOFs: {}'
.format(len(interpolation_dofs), max_err))
logger.info(f'Maximum interpolation error with {len(interpolation_dofs)} interpolation DOFs: {max_err}')

if atol is not None and max_err <= atol:
logger.info('Absolute error tolerance reached! Stopping extension loop.')
Expand All @@ -388,12 +385,12 @@ def _parallel_ei_greedy(U, pool, error_norm=None, atol=None, rtol=None, max_inte
new_vec = pool.apply_only(_parallel_ei_greedy_get_vector, max_err_ind, data=distributed_data)
new_dof = new_vec.amax()[0][0]
if new_dof in interpolation_dofs:
logger.info('DOF {} selected twice for interpolation! Stopping extension loop.'.format(new_dof))
logger.info(f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.')
break
new_dof_value = new_vec.dofs([new_dof])[0, 0]
if new_dof_value == 0.:
logger.info('DOF {} selected for interpolation has zero maximum error! Stopping extension loop.'
.format(new_dof))
logger.info(f'DOF {new_dof} selected for interpolation has zero maximum error! '
f'Stopping extension loop.')
break
new_vec *= 1 / new_dof_value
interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
Expand All @@ -410,8 +407,7 @@ def _parallel_ei_greedy(U, pool, error_norm=None, atol=None, rtol=None, max_inte
triangularity_errs.append(np.max(triangularity_errors[:d, :d]))

if len(triangularity_errs) > 0:
logger.info('Interpolation matrix is not lower triangular with maximum error of {}'
.format(triangularity_errs[-1]))
logger.info(f'Interpolation matrix is not lower triangular with maximum error of {triangularity_errs[-1]}')
logger.info('')

data = {'errors': max_errs, 'triangularity_errors': triangularity_errs}
Expand Down