From 1030f7e109166f2d8859cc4c3fdda71bda23eea6 Mon Sep 17 00:00:00 2001 From: Robert Speck Date: Tue, 10 Jan 2023 14:03:16 +0100 Subject: [PATCH 01/23] better testing of (outdated) standard_integrators --- .../acoustic_helpers/standard_integrators.py | 11 ++--------- pySDC/projects/FastWaveSlowWave/plot_dispersion.py | 13 ++++++++----- .../FastWaveSlowWave/runmultiscale_acoustic.py | 4 ++++ pySDC/tests/test_projects/test_fwsw/test_fwsw.py | 3 +++ 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/pySDC/implementations/problem_classes/acoustic_helpers/standard_integrators.py b/pySDC/implementations/problem_classes/acoustic_helpers/standard_integrators.py index 84685c7568..dc54a4330b 100644 --- a/pySDC/implementations/problem_classes/acoustic_helpers/standard_integrators.py +++ b/pySDC/implementations/problem_classes/acoustic_helpers/standard_integrators.py @@ -15,17 +15,10 @@ def __init__(self, M_fast, M_slow, order): assert np.shape(M_slow)[0] == np.shape(M_slow)[1], "A_slow must be square" assert np.shape(M_fast)[0] == np.shape(M_slow)[0], "A_fast and A_slow must be of the same size" - assert order in [1, 2, 3, 4, 5], "Order must be between 1 and 5" + assert order in [1, 2, 3, 4, 5], "Order must be between 2 and 5" self.order = order - if self.order == 1: - self.A = np.array([[0, 0], [0, 1]]) - self.A_hat = np.array([[0, 0], [1, 0]]) - self.b = np.array([0, 1]) - self.b_hat = np.array([1, 0]) - self.nstages = 2 - - elif self.order == 2: + if self.order == 2: self.A = np.array([[0, 0], [0, 0.5]]) self.A_hat = np.array([[0, 0], [0.5, 0]]) self.b = np.array([0, 1]) diff --git a/pySDC/projects/FastWaveSlowWave/plot_dispersion.py b/pySDC/projects/FastWaveSlowWave/plot_dispersion.py index 1d154b1c29..da594a7a75 100644 --- a/pySDC/projects/FastWaveSlowWave/plot_dispersion.py +++ b/pySDC/projects/FastWaveSlowWave/plot_dispersion.py @@ -34,7 +34,14 @@ def findomega(stab_fh): return sol -def compute_and_plot_dispersion(): +def compute_and_plot_dispersion(Nsamples=15, K=3): + """ + Function to compute and plot the dispersion relation + + Args: + Nsamples: number of samples for testing + K: number of iterations as well as order + """ problem_params = dict() # SET VALUE FOR lambda_slow AND VALUES FOR lambda_fast ### problem_params['lambda_s'] = np.array([0.0]) @@ -61,9 +68,6 @@ def compute_and_plot_dispersion(): description['level_params'] = level_params # pass level parameters description['step_params'] = dict() # pass step parameters - # SET NUMBER OF ITERATIONS ### - K = 3 - # ORDER OF DIRK/IMEX IS EQUAL TO NUMBER OF ITERATIONS AND THUS ORDER OF SDC ### dirk_order = K @@ -83,7 +87,6 @@ def compute_and_plot_dispersion(): nnodes = L.sweep.coll.num_nodes dt = L.params.dt - Nsamples = 15 k_vec = np.linspace(0, np.pi, Nsamples + 1, endpoint=False) k_vec = k_vec[1:] phase = np.zeros((3, Nsamples)) diff --git a/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py b/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py index d44c897a1e..eb6bae2e2f 100644 --- a/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py +++ b/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py @@ -126,6 +126,10 @@ def compute_and_plot_solutions(): x_0 = 0.75 # x_1 = 0.25 + assert np.isclose(np.linalg.norm(uend[1, :], np.inf), 8.489e-01, 1E-03) + assert np.isclose(np.linalg.norm(pnew_dirk, np.inf), 1.003e+00, 1E-03) + assert np.isclose(np.linalg.norm(pnew_imex, np.inf), 2.762e+21, 1E-03) + print('Maximum pressure in SDC: %5.3e' % np.linalg.norm(uend[1, :], np.inf)) print('Maximum pressure in DIRK: 
%5.3e' % np.linalg.norm(pnew_dirk, np.inf)) print('Maximum pressure in RK-IMEX: %5.3e' % np.linalg.norm(pnew_imex, np.inf)) diff --git a/pySDC/tests/test_projects/test_fwsw/test_fwsw.py b/pySDC/tests/test_projects/test_fwsw/test_fwsw.py index 27d9a69045..166c0c334b 100644 --- a/pySDC/tests/test_projects/test_fwsw/test_fwsw.py +++ b/pySDC/tests/test_projects/test_fwsw/test_fwsw.py @@ -52,3 +52,6 @@ def test_dispersion(): compute_and_plot_dispersion() assert os.path.isfile('data/phase-K3-M3.png'), 'ERROR: phase plot has not been created' assert os.path.isfile('data/ampfactor-K3-M3.png'), 'ERROR: phase plot has not been created' + + compute_and_plot_dispersion(Nsamples=3, K=4) + compute_and_plot_dispersion(Nsamples=3, K=5) From 1f6470cb2f8af620cfeb6cc9688bc4766af7eb75 Mon Sep 17 00:00:00 2001 From: Robert Speck Date: Tue, 10 Jan 2023 14:10:23 +0100 Subject: [PATCH 02/23] Better tests of asympconf_Linf --- pySDC/projects/AsympConv/PFASST_conv_Linf.py | 12 +++++++----- .../test_projects/test_asympconv/test_PFASST.py | 12 ++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/pySDC/projects/AsympConv/PFASST_conv_Linf.py b/pySDC/projects/AsympConv/PFASST_conv_Linf.py index 183a452a1e..7a51923cf0 100644 --- a/pySDC/projects/AsympConv/PFASST_conv_Linf.py +++ b/pySDC/projects/AsympConv/PFASST_conv_Linf.py @@ -30,12 +30,13 @@ def main(): plot_results() -def run_diffusion(QI): +def run_diffusion(QI, max_proc_exp=13): """ A simple test program to test PFASST convergence for the heat equation with random initial data Args: QI: preconditioner + max_proc_exp: max number of processors will be 2^max_proc_exp """ # initialize level parameters @@ -53,7 +54,7 @@ def run_diffusion(QI): # initialize problem parameters problem_params = dict() problem_params['nu'] = 0.1 # diffusion coefficient - problem_params['freq'] = -1 # frequency for the test value + problem_params['freq'] = 2 # frequency for the test value problem_params['nvars'] = [127, 63] # number of degrees of freedom for each level problem_params['bc'] = 'dirichlet-zero' # boundary conditions @@ -93,7 +94,7 @@ def run_diffusion(QI): writer.writerow(('num_proc', 'niter')) file.close() - for i in range(0, 13): + for i in range(0, max_proc_exp): num_proc = 2**i level_params['dt'] = (Tend - t0) / num_proc @@ -131,12 +132,13 @@ def run_diffusion(QI): assert os.path.isfile(fname), 'ERROR: pickle did not create file' -def run_advection(QI): +def run_advection(QI, max_proc_exp=7): """ A simple test program to test PFASST convergence for the periodic advection equation Args: QI: preconditioner + max_proc_exp: max number of processors will be 2^max_proc_exp """ # initialize level parameters @@ -196,7 +198,7 @@ def run_advection(QI): writer.writerow(('num_proc', 'niter')) file.close() - for i in range(0, 7): + for i in range(0, max_proc_exp): num_proc = 2**i level_params['dt'] = (Tend - t0) / num_proc diff --git a/pySDC/tests/test_projects/test_asympconv/test_PFASST.py b/pySDC/tests/test_projects/test_asympconv/test_PFASST.py index 57f1af0546..a5a8838899 100644 --- a/pySDC/tests/test_projects/test_asympconv/test_PFASST.py +++ b/pySDC/tests/test_projects/test_asympconv/test_PFASST.py @@ -7,6 +7,18 @@ def test_main(): main() +@pytest.mark.base +def test_Linf(): + from pySDC.projects.AsympConv.PFASST_conv_Linf import run_advection, run_diffusion + + QI = 'LU' + run_diffusion(QI=QI, max_proc_exp=4) + run_advection(QI=QI, max_proc_exp=4) + + QI = 'LU2' + run_diffusion(QI=QI, max_proc_exp=4) + run_advection(QI=QI, max_proc_exp=4) + @pytest.mark.base 
def test_plot_results(): From 798c925b97c91f2cd2d2a9332f9ab569c368ee4c Mon Sep 17 00:00:00 2001 From: Robert Speck Date: Tue, 10 Jan 2023 14:28:23 +0100 Subject: [PATCH 03/23] black... --- pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py | 6 +++--- pySDC/tests/test_projects/test_asympconv/test_PFASST.py | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py b/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py index eb6bae2e2f..6b10cacdac 100644 --- a/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py +++ b/pySDC/projects/FastWaveSlowWave/runmultiscale_acoustic.py @@ -126,9 +126,9 @@ def compute_and_plot_solutions(): x_0 = 0.75 # x_1 = 0.25 - assert np.isclose(np.linalg.norm(uend[1, :], np.inf), 8.489e-01, 1E-03) - assert np.isclose(np.linalg.norm(pnew_dirk, np.inf), 1.003e+00, 1E-03) - assert np.isclose(np.linalg.norm(pnew_imex, np.inf), 2.762e+21, 1E-03) + assert np.isclose(np.linalg.norm(uend[1, :], np.inf), 8.489e-01, 1e-03) + assert np.isclose(np.linalg.norm(pnew_dirk, np.inf), 1.003e00, 1e-03) + assert np.isclose(np.linalg.norm(pnew_imex, np.inf), 2.762e21, 1e-03) print('Maximum pressure in SDC: %5.3e' % np.linalg.norm(uend[1, :], np.inf)) print('Maximum pressure in DIRK: %5.3e' % np.linalg.norm(pnew_dirk, np.inf)) diff --git a/pySDC/tests/test_projects/test_asympconv/test_PFASST.py b/pySDC/tests/test_projects/test_asympconv/test_PFASST.py index a5a8838899..86ec9a346c 100644 --- a/pySDC/tests/test_projects/test_asympconv/test_PFASST.py +++ b/pySDC/tests/test_projects/test_asympconv/test_PFASST.py @@ -7,6 +7,7 @@ def test_main(): main() + @pytest.mark.base def test_Linf(): from pySDC.projects.AsympConv.PFASST_conv_Linf import run_advection, run_diffusion From 4ab008c20830c5f583fbac9d95dd8c416655a0a9 Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Wed, 11 Jan 2023 11:46:57 +0100 Subject: [PATCH 04/23] Ran spell checker on tutorials --- pySDC/tutorial/step_1/B_spatial_accuracy_check.py | 2 +- pySDC/tutorial/step_1/README.rst | 2 +- pySDC/tutorial/step_2/README.rst | 2 +- pySDC/tutorial/step_3/A_getting_statistics.py | 2 +- pySDC/tutorial/step_3/B_adding_statistics.py | 2 +- pySDC/tutorial/step_3/C_study_collocations.py | 2 +- pySDC/tutorial/step_4/B_multilevel_hierarchy.py | 2 +- pySDC/tutorial/step_4/C_SDC_vs_MLSDC.py | 2 +- pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py | 2 +- pySDC/tutorial/step_5/B_my_first_PFASST_run.py | 4 ++-- pySDC/tutorial/step_5/C_advection_and_PFASST.py | 2 +- pySDC/tutorial/step_6/A_run_non_MPI_controller.py | 2 +- pySDC/tutorial/step_7/C_pySDC_with_PETSc.py | 2 +- pySDC/tutorial/step_8/B_multistep_SDC.py | 2 +- pySDC/tutorial/step_8/C_iteration_estimator.py | 4 ++-- 15 files changed, 17 insertions(+), 17 deletions(-) diff --git a/pySDC/tutorial/step_1/B_spatial_accuracy_check.py b/pySDC/tutorial/step_1/B_spatial_accuracy_check.py index 1d5c713a54..1b9d57cc7b 100644 --- a/pySDC/tutorial/step_1/B_spatial_accuracy_check.py +++ b/pySDC/tutorial/step_1/B_spatial_accuracy_check.py @@ -150,7 +150,7 @@ def plot_accuracy(results): # create new figure plt.figure() - # take x-axis limits from nvars_list + some spacning left and right + # take x-axis limits from nvars_list + some spacing left and right plt.xlim([min(nvars_list) / 2, max(nvars_list) * 2]) plt.xlabel('nvars') plt.ylabel('abs. 
error') diff --git a/pySDC/tutorial/step_1/README.rst b/pySDC/tutorial/step_1/README.rst index 21d789f0d1..87adaa5ab2 100644 --- a/pySDC/tutorial/step_1/README.rst +++ b/pySDC/tutorial/step_1/README.rst @@ -91,6 +91,6 @@ Important things to note: order in time with a 2nd order stencil in space. - Orders of convergence are not as stable as for the space-only test. One of the problems of this example is that we are actually trying to - compute 0 very, very thorougly... + compute 0 very, very thoroughly... .. include:: doc_step_1_D.rst diff --git a/pySDC/tutorial/step_2/README.rst b/pySDC/tutorial/step_2/README.rst index 5b767844c5..66658f03a4 100644 --- a/pySDC/tutorial/step_2/README.rst +++ b/pySDC/tutorial/step_2/README.rst @@ -86,6 +86,6 @@ Important things to note: iteration counts for now. We will deal with these later. - This example is the prototype for a user to work with pySDC. Most of the logic and most of the data structures are hidden, but all - relevant parameters are accessable using the ``description``. + relevant parameters are accessible using the ``description``. .. include:: doc_step_2_C.rst diff --git a/pySDC/tutorial/step_3/A_getting_statistics.py b/pySDC/tutorial/step_3/A_getting_statistics.py index 5c3bd25e87..deffb269b1 100644 --- a/pySDC/tutorial/step_3/A_getting_statistics.py +++ b/pySDC/tutorial/step_3/A_getting_statistics.py @@ -21,7 +21,7 @@ def main(): f.write(out + '\n') print(out) - # filter statistics by first time intervall and type (residual) + # filter statistics by first time interval and type (residual) residuals = get_sorted(stats, time=0.1, type='residual_post_iteration', sortby='iter') for item in residuals: diff --git a/pySDC/tutorial/step_3/B_adding_statistics.py b/pySDC/tutorial/step_3/B_adding_statistics.py index e9df27dddb..1240ec2bee 100644 --- a/pySDC/tutorial/step_3/B_adding_statistics.py +++ b/pySDC/tutorial/step_3/B_adding_statistics.py @@ -11,7 +11,7 @@ def main(): """ - A simple tets program to retrieve user-defined statistics from a run + A simple test program to retrieve user-defined statistics from a run """ Path("data").mkdir(parents=True, exist_ok=True) diff --git a/pySDC/tutorial/step_3/C_study_collocations.py b/pySDC/tutorial/step_3/C_study_collocations.py index 184706c107..18a28ce152 100644 --- a/pySDC/tutorial/step_3/C_study_collocations.py +++ b/pySDC/tutorial/step_3/C_study_collocations.py @@ -10,7 +10,7 @@ def main(): """ - A simple test program to show th eenergy deviation for different quadrature nodes + A simple test program to show the energy deviation for different quadrature nodes """ stats_dict = run_simulation() diff --git a/pySDC/tutorial/step_4/B_multilevel_hierarchy.py b/pySDC/tutorial/step_4/B_multilevel_hierarchy.py index 77ccf3158d..6c35e33242 100644 --- a/pySDC/tutorial/step_4/B_multilevel_hierarchy.py +++ b/pySDC/tutorial/step_4/B_multilevel_hierarchy.py @@ -47,7 +47,7 @@ def main(): description['level_params'] = level_params # pass level parameters description['step_params'] = step_params # pass step parameters description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer # now the description contains more or less everything we need to create a step with multiple levels S = step(description=description) diff --git a/pySDC/tutorial/step_4/C_SDC_vs_MLSDC.py 
b/pySDC/tutorial/step_4/C_SDC_vs_MLSDC.py index 881b47dfe3..cd7c45c146 100644 --- a/pySDC/tutorial/step_4/C_SDC_vs_MLSDC.py +++ b/pySDC/tutorial/step_4/C_SDC_vs_MLSDC.py @@ -73,7 +73,7 @@ def main(): description_mlsdc['level_params'] = level_params # pass level parameters description_mlsdc['step_params'] = step_params # pass step parameters description_mlsdc['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description_mlsdc['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description_mlsdc['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer # instantiate the controller (no controller parameters used here) controller_sdc = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description_sdc) diff --git a/pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py b/pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py index 8de1819a43..0c51932aed 100644 --- a/pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py +++ b/pySDC/tutorial/step_5/A_multistep_multilevel_hierarchy.py @@ -47,7 +47,7 @@ def main(): description['level_params'] = level_params # pass level parameters description['step_params'] = step_params # pass step parameters description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer # instantiate controller controller = controller_nonMPI(num_procs=10, controller_params={}, description=description) diff --git a/pySDC/tutorial/step_5/B_my_first_PFASST_run.py b/pySDC/tutorial/step_5/B_my_first_PFASST_run.py index 72f4cda566..05559a205e 100644 --- a/pySDC/tutorial/step_5/B_my_first_PFASST_run.py +++ b/pySDC/tutorial/step_5/B_my_first_PFASST_run.py @@ -55,7 +55,7 @@ def main(): description['level_params'] = level_params # pass level parameters description['step_params'] = step_params # pass step parameters description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer # set time parameters t0 = 0.0 @@ -119,7 +119,7 @@ def main(): assert err < 1.3505e-04, "ERROR: error is too high, got %s" % err assert np.ptp(niters) <= 1, "ERROR: range of number of iterations is too high, got %s" % np.ptp(niters) - assert np.mean(niters) <= 5.0, "ERROR: mean number of iteratiobs is too high, got %s" % np.mean(niters) + assert np.mean(niters) <= 5.0, "ERROR: mean number of iterations is too high, got %s" % np.mean(niters) f.close() diff --git a/pySDC/tutorial/step_5/C_advection_and_PFASST.py b/pySDC/tutorial/step_5/C_advection_and_PFASST.py index 3ccde084ff..251368992d 100644 --- a/pySDC/tutorial/step_5/C_advection_and_PFASST.py +++ b/pySDC/tutorial/step_5/C_advection_and_PFASST.py @@ -56,7 +56,7 @@ def main(): description['level_params'] = level_params # pass level parameters description['step_params'] = step_params # pass step parameters description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer # set time parameters 
t0 = 0.0 diff --git a/pySDC/tutorial/step_6/A_run_non_MPI_controller.py b/pySDC/tutorial/step_6/A_run_non_MPI_controller.py index 58bb7cd10e..bc6202029b 100644 --- a/pySDC/tutorial/step_6/A_run_non_MPI_controller.py +++ b/pySDC/tutorial/step_6/A_run_non_MPI_controller.py @@ -122,7 +122,7 @@ def set_parameters_ml(): description['level_params'] = level_params # pass level parameters description['step_params'] = step_params # pass step parameters description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer # set time parameters t0 = 0.0 diff --git a/pySDC/tutorial/step_7/C_pySDC_with_PETSc.py b/pySDC/tutorial/step_7/C_pySDC_with_PETSc.py index 2cf58e2468..686ebf5ec8 100644 --- a/pySDC/tutorial/step_7/C_pySDC_with_PETSc.py +++ b/pySDC/tutorial/step_7/C_pySDC_with_PETSc.py @@ -85,7 +85,7 @@ def main(): description['level_params'] = level_params # pass level parameters description['step_params'] = step_params # pass step parameters description['space_transfer_class'] = mesh_to_mesh_petsc_dmda # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer # set time parameters t0 = 0.0 diff --git a/pySDC/tutorial/step_8/B_multistep_SDC.py b/pySDC/tutorial/step_8/B_multistep_SDC.py index df9a00338a..b0e34e213a 100644 --- a/pySDC/tutorial/step_8/B_multistep_SDC.py +++ b/pySDC/tutorial/step_8/B_multistep_SDC.py @@ -53,7 +53,7 @@ def main(): description['level_params'] = level_params # pass level parameters description['step_params'] = step_params # pass step parameters description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer # set up parameters for PFASST run problem_params['nvars'] = [63, 31] diff --git a/pySDC/tutorial/step_8/C_iteration_estimator.py b/pySDC/tutorial/step_8/C_iteration_estimator.py index 16cca59e83..cfacc0eab1 100644 --- a/pySDC/tutorial/step_8/C_iteration_estimator.py +++ b/pySDC/tutorial/step_8/C_iteration_estimator.py @@ -75,7 +75,7 @@ def setup_diffusion(dt=None, ndim=None, ml=False): description['convergence_controllers'] = convergence_controllers if ml: description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer return description, controller_params @@ -141,7 +141,7 @@ def setup_advection(dt=None, ndim=None, ml=False): description['convergence_controllers'] = convergence_controllers if ml: description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class - description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer + description['space_transfer_params'] = space_transfer_params # pass parameters for spatial transfer return description, controller_params From ee6bbde1391bb802642fc4ab1f46c550ddf82628 Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Wed, 11 Jan 2023 12:02:12 +0100 Subject: 
[PATCH 05/23] Ran spellchecker on all core modules --- pySDC/core/BaseTransfer.py | 2 +- pySDC/core/Collocation.py | 2 +- pySDC/core/Controller.py | 4 ++-- pySDC/core/Hooks.py | 2 +- pySDC/core/Lagrange.py | 2 +- pySDC/core/Step.py | 2 +- pySDC/core/Sweeper.py | 2 +- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pySDC/core/BaseTransfer.py b/pySDC/core/BaseTransfer.py index f3cc0c3f0c..54d140c0db 100644 --- a/pySDC/core/BaseTransfer.py +++ b/pySDC/core/BaseTransfer.py @@ -83,7 +83,7 @@ def restrict(self): """ Space-time restriction routine - The routine applies the spatial restriction operator to teh fine values on the fine nodes, then reevaluates f + The routine applies the spatial restriction operator to the fine values on the fine nodes, then reevaluates f on the coarse level. This is used for the first part of the FAS correction tau via integration. The second part is the integral over the fine values, restricted to the coarse level. Finally, possible tau corrections on the fine level are restricted as well. diff --git a/pySDC/core/Collocation.py b/pySDC/core/Collocation.py index d7a7383fee..3a796c9558 100644 --- a/pySDC/core/Collocation.py +++ b/pySDC/core/Collocation.py @@ -90,7 +90,7 @@ def __init__(self, num_nodes=None, tleft=0, tright=1, node_type='LEGENDRE', quad self.node_type = node_type self.quad_type = quad_type - # Instanciate attributes + # Instantiate attributes self.nodeGenerator = NodesGenerator(self.node_type, self.quad_type) if self.node_type == 'EQUID': self.order = num_nodes diff --git a/pySDC/core/Controller.py b/pySDC/core/Controller.py index 469297e126..c5257d2316 100644 --- a/pySDC/core/Controller.py +++ b/pySDC/core/Controller.py @@ -40,7 +40,7 @@ def __init__(self, controller_params, description): controller_params (dict): parameter set for the controller and the steps """ - # check if we have a hook on this list. if not, use default class. + # check if we have a hook on this list. If not, use default class. 
controller_params['hook_class'] = controller_params.get('hook_class', hookclass.hooks) self.__hooks = controller_params['hook_class']() @@ -262,7 +262,7 @@ def add_convergence_controller(self, convergence_controller, description, params Args: convergence_controller (pySDC.ConvergenceController): The convergence controller to be added description (dict): The description object used to instantiate the controller - params (dict): Parametes for the convergence controller + params (dict): Parameters for the convergence controller allow_double (bool): Allow adding the same convergence controller multiple times Returns: diff --git a/pySDC/core/Hooks.py b/pySDC/core/Hooks.py index 0d05f27372..5268278529 100644 --- a/pySDC/core/Hooks.py +++ b/pySDC/core/Hooks.py @@ -26,7 +26,7 @@ class hooks(object): __num_restarts (int): number of restarts of the current step logger: logger instance for output __stats (dict): dictionary for gathering the statistics of a run - __entry (namedtuple): statistics entry containign all information to identify the value + __entry (namedtuple): statistics entry containing all information to identify the value """ def __init__(self): diff --git a/pySDC/core/Lagrange.py b/pySDC/core/Lagrange.py index e3ed2b869b..1c7e0e4105 100644 --- a/pySDC/core/Lagrange.py +++ b/pySDC/core/Lagrange.py @@ -43,7 +43,7 @@ def computeFejerRule(n): v1 = np.empty(len(v0) - 1, dtype=complex) np.conjugate(v0[:0:-1], out=v1) v1 += v0[:-1] - # -- Compute inverse fourier transform + # -- Compute inverse Fourier transform w = np.fft.ifft(v1) if max(w.imag) > 1.0e-15: raise ValueError(f'Max imaginary value to important for ifft: {max(w.imag)}') diff --git a/pySDC/core/Step.py b/pySDC/core/Step.py index 56bc17cfca..1c804d9c77 100644 --- a/pySDC/core/Step.py +++ b/pySDC/core/Step.py @@ -137,7 +137,7 @@ def __generate_hierarchy(self, descr): # generate list of dictionaries out of the description descr_list = self.__dict_to_list(descr_new) - # sanity check: is there a base_transfer class? is there one even if only a single level is specified? + # sanity check: is there a base_transfer class? Is there one even if only a single level is specified? if len(descr_list) > 1 and not descr_new['space_transfer_class']: msg = 'need %s to instantiate step, only got %s' % ('space_transfer_class', str(descr_new.keys())) self.logger.error(msg) diff --git a/pySDC/core/Sweeper.py b/pySDC/core/Sweeper.py index 59f77220ef..6e0f0efe1a 100644 --- a/pySDC/core/Sweeper.py +++ b/pySDC/core/Sweeper.py @@ -67,7 +67,7 @@ def __init__(self, params): if not coll.right_is_node and not self.params.do_coll_update: self.logger.warning( - 'we need to do a collocation update here, since the right end point is not a node. ' 'Changing this!' + 'we need to do a collocation update here, since the right end point is not a node. Changing this!' ) self.params.do_coll_update = True From afa3483a01cf509d77eeed13763d1d5a8a3b1de0 Mon Sep 17 00:00:00 2001 From: Robert Speck Date: Thu, 12 Jan 2023 11:34:17 +0100 Subject: [PATCH 06/23] Update README.rst --- README.rst | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.rst b/README.rst index bfce4ed96b..41ce82469e 100644 --- a/README.rst +++ b/README.rst @@ -1,3 +1,6 @@ +|badge-ga| +|badge-ossf| + Welcome to pySDC! ================= @@ -74,3 +77,9 @@ This project has received funding from the `European High-Performance Computing The JU receives support from the European Union’s Horizon 2020 research and innovation programme and Belgium, France, Germany, and Switzerland. 
This project also received funding from the `German Federal Ministry of Education and Research `_ (BMBF) grant 16HPC047. The project also received help from the `Helmholtz Platform for Research Software Engineering - Preparatory Study (HiRSE_PS) `_. + + +.. |badge-ga| image:: https://github.com/Parallel-in-Time/pySDC/actions/workflows/ci_pipeline.yml/badge.svg + :target: https://github.com/Parallel-in-Time/pySDC/actions/workflows/ci_pipeline.yml +.. |badge-ossf| image:: https://bestpractices.coreinfrastructure.org/projects/6909/badge + :target: https://bestpractices.coreinfrastructure.org/projects/6909 \ No newline at end of file From c74a9fec659895f0dafcc1963d56b6f83f94863b Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Thu, 12 Jan 2023 15:14:01 +0100 Subject: [PATCH 07/23] Allowing multiple hook classes now --- pySDC/core/Controller.py | 26 +- pySDC/core/Hooks.py | 178 +--------- .../controller_classes/controller_MPI.py | 92 +++-- .../controller_classes/controller_nonMPI.py | 81 +++-- .../check_convergence.py | 12 +- pySDC/implementations/hooks/default_hook.py | 332 ++++++++++++++++++ 6 files changed, 478 insertions(+), 243 deletions(-) create mode 100644 pySDC/implementations/hooks/default_hook.py diff --git a/pySDC/core/Controller.py b/pySDC/core/Controller.py index c5257d2316..932ec4b13c 100644 --- a/pySDC/core/Controller.py +++ b/pySDC/core/Controller.py @@ -3,10 +3,10 @@ import sys import numpy as np -from pySDC.core import Hooks as hookclass from pySDC.core.BaseTransfer import base_transfer from pySDC.helpers.pysdc_helper import FrozenClass from pySDC.implementations.convergence_controller_classes.check_convergence import CheckConvergence +from pySDC.implementations.hooks.default_hook import default_hooks # short helper class to add params as attributes @@ -41,10 +41,16 @@ def __init__(self, controller_params, description): """ # check if we have a hook on this list. If not, use default class. 
- controller_params['hook_class'] = controller_params.get('hook_class', hookclass.hooks) - self.__hooks = controller_params['hook_class']() + self.__hooks = [] + self.hook_classes = [default_hooks] + user_hooks = controller_params.get('hook_class', []) + self.hook_classes += user_hooks if type(user_hooks) == list else [user_hooks] + for hook in self.hook_classes: + self.__hooks += [hook()] + controller_params['hook_class'] = controller_params.get('hook_class', self.hook_classes) - self.hooks.pre_setup(step=None, level_number=None) + for hook in self.hooks: + hook.pre_setup(step=None, level_number=None) self.params = _Pars(controller_params) @@ -308,3 +314,15 @@ def get_convergence_controllers_as_table(self, description): out += f'\n{user_added}|{i:3} | {C.params.control_order:5} | {type(C).__name__}' return out + + def return_stats(self): + """ + Return the merged stats from all hooks + + Returns: + dict: Merged stats from all hooks + """ + stats = {} + for hook in self.hooks: + stats = {**stats, **hook.return_stats()} + return stats diff --git a/pySDC/core/Hooks.py b/pySDC/core/Hooks.py index 5268278529..e875e694d9 100644 --- a/pySDC/core/Hooks.py +++ b/pySDC/core/Hooks.py @@ -33,20 +33,6 @@ def __init__(self): """ Initialization routine """ - self.__t0_setup = None - self.__t0_run = None - self.__t0_predict = None - self.__t0_step = None - self.__t0_iteration = None - self.__t0_sweep = None - self.__t0_comm = [] - self.__t1_run = None - self.__t1_predict = None - self.__t1_step = None - self.__t1_iteration = None - self.__t1_sweep = None - self.__t1_setup = None - self.__t1_comm = [] self.__num_restarts = 0 self.logger = logging.getLogger('hooks') @@ -130,7 +116,6 @@ def pre_setup(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t0_setup = time.perf_counter() def pre_run(self, step, level_number): """ @@ -141,7 +126,6 @@ def pre_run(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t0_run = time.perf_counter() def pre_predict(self, step, level_number): """ @@ -151,7 +135,7 @@ def pre_predict(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ - self.__t0_predict = time.perf_counter() + pass def pre_step(self, step, level_number): """ @@ -162,7 +146,6 @@ def pre_step(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t0_step = time.perf_counter() def pre_iteration(self, step, level_number): """ @@ -173,7 +156,6 @@ def pre_iteration(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t0_iteration = time.perf_counter() def pre_sweep(self, step, level_number): """ @@ -184,7 +166,6 @@ def pre_sweep(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t0_sweep = time.perf_counter() def pre_comm(self, step, level_number): """ @@ -195,16 +176,6 @@ def pre_comm(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - if len(self.__t0_comm) 
>= level_number + 1: - self.__t0_comm[level_number] = time.perf_counter() - else: - while len(self.__t0_comm) < level_number: - self.__t0_comm.append(None) - self.__t0_comm.append(time.perf_counter()) - while len(self.__t1_comm) <= level_number: - self.__t1_comm.append(0.0) - assert len(self.__t0_comm) == level_number + 1 - assert len(self.__t1_comm) == level_number + 1 def post_comm(self, step, level_number, add_to_stats=False): """ @@ -216,22 +187,6 @@ def post_comm(self, step, level_number, add_to_stats=False): add_to_stats (bool): set if result should go to stats object """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - assert len(self.__t1_comm) >= level_number + 1 - self.__t1_comm[level_number] += time.perf_counter() - self.__t0_comm[level_number] - - if add_to_stats: - L = step.levels[level_number] - - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=step.status.iter, - sweep=L.status.sweep, - type='timing_comm', - value=self.__t1_comm[level_number], - ) - self.__t1_comm[level_number] = 0.0 def post_sweep(self, step, level_number): """ @@ -242,39 +197,6 @@ def post_sweep(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t1_sweep = time.perf_counter() - - L = step.levels[level_number] - - self.logger.info( - 'Process %2i on time %8.6f at stage %15s: Level: %s -- Iteration: %2i -- Sweep: %2i -- ' 'residual: %12.8e', - step.status.slot, - L.time, - step.status.stage, - L.level_index, - step.status.iter, - L.status.sweep, - L.status.residual, - ) - - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=step.status.iter, - sweep=L.status.sweep, - type='residual_post_sweep', - value=L.status.residual, - ) - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=step.status.iter, - sweep=L.status.sweep, - type='timing_sweep', - value=self.__t1_sweep - self.__t0_sweep, - ) def post_iteration(self, step, level_number): """ @@ -286,29 +208,6 @@ def post_iteration(self, step, level_number): """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t1_iteration = time.perf_counter() - - L = step.levels[level_number] - - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=-1, - iter=step.status.iter, - sweep=L.status.sweep, - type='residual_post_iteration', - value=L.status.residual, - ) - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=step.status.iter, - sweep=L.status.sweep, - type='timing_iteration', - value=self.__t1_iteration - self.__t0_iteration, - ) - def post_step(self, step, level_number): """ Default routine called after each step or block @@ -319,44 +218,6 @@ def post_step(self, step, level_number): """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t1_step = time.perf_counter() - - L = step.levels[level_number] - - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=step.status.iter, - sweep=L.status.sweep, - type='timing_step', - value=self.__t1_step - self.__t0_step, - ) - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=-1, - iter=step.status.iter, - sweep=L.status.sweep, - type='niter', - value=step.status.iter, - ) - self.add_to_stats( - process=step.status.slot, - time=L.time, - 
level=L.level_index, - iter=-1, - sweep=L.status.sweep, - type='residual_post_step', - value=L.status.residual, - ) - - # record the recomputed quantities at weird positions to make sure there is only one value for each step - for t in [L.time, L.time + L.dt]: - self.add_to_stats( - process=-1, time=t, level=-1, iter=-1, sweep=-1, type='_recomputed', value=step.status.get('restart') - ) - def post_predict(self, step, level_number): """ Default routine called after each predictor @@ -366,19 +227,6 @@ def post_predict(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t1_predict = time.perf_counter() - - L = step.levels[level_number] - - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=step.status.iter, - sweep=L.status.sweep, - type='timing_predictor', - value=self.__t1_predict - self.__t0_predict, - ) def post_run(self, step, level_number): """ @@ -389,19 +237,6 @@ def post_run(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t1_run = time.perf_counter() - - L = step.levels[level_number] - - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=step.status.iter, - sweep=L.status.sweep, - type='timing_run', - value=self.__t1_run - self.__t0_run, - ) def post_setup(self, step, level_number): """ @@ -412,14 +247,3 @@ def post_setup(self, step, level_number): level_number (int): the current level number """ self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 - self.__t1_setup = time.perf_counter() - - self.add_to_stats( - process=-1, - time=-1, - level=-1, - iter=-1, - sweep=-1, - type='timing_setup', - value=self.__t1_setup - self.__t0_setup, - ) diff --git a/pySDC/implementations/controller_classes/controller_MPI.py b/pySDC/implementations/controller_classes/controller_MPI.py index 9383e87608..1c63ede8d0 100644 --- a/pySDC/implementations/controller_classes/controller_MPI.py +++ b/pySDC/implementations/controller_classes/controller_MPI.py @@ -83,7 +83,8 @@ def run(self, u0, t0, Tend): """ # reset stats to prevent double entries from old runs - self.hooks.reset_stats() + for hook in self.hooks: + hook.reset_stats() # find active processes and put into new communicator rank = self.comm.Get_rank() @@ -111,10 +112,12 @@ def run(self, u0, t0, Tend): uend = u0 # call post-setup hook - self.hooks.post_setup(step=None, level_number=None) + for hook in self.hooks: + hook.post_setup(step=None, level_number=None) # call pre-run hook - self.hooks.pre_run(step=self.S, level_number=0) + for hook in self.hooks: + hook.pre_run(step=self.S, level_number=0) comm_active.Barrier() @@ -162,11 +165,12 @@ def run(self, u0, t0, Tend): self.restart_block(num_procs, time, uend, comm=comm_active) # call post-run hook - self.hooks.post_run(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_run(step=self.S, level_number=0) comm_active.Free() - return uend, self.hooks.return_stats() + return uend, self.return_stats() def restart_block(self, size, time, u0, comm): """ @@ -243,7 +247,8 @@ def send_full(self, comm=None, blocking=False, level=None, add_to_stats=False): level: the level number add_to_stats: a flag to end recording data in the hooks (defaults to False) """ - self.hooks.pre_comm(step=self.S, level_number=level) + for hook in self.hooks: + 
hook.pre_comm(step=self.S, level_number=level) if not blocking: self.wait_with_interrupt(request=self.req_send[level]) @@ -272,7 +277,8 @@ def send_full(self, comm=None, blocking=False, level=None, add_to_stats=False): if self.S.status.force_done: return None - self.hooks.post_comm(step=self.S, level_number=level, add_to_stats=add_to_stats) + for hook in self.hooks: + hook.post_comm(step=self.S, level_number=level, add_to_stats=add_to_stats) def recv_full(self, comm, level=None, add_to_stats=False): """ @@ -284,7 +290,8 @@ def recv_full(self, comm, level=None, add_to_stats=False): add_to_stats: a flag to end recording data in the hooks (defaults to False) """ - self.hooks.pre_comm(step=self.S, level_number=level) + for hook in self.hooks: + hook.pre_comm(step=self.S, level_number=level) if not self.S.status.first and not self.S.status.prev_done: self.logger.debug( 'recv data: process %s, stage %s, time %s, source %s, tag %s, iter %s' @@ -299,7 +306,8 @@ def recv_full(self, comm, level=None, add_to_stats=False): ) self.recv(target=self.S.levels[level], source=self.S.prev, tag=level * 100 + self.S.status.iter, comm=comm) - self.hooks.post_comm(step=self.S, level_number=level, add_to_stats=add_to_stats) + for hook in self.hooks: + hook.post_comm(step=self.S, level_number=level, add_to_stats=add_to_stats) def wait_with_interrupt(self, request): """ @@ -334,7 +342,8 @@ def check_iteration_estimate(self, comm): diff_new = max(diff_new, abs(L.uold[m] - L.u[m])) # Send forward diff - self.hooks.pre_comm(step=self.S, level_number=0) + for hook in self.hooks: + hook.pre_comm(step=self.S, level_number=0) self.wait_with_interrupt(request=self.req_diff) if self.S.status.force_done: @@ -360,7 +369,8 @@ def check_iteration_estimate(self, comm): tmp = np.array(diff_new, dtype=float) self.req_diff = comm.Issend((tmp, MPI.DOUBLE), dest=self.S.next, tag=999) - self.hooks.post_comm(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_comm(step=self.S, level_number=0) # Store values from first iteration if self.S.status.iter == 1: @@ -382,14 +392,18 @@ def check_iteration_estimate(self, comm): if np.ceil(Kest_glob) <= self.S.status.iter: if self.S.status.last: self.logger.debug(f'{self.S.status.slot} is done, broadcasting..') - self.hooks.pre_comm(step=self.S, level_number=0) + for hook in self.hooks: + hook.pre_comm(step=self.S, level_number=0) comm.Ibcast((np.array([1]), MPI.INT), root=self.S.status.slot).Wait() - self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=True) + for hook in self.hooks: + hook.post_comm(step=self.S, level_number=0, add_to_stats=True) self.logger.debug(f'{self.S.status.slot} is done, broadcasting done') self.S.status.done = True else: - self.hooks.pre_comm(step=self.S, level_number=0) - self.hooks.post_comm(step=self.S, level_number=0, add_to_stats=True) + for hook in self.hooks: + hook.pre_comm(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_comm(step=self.S, level_number=0, add_to_stats=True) def pfasst(self, comm, num_procs): """ @@ -420,7 +434,8 @@ def pfasst(self, comm, num_procs): self.logger.debug(f'Rewinding {self.S.status.slot} after {stage}..') self.S.levels[0].u[1:] = self.S.levels[0].uold[1:] - self.hooks.post_iteration(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_iteration(step=self.S, level_number=0) for req in self.req_send: if req is not None and req != MPI.REQUEST_NULL: @@ -431,7 +446,8 @@ def pfasst(self, comm, num_procs): self.req_diff.Cancel() self.S.status.stage = 'DONE' - 
self.hooks.post_step(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_step(step=self.S, level_number=0) else: # Start cycling, if not interrupted @@ -453,7 +469,8 @@ def spread(self, comm, num_procs): """ # first stage: spread values - self.hooks.pre_step(step=self.S, level_number=0) + for hook in self.hooks: + hook.pre_step(step=self.S, level_number=0) # call predictor from sweeper self.S.levels[0].sweep.predict() @@ -476,7 +493,8 @@ def predict(self, comm, num_procs): Predictor phase """ - self.hooks.pre_predict(step=self.S, level_number=0) + for hook in self.hooks: + hook.pre_predict(step=self.S, level_number=0) if self.params.predict_type is None: pass @@ -568,7 +586,8 @@ def predict(self, comm, num_procs): else: raise ControllerError('Wrong predictor type, got %s' % self.params.predict_type) - self.hooks.post_predict(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_predict(step=self.S, level_number=0) # update stage self.S.status.stage = 'IT_CHECK' @@ -598,7 +617,8 @@ def it_check(self, comm, num_procs): return None if self.S.status.iter > 0: - self.hooks.post_iteration(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_iteration(step=self.S, level_number=0) # decide if the step is done, needs to be restarted and other things convergence related for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: @@ -611,7 +631,8 @@ def it_check(self, comm, num_procs): # increment iteration count here (and only here) self.S.status.iter += 1 - self.hooks.pre_iteration(step=self.S, level_number=0) + for hook in self.hooks: + hook.pre_iteration(step=self.S, level_number=0) for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: C.pre_iteration_processing(self, self.S, comm=comm) @@ -648,7 +669,8 @@ def it_check(self, comm, num_procs): if self.req_diff is not None: self.req_diff.Cancel() - self.hooks.post_step(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_step(step=self.S, level_number=0) self.S.status.stage = 'DONE' def it_fine(self, comm, num_procs): @@ -675,10 +697,12 @@ def it_fine(self, comm, num_procs): if self.S.status.force_done: return None - self.hooks.pre_sweep(step=self.S, level_number=0) + for hook in self.hooks: + hook.pre_sweep(step=self.S, level_number=0) self.S.levels[0].sweep.update_nodes() self.S.levels[0].sweep.compute_residual() - self.hooks.post_sweep(step=self.S, level_number=0) + for hook in self.hooks: + hook.post_sweep(step=self.S, level_number=0) # update stage self.S.status.stage = 'IT_CHECK' @@ -705,10 +729,12 @@ def it_down(self, comm, num_procs): if self.S.status.force_done: return None - self.hooks.pre_sweep(step=self.S, level_number=l) + for hook in self.hooks: + hook.pre_sweep(step=self.S, level_number=l) self.S.levels[l].sweep.update_nodes() self.S.levels[l].sweep.compute_residual() - self.hooks.post_sweep(step=self.S, level_number=l) + for hook in self.hooks: + hook.post_sweep(step=self.S, level_number=l) # transfer further down the hierarchy self.S.transfer(source=self.S.levels[l], target=self.S.levels[l + 1]) @@ -727,14 +753,16 @@ def it_coarse(self, comm, num_procs): return None # do the sweep - self.hooks.pre_sweep(step=self.S, level_number=len(self.S.levels) - 1) + for hook in self.hooks: + hook.pre_sweep(step=self.S, level_number=len(self.S.levels) - 1) assert self.S.levels[-1].params.nsweeps == 1, ( 'ERROR: this controller can only work with one sweep on the coarse level, got %s' % self.S.levels[-1].params.nsweeps ) 
self.S.levels[-1].sweep.update_nodes()
         self.S.levels[-1].sweep.compute_residual()
-        self.hooks.post_sweep(step=self.S, level_number=len(self.S.levels) - 1)
+        for hook in self.hooks:
+            hook.post_sweep(step=self.S, level_number=len(self.S.levels) - 1)
         self.S.levels[-1].sweep.compute_end_point()
 
         # send to next step
@@ -774,10 +802,12 @@ def it_up(self, comm, num_procs):
             if self.S.status.force_done:
                 return None
 
-            self.hooks.pre_sweep(step=self.S, level_number=l - 1)
+            for hook in self.hooks:
+                hook.pre_sweep(step=self.S, level_number=l - 1)
             self.S.levels[l - 1].sweep.update_nodes()
             self.S.levels[l - 1].sweep.compute_residual()
-            self.hooks.post_sweep(step=self.S, level_number=l - 1)
+            for hook in self.hooks:
+                hook.post_sweep(step=self.S, level_number=l - 1)
 
             # update stage
             self.S.status.stage = 'IT_FINE'
diff --git a/pySDC/implementations/controller_classes/controller_nonMPI.py b/pySDC/implementations/controller_classes/controller_nonMPI.py
index 2144717298..4cbd86260e 100644
--- a/pySDC/implementations/controller_classes/controller_nonMPI.py
+++ b/pySDC/implementations/controller_classes/controller_nonMPI.py
@@ -100,7 +100,8 @@ def run(self, u0, t0, Tend):
         # some initializations and reset of statistics
         uend = None
         num_procs = len(self.MS)
-        self.hooks.reset_stats()
+        for hook in self.hooks:
+            hook.reset_stats()
 
         # initial ordering of the steps: 0,1,...,Np-1
         slots = list(range(num_procs))
@@ -120,11 +121,13 @@ def run(self, u0, t0, Tend):
         # initialize block of steps with u0
         self.restart_block(active_slots, time, u0)
 
-        self.hooks.post_setup(step=None, level_number=None)
+        for hook in self.hooks:
+            hook.post_setup(step=None, level_number=None)
 
         # call pre-run hook
         for S in self.MS:
-            self.hooks.pre_run(step=S, level_number=0)
+            for hook in self.hooks:
+                hook.pre_run(step=S, level_number=0)
 
         # main loop: as long as at least one step is still active (time < Tend), do something
         while any(active):
@@ -168,9 +171,10 @@ def run(self, u0, t0, Tend):
 
         # call post-run hook
         for S in self.MS:
-            self.hooks.post_run(step=S, level_number=0)
+            for hook in self.hooks:
+                hook.post_run(step=S, level_number=0)
 
-        return uend, self.hooks.return_stats()
+        return uend, self.return_stats()
 
     def restart_block(self, active_slots, time, u0):
         """
@@ -241,13 +245,16 @@ def send(source, tag):
             source.sweep.compute_end_point()
             source.tag = cp.deepcopy(tag)
 
-        self.hooks.pre_comm(step=S, level_number=level)
+        for hook in self.hooks:
+            hook.pre_comm(step=S, level_number=level)
         if not S.status.last:
             self.logger.debug(
                 'Process %2i provides data on level %2i with tag %s' % (S.status.slot, level, S.status.iter)
             )
             send(S.levels[level], tag=(level, S.status.iter, S.status.slot))
-        self.hooks.post_comm(step=S, level_number=level, add_to_stats=add_to_stats)
+
+        for hook in self.hooks:
+            hook.post_comm(step=S, level_number=level, add_to_stats=add_to_stats)
 
     def recv_full(self, S, level=None, add_to_stats=False):
         """
@@ -276,14 +283,16 @@ def recv(target, source, tag=None):
             # re-evaluate f on left interval boundary
             target.f[0] = target.prob.eval_f(target.u[0], target.time)
 
-        self.hooks.pre_comm(step=S, level_number=level)
+        for hook in self.hooks:
+            hook.pre_comm(step=S, level_number=level)
         if not S.status.prev_done and not S.status.first:
             self.logger.debug(
                 'Process %2i receives from %2i on level %2i with tag %s'
                 % (S.status.slot, S.prev.status.slot, level, S.status.iter)
             )
             recv(S.levels[level], S.prev.levels[level], tag=(level, S.status.iter, S.prev.status.slot))
-        self.hooks.post_comm(step=S, level_number=level, add_to_stats=add_to_stats)
+        
for hook in self.hooks: + hook.post_comm(step=S, level_number=level, add_to_stats=add_to_stats) def pfasst(self, local_MS_active): """ @@ -333,7 +342,8 @@ def spread(self, local_MS_running): for S in local_MS_running: # first stage: spread values - self.hooks.pre_step(step=S, level_number=0) + for hook in self.hooks: + hook.pre_step(step=S, level_number=0) # call predictor from sweeper S.levels[0].sweep.predict() @@ -356,7 +366,8 @@ def predict(self, local_MS_running): """ for S in local_MS_running: - self.hooks.pre_predict(step=S, level_number=0) + for hook in self.hooks: + hook.pre_predict(step=S, level_number=0) if self.params.predict_type is None: pass @@ -464,7 +475,8 @@ def predict(self, local_MS_running): raise ControllerError('Wrong predictor type, got %s' % self.params.predict_type) for S in local_MS_running: - self.hooks.post_predict(step=S, level_number=0) + for hook in self.hooks: + hook.post_predict(step=S, level_number=0) for S in local_MS_running: # update stage @@ -490,7 +502,8 @@ def it_check(self, local_MS_running): for S in local_MS_running: if S.status.iter > 0: - self.hooks.post_iteration(step=S, level_number=0) + for hook in self.hooks: + hook.post_iteration(step=S, level_number=0) # decide if the step is done, needs to be restarted and other things convergence related for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: @@ -499,20 +512,25 @@ def it_check(self, local_MS_running): for S in local_MS_running: if not S.status.first: - self.hooks.pre_comm(step=S, level_number=0) + for hook in self.hooks: + hook.pre_comm(step=S, level_number=0) S.status.prev_done = S.prev.status.done # "communicate" - self.hooks.post_comm(step=S, level_number=0, add_to_stats=True) + for hook in self.hooks: + hook.post_comm(step=S, level_number=0, add_to_stats=True) S.status.done = S.status.done and S.status.prev_done if self.params.all_to_done: - self.hooks.pre_comm(step=S, level_number=0) + for hook in self.hooks: + hook.pre_comm(step=S, level_number=0) S.status.done = all([T.status.done for T in local_MS_running]) - self.hooks.post_comm(step=S, level_number=0, add_to_stats=True) + for hook in self.hooks: + hook.post_comm(step=S, level_number=0, add_to_stats=True) if not S.status.done: # increment iteration count here (and only here) S.status.iter += 1 - self.hooks.pre_iteration(step=S, level_number=0) + for hook in self.hooks: + hook.pre_iteration(step=S, level_number=0) for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: C.pre_iteration_processing(self, S) @@ -525,7 +543,8 @@ def it_check(self, local_MS_running): S.status.stage = 'IT_COARSE' # serial MSSDC (Gauss-like) else: S.levels[0].sweep.compute_end_point() - self.hooks.post_step(step=S, level_number=0) + for hook in self.hooks: + hook.post_step(step=S, level_number=0) S.status.stage = 'DONE' for C in [self.convergence_controllers[i] for i in self.convergence_controller_order]: @@ -555,10 +574,12 @@ def it_fine(self, local_MS_running): for S in local_MS_running: # standard sweep workflow: update nodes, compute residual, log progress - self.hooks.pre_sweep(step=S, level_number=0) + for hook in self.hooks: + hook.pre_sweep(step=S, level_number=0) S.levels[0].sweep.update_nodes() S.levels[0].sweep.compute_residual() - self.hooks.post_sweep(step=S, level_number=0) + for hook in self.hooks: + hook.post_sweep(step=S, level_number=0) for S in local_MS_running: # update stage @@ -589,10 +610,12 @@ def it_down(self, local_MS_running): self.recv_full(S, level=l) for S in 
local_MS_running: - self.hooks.pre_sweep(step=S, level_number=l) + for hook in self.hooks: + hook.pre_sweep(step=S, level_number=l) S.levels[l].sweep.update_nodes() S.levels[l].sweep.compute_residual() - self.hooks.post_sweep(step=S, level_number=l) + for hook in self.hooks: + hook.post_sweep(step=S, level_number=l) for S in local_MS_running: # transfer further down the hierarchy @@ -616,10 +639,12 @@ def it_coarse(self, local_MS_running): self.recv_full(S, level=len(S.levels) - 1) # do the sweep - self.hooks.pre_sweep(step=S, level_number=len(S.levels) - 1) + for hook in self.hooks: + hook.pre_sweep(step=S, level_number=len(S.levels) - 1) S.levels[-1].sweep.update_nodes() S.levels[-1].sweep.compute_residual() - self.hooks.post_sweep(step=S, level_number=len(S.levels) - 1) + for hook in self.hooks: + hook.post_sweep(step=S, level_number=len(S.levels) - 1) # send to succ step self.send_full(S, level=len(S.levels) - 1, add_to_stats=True) @@ -657,10 +682,12 @@ def it_up(self, local_MS_running): self.recv_full(S, level=l - 1, add_to_stats=(k == self.nsweeps[l - 1] - 1)) for S in local_MS_running: - self.hooks.pre_sweep(step=S, level_number=l - 1) + for hook in self.hooks: + hook.pre_sweep(step=S, level_number=l - 1) S.levels[l - 1].sweep.update_nodes() S.levels[l - 1].sweep.compute_residual() - self.hooks.post_sweep(step=S, level_number=l - 1) + for hook in self.hooks: + hook.post_sweep(step=S, level_number=l - 1) for S in local_MS_running: # update stage diff --git a/pySDC/implementations/convergence_controller_classes/check_convergence.py b/pySDC/implementations/convergence_controller_classes/check_convergence.py index f969779d07..e6a1907a9a 100644 --- a/pySDC/implementations/convergence_controller_classes/check_convergence.py +++ b/pySDC/implementations/convergence_controller_classes/check_convergence.py @@ -87,13 +87,16 @@ def communicate_convergence(self, controller, S, comm): if controller.params.all_to_done: from mpi4py.MPI import LAND - controller.hooks.pre_comm(step=S, level_number=0) + for hook in controller.hooks: + hook.pre_comm(step=S, level_number=0) S.status.done = comm.allreduce(sendobj=S.status.done, op=LAND) - controller.hooks.post_comm(step=S, level_number=0, add_to_stats=True) + for hook in controller.hooks: + hook.post_comm(step=S, level_number=0, add_to_stats=True) else: - controller.hooks.pre_comm(step=S, level_number=0) + for hook in controller.hooks: + hook.pre_comm(step=S, level_number=0) # check if an open request of the status send is pending controller.wait_with_interrupt(request=controller.req_status) @@ -109,4 +112,5 @@ def communicate_convergence(self, controller, S, comm): if not S.status.last: self.send(comm, dest=S.status.slot + 1, data=S.status.done) - controller.hooks.post_comm(step=S, level_number=0, add_to_stats=True) + for hook in controller.hooks: + hook.post_comm(step=S, level_number=0, add_to_stats=True) diff --git a/pySDC/implementations/hooks/default_hook.py b/pySDC/implementations/hooks/default_hook.py new file mode 100644 index 0000000000..2ddc4d05db --- /dev/null +++ b/pySDC/implementations/hooks/default_hook.py @@ -0,0 +1,332 @@ +import time +from pySDC.core.Hooks import hooks + + +class default_hooks(hooks): + """ + Hook class to contain the functions called during the controller runs (e.g. 
for calling user-routines) + + Attributes: + __t0_setup (float): private variable to get starting time of setup + __t0_run (float): private variable to get starting time of the run + __t0_predict (float): private variable to get starting time of the predictor + __t0_step (float): private variable to get starting time of the step + __t0_iteration (float): private variable to get starting time of the iteration + __t0_sweep (float): private variable to get starting time of the sweep + __t0_comm (list): private variable to get starting time of the communication + __t1_run (float): private variable to get end time of the run + __t1_predict (float): private variable to get end time of the predictor + __t1_step (float): private variable to get end time of the step + __t1_iteration (float): private variable to get end time of the iteration + __t1_sweep (float): private variable to get end time of the sweep + __t1_setup (float): private variable to get end time of setup + __t1_comm (list): private variable to hold timing of the communication (!) + logger: logger instance for output + __stats (dict): dictionary for gathering the statistics of a run + __entry (namedtuple): statistics entry containing all information to identify the value + """ + + def __init__(self): + super(default_hooks, self).__init__() + self.__t0_setup = None + self.__t0_run = None + self.__t0_predict = None + self.__t0_step = None + self.__t0_iteration = None + self.__t0_sweep = None + self.__t0_comm = [] + self.__t1_run = None + self.__t1_predict = None + self.__t1_step = None + self.__t1_iteration = None + self.__t1_sweep = None + self.__t1_setup = None + self.__t1_comm = [] + + def pre_setup(self, step, level_number): + """ + Default routine called before setup starts + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t0_setup = time.perf_counter() + + def pre_run(self, step, level_number): + """ + Default routine called before time-loop starts + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t0_run = time.perf_counter() + + def pre_predict(self, step, level_number): + """ + Default routine called before predictor starts + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t0_predict = time.perf_counter() + + def pre_step(self, step, level_number): + """ + Hook called before each step + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t0_step = time.perf_counter() + + def pre_iteration(self, step, level_number): + """ + Default routine called before iteration starts + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t0_iteration = time.perf_counter() + + def pre_sweep(self, step, level_number): + """ + Default routine called before sweep starts + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t0_sweep = time.perf_counter() + + def pre_comm(self, step, level_number): + """ + Default routine called before communication starts + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + if len(self.__t0_comm) >= level_number + 1: + self.__t0_comm[level_number] = time.perf_counter() + else: + while len(self.__t0_comm) < level_number: + self.__t0_comm.append(None) + 
self.__t0_comm.append(time.perf_counter()) + while len(self.__t1_comm) <= level_number: + self.__t1_comm.append(0.0) + assert len(self.__t0_comm) == level_number + 1 + assert len(self.__t1_comm) == level_number + 1 + + def post_comm(self, step, level_number, add_to_stats=False): + """ + Default routine called after each communication + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + add_to_stats (bool): set if result should go to stats object + """ + assert len(self.__t1_comm) >= level_number + 1 + self.__t1_comm[level_number] += time.perf_counter() - self.__t0_comm[level_number] + + if add_to_stats: + L = step.levels[level_number] + + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='timing_comm', + value=self.__t1_comm[level_number], + ) + self.__t1_comm[level_number] = 0.0 + + def post_sweep(self, step, level_number): + """ + Default routine called after each sweep + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t1_sweep = time.perf_counter() + + L = step.levels[level_number] + + self.logger.info( + 'Process %2i on time %8.6f at stage %15s: Level: %s -- Iteration: %2i -- Sweep: %2i -- ' 'residual: %12.8e', + step.status.slot, + L.time, + step.status.stage, + L.level_index, + step.status.iter, + L.status.sweep, + L.status.residual, + ) + + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='residual_post_sweep', + value=L.status.residual, + ) + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='timing_sweep', + value=self.__t1_sweep - self.__t0_sweep, + ) + + def post_iteration(self, step, level_number): + """ + Default routine called after each iteration + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t1_iteration = time.perf_counter() + + L = step.levels[level_number] + + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=-1, + iter=step.status.iter, + sweep=L.status.sweep, + type='residual_post_iteration', + value=L.status.residual, + ) + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='timing_iteration', + value=self.__t1_iteration - self.__t0_iteration, + ) + + def post_step(self, step, level_number): + """ + Default routine called after each step or block + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t1_step = time.perf_counter() + + L = step.levels[level_number] + + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='timing_step', + value=self.__t1_step - self.__t0_step, + ) + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=-1, + iter=step.status.iter, + sweep=L.status.sweep, + type='niter', + value=step.status.iter, + ) + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=-1, + sweep=L.status.sweep, + type='residual_post_step', + value=L.status.residual, + ) + + # record the recomputed quantities at weird positions to make sure there is only one value for each step + for t in [L.time, L.time + L.dt]: + 
self.add_to_stats( + process=-1, time=t, level=-1, iter=-1, sweep=-1, type='_recomputed', value=step.status.get('restart') + ) + + def post_predict(self, step, level_number): + """ + Default routine called after each predictor + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t1_predict = time.perf_counter() + + L = step.levels[level_number] + + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='timing_predictor', + value=self.__t1_predict - self.__t0_predict, + ) + + def post_run(self, step, level_number): + """ + Default routine called after each run + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t1_run = time.perf_counter() + + L = step.levels[level_number] + + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='timing_run', + value=self.__t1_run - self.__t0_run, + ) + + def post_setup(self, step, level_number): + """ + Default routine called after setup + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + """ + self.__t1_setup = time.perf_counter() + + self.add_to_stats( + process=-1, + time=-1, + level=-1, + iter=-1, + sweep=-1, + type='timing_setup', + value=self.__t1_setup - self.__t0_setup, + ) From 619117708f3e6fe22f643587a1641de8f158ea14 Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Thu, 12 Jan 2023 16:06:19 +0100 Subject: [PATCH 08/23] Error estimating convergence controllers will now add hooks to record their estimates --- pySDC/core/Controller.py | 23 +++++-- pySDC/core/Hooks.py | 15 ----- .../controller_classes/controller_MPI.py | 2 +- .../estimate_embedded_error.py | 6 +- .../estimate_extrapolation_error.py | 2 + pySDC/projects/PinTSimE/switch_estimator.py | 2 +- pySDC/projects/Resilience/accuracy_check.py | 22 +------ pySDC/projects/Resilience/advection.py | 61 +------------------ pySDC/projects/Resilience/heat.py | 4 +- pySDC/projects/Resilience/hook.py | 38 +++--------- pySDC/projects/Resilience/piline.py | 10 +-- pySDC/projects/Resilience/vdp.py | 9 +-- 12 files changed, 50 insertions(+), 144 deletions(-) diff --git a/pySDC/core/Controller.py b/pySDC/core/Controller.py index 932ec4b13c..08d95ba4d6 100644 --- a/pySDC/core/Controller.py +++ b/pySDC/core/Controller.py @@ -42,12 +42,11 @@ def __init__(self, controller_params, description): # check if we have a hook on this list. If not, use default class. self.__hooks = [] - self.hook_classes = [default_hooks] + hook_classes = [default_hooks] user_hooks = controller_params.get('hook_class', []) - self.hook_classes += user_hooks if type(user_hooks) == list else [user_hooks] - for hook in self.hook_classes: - self.__hooks += [hook()] - controller_params['hook_class'] = controller_params.get('hook_class', self.hook_classes) + hook_classes += user_hooks if type(user_hooks) == list else [user_hooks] + [self.add_hook(hook) for hook in hook_classes] + controller_params['hook_class'] = controller_params.get('hook_class', hook_classes) for hook in self.hooks: hook.pre_setup(step=None, level_number=None) @@ -107,6 +106,20 @@ def __setup_custom_logger(level=None, log_to_file=None, fname=None): else: pass + def add_hook(self, hook): + """ + Add a hook to the controller which will be called in addition to all other hooks whenever something happens. 
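A sketch of the registration flow the hunks above implement. Here `my_hook` and `description` stand for a user-defined hook class and a complete problem description; the controller class and parameter names are the ones used throughout this series, so treat the exact call as illustrative rather than definitive.

    # Illustrative sketch only, not part of the diff above. `my_hook` and `description`
    # are placeholders for a user-defined hook class and a full problem description.
    from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI

    controller_params = dict()
    controller_params['logger_level'] = 30
    controller_params['hook_class'] = [my_hook]  # a single class instead of a list also works

    controller = controller_nonMPI(num_procs=1, controller_params=controller_params, description=description)
    controller.add_hook(my_hook)  # no effect: a hook of this class is already registered
    # controller.hooks now holds one instance of the default hook class plus one of my_hook,
    # and every event is dispatched via loops like `for hook in self.hooks: hook.post_step(...)`.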
+ The hook is only added if a hook of the same class is not already present. + + Args: + hook (pySDC.Hook): A hook class that is derived from the core hook class + + Returns: + None + """ + if hook not in [type(me) for me in self.hooks]: + self.__hooks += [hook()] + def welcome_message(self): out = ( "Welcome to the one and only, really very astonishing and 87.3% bug free" diff --git a/pySDC/core/Hooks.py b/pySDC/core/Hooks.py index e875e694d9..0f1f597a3f 100644 --- a/pySDC/core/Hooks.py +++ b/pySDC/core/Hooks.py @@ -1,5 +1,4 @@ import logging -import time from collections import namedtuple @@ -9,20 +8,6 @@ class hooks(object): Hook class to contain the functions called during the controller runs (e.g. for calling user-routines) Attributes: - __t0_setup (float): private variable to get starting time of setup - __t0_run (float): private variable to get starting time of the run - __t0_predict (float): private variable to get starting time of the predictor - __t0_step (float): private variable to get starting time of the step - __t0_iteration (float): private variable to get starting time of the iteration - __t0_sweep (float): private variable to get starting time of the sweep - __t0_comm (list): private variable to get starting time of the communication - __t1_run (float): private variable to get end time of the run - __t1_predict (float): private variable to get end time of the predictor - __t1_step (float): private variable to get end time of the step - __t1_iteration (float): private variable to get end time of the iteration - __t1_sweep (float): private variable to get end time of the sweep - __t1_setup (float): private variable to get end time of setup - __t1_comm (list): private variable to hold timing of the communication (!) __num_restarts (int): number of restarts of the current step logger: logger instance for output __stats (dict): dictionary for gathering the statistics of a run diff --git a/pySDC/implementations/controller_classes/controller_MPI.py b/pySDC/implementations/controller_classes/controller_MPI.py index 1c63ede8d0..7dc6bcd781 100644 --- a/pySDC/implementations/controller_classes/controller_MPI.py +++ b/pySDC/implementations/controller_classes/controller_MPI.py @@ -807,7 +807,7 @@ def it_up(self, comm, num_procs): self.S.levels[l - 1].sweep.update_nodes() self.S.levels[l - 1].sweep.compute_residual() for hook in self.hooks: - hooks.post_sweep(step=self.S, level_number=l - 1) + hook.post_sweep(step=self.S, level_number=l - 1) # update stage self.S.status.stage = 'IT_FINE' diff --git a/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py b/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py index 1cf5e3f41b..d1cb9711bc 100644 --- a/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py +++ b/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py @@ -2,6 +2,7 @@ from pySDC.core.ConvergenceController import ConvergenceController, Pars from pySDC.implementations.convergence_controller_classes.store_uold import StoreUOld +from pySDC.implementations.hooks.log_embedded_error_estimate import log_embedded_error_estimate from pySDC.implementations.sweeper_classes.Runge_Kutta import RungeKutta @@ -16,7 +17,7 @@ class EstimateEmbeddedError(ConvergenceController): def __init__(self, controller, params, description, **kwargs): """ - Initalization routine. Add the buffers for communication. + Initialisation routine. Add the buffers for communication. 
Args: controller (pySDC.Controller): The controller @@ -25,6 +26,7 @@ def __init__(self, controller, params, description, **kwargs): """ super(EstimateEmbeddedError, self).__init__(controller, params, description, **kwargs) self.buffers = Pars({'e_em_last': 0.0}) + controller.add_hook(log_embedded_error_estimate) @classmethod def get_implementation(cls, flavor): @@ -123,7 +125,7 @@ def reset_status_variables(self, controller, **kwargs): class EstimateEmbeddedErrorNonMPI(EstimateEmbeddedError): def reset_buffers_nonMPI(self, controller, **kwargs): """ - Reset buffers for immitated communication. + Reset buffers for imitated communication. Args: controller (pySDC.controller): The controller diff --git a/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py b/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py index 698df89627..ec8ddb83dc 100644 --- a/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py +++ b/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py @@ -4,6 +4,7 @@ from pySDC.core.ConvergenceController import ConvergenceController, Status from pySDC.core.Errors import DataError from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh +from pySDC.implementations.hooks.log_extrapolated_error_estimate import log_extrapolated_error_estimate class EstimateExtrapolationErrorBase(ConvergenceController): @@ -27,6 +28,7 @@ def __init__(self, controller, params, description, **kwargs): self.prev = Status(["t", "u", "f", "dt"]) # store solutions etc. of previous steps here self.coeff = Status(["u", "f", "prefactor"]) # store coefficients for extrapolation here super(EstimateExtrapolationErrorBase, self).__init__(controller, params, description) + controller.add_hook(log_extrapolated_error_estimate) def setup(self, controller, params, description, **kwargs): """ diff --git a/pySDC/projects/PinTSimE/switch_estimator.py b/pySDC/projects/PinTSimE/switch_estimator.py index e43fbb49b3..67addb60c2 100644 --- a/pySDC/projects/PinTSimE/switch_estimator.py +++ b/pySDC/projects/PinTSimE/switch_estimator.py @@ -95,7 +95,7 @@ def get_new_step_size(self, controller, S): dt_search = self.t_switch - L.time L.prob.params.set_switch[self.count_switches] = self.switch_detected L.prob.params.t_switch[self.count_switches] = self.t_switch - controller.hooks.add_to_stats( + controller.hooks[0].add_to_stats( process=S.status.slot, time=L.time, level=L.level_index, diff --git a/pySDC/projects/Resilience/accuracy_check.py b/pySDC/projects/Resilience/accuracy_check.py index 4c51d696c1..0bbc375ed9 100644 --- a/pySDC/projects/Resilience/accuracy_check.py +++ b/pySDC/projects/Resilience/accuracy_check.py @@ -34,24 +34,6 @@ def post_step(self, step, level_number): L.sweep.compute_end_point() - self.add_to_stats( - process=step.status.slot, - time=L.time + L.dt, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='e_embedded', - value=L.status.error_embedded_estimate, - ) - self.add_to_stats( - process=step.status.slot, - time=L.time + L.dt, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='e_extrapolated', - value=L.status.get('error_extrapolation_estimate'), - ) self.add_to_stats( process=step.status.slot, time=L.time, @@ -105,8 +87,8 @@ def get_results_from_stats(stats, var, val, hook_class=log_errors): } if hook_class == log_errors: - e_extrapolated = np.array(get_sorted(stats, type='e_extrapolated'))[:, 1] - e_embedded = np.array(get_sorted(stats, 
type='e_embedded'))[:, 1] + e_extrapolated = np.array(get_sorted(stats, type='error_extrapolation_estimate'))[:, 1] + e_embedded = np.array(get_sorted(stats, type='error_embedded_estimate'))[:, 1] e_loc = np.array(get_sorted(stats, type='e_loc'))[:, 1] if len(e_extrapolated[e_extrapolated != [None]]) > 0: diff --git a/pySDC/projects/Resilience/advection.py b/pySDC/projects/Resilience/advection.py index 6515a0931e..1591d340c2 100644 --- a/pySDC/projects/Resilience/advection.py +++ b/pySDC/projects/Resilience/advection.py @@ -6,7 +6,7 @@ from pySDC.core.Hooks import hooks from pySDC.helpers.stats_helper import get_sorted import numpy as np -from pySDC.projects.Resilience.hook import log_error_estimates +from pySDC.projects.Resilience.hook import log_data def plot_embedded(stats, ax): @@ -21,17 +21,8 @@ def plot_embedded(stats, ax): ax.legend(frameon=False) -class log_data(hooks): - def pre_run(self, step, level_number): - """ - Record los conditiones initiales - """ - super(log_data, self).pre_run(step, level_number) - L = step.levels[level_number] - self.add_to_stats(process=0, time=0, level=0, iter=0, sweep=0, type='u0', value=L.u[0]) - +class log_every_iteration(hooks): def post_iteration(self, step, level_number): - super(log_data, self).post_iteration(step, level_number) if step.status.iter == step.params.maxiter - 1: L = step.levels[level_number] L.sweep.compute_end_point() @@ -45,58 +36,12 @@ def post_iteration(self, step, level_number): value=L.uold[-1], ) - def post_step(self, step, level_number): - - super(log_data, self).post_step(step, level_number) - - # some abbreviations - L = step.levels[level_number] - - L.sweep.compute_end_point() - - self.add_to_stats( - process=step.status.slot, - time=L.time + L.dt, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='u', - value=L.uend, - ) - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='dt', - value=L.dt, - ) - self.add_to_stats( - process=step.status.slot, - time=L.time + L.dt, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='e_embedded', - value=L.status.get('error_embedded_estimate'), - ) - self.add_to_stats( - process=step.status.slot, - time=L.time + L.dt, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='e_extrapolated', - value=L.status.get('error_extrapolation_estimate'), - ) - def run_advection( custom_description=None, num_procs=1, Tend=2e-1, - hook_class=log_error_estimates, + hook_class=log_data, fault_stuff=None, custom_controller_params=None, custom_problem_params=None, diff --git a/pySDC/projects/Resilience/heat.py b/pySDC/projects/Resilience/heat.py index e4d3e55bb4..770002ffab 100644 --- a/pySDC/projects/Resilience/heat.py +++ b/pySDC/projects/Resilience/heat.py @@ -5,7 +5,7 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.core.Hooks import hooks from pySDC.helpers.stats_helper import get_sorted -from pySDC.projects.Resilience.hook import log_error_estimates +from pySDC.projects.Resilience.hook import log_data import numpy as np @@ -13,7 +13,7 @@ def run_heat( custom_description=None, num_procs=1, Tend=2e-1, - hook_class=log_error_estimates, + hook_class=log_data, fault_stuff=None, custom_controller_params=None, custom_problem_params=None, diff --git a/pySDC/projects/Resilience/hook.py b/pySDC/projects/Resilience/hook.py index 0ad5bdfb6f..4212b90e93 100644 --- a/pySDC/projects/Resilience/hook.py +++ b/pySDC/projects/Resilience/hook.py @@ -1,7 
+1,13 @@ from pySDC.core.Hooks import hooks +from pySDC.implementations.hooks.log_solution import log_solution +from pySDC.implementations.hooks.log_embedded_error_estimate import log_embedded_error_estimate +from pySDC.implementations.hooks.log_extrapolated_error_estimate import log_extrapolated_error_estimate -class log_error_estimates(hooks): +hook_collection = [log_solution, log_embedded_error_estimate, log_extrapolated_error_estimate] + + +class log_data(hooks): """ Record data required for analysis of problems in the resilience project """ @@ -10,7 +16,6 @@ def pre_run(self, step, level_number): """ Record los conditiones initiales """ - super(log_error_estimates, self).pre_run(step, level_number) L = step.levels[level_number] self.add_to_stats(process=0, time=0, level=0, iter=0, sweep=0, type='u0', value=L.u[0]) @@ -18,22 +23,11 @@ def post_step(self, step, level_number): """ Record final solutions as well as step size and error estimates """ - super(log_error_estimates, self).post_step(step, level_number) - # some abbreviations L = step.levels[level_number] L.sweep.compute_end_point() - self.add_to_stats( - process=step.status.slot, - time=L.time + L.dt, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='u', - value=L.uend, - ) self.add_to_stats( process=step.status.slot, time=L.time, @@ -43,24 +37,6 @@ def post_step(self, step, level_number): type='dt', value=L.dt, ) - self.add_to_stats( - process=step.status.slot, - time=L.time + L.dt, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='e_embedded', - value=L.status.__dict__.get('error_embedded_estimate', None), - ) - self.add_to_stats( - process=step.status.slot, - time=L.time + L.dt, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='e_extrapolated', - value=L.status.__dict__.get('error_extrapolation_estimate', None), - ) self.add_to_stats( process=step.status.slot, time=L.time, diff --git a/pySDC/projects/Resilience/piline.py b/pySDC/projects/Resilience/piline.py index 0d301cab59..9ac4af3fbe 100644 --- a/pySDC/projects/Resilience/piline.py +++ b/pySDC/projects/Resilience/piline.py @@ -7,14 +7,14 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity from pySDC.implementations.convergence_controller_classes.hotrod import HotRod -from pySDC.projects.Resilience.hook import log_error_estimates +from pySDC.projects.Resilience.hook import log_data, hook_collection def run_piline( custom_description=None, num_procs=1, Tend=20.0, - hook_class=log_error_estimates, + hook_class=log_data, fault_stuff=None, custom_controller_params=None, custom_problem_params=None, @@ -68,7 +68,7 @@ def run_piline( # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - controller_params['hook_class'] = hook_class + controller_params['hook_class'] = hook_collection + [hook_class] controller_params['mssdc_jac'] = False if custom_controller_params is not None: @@ -131,8 +131,8 @@ def get_data(stats, recomputed=False): 't': np.array([me[0] for me in get_sorted(stats, type='u', recomputed=recomputed)]), 'dt': np.array([me[1] for me in get_sorted(stats, type='dt', recomputed=recomputed)]), 't_dt': np.array([me[0] for me in get_sorted(stats, type='dt', recomputed=recomputed)]), - 'e_em': np.array(get_sorted(stats, type='e_embedded', recomputed=recomputed))[:, 1], - 'e_ex': np.array(get_sorted(stats, type='e_extrapolated', recomputed=recomputed))[:, 1], 
+ 'e_em': np.array(get_sorted(stats, type='error_embedded_estimate', recomputed=recomputed))[:, 1], + 'e_ex': np.array(get_sorted(stats, type='error_extrapolation_estimate', recomputed=recomputed))[:, 1], 'restarts': np.array(get_sorted(stats, type='restart', recomputed=None))[:, 1], 't_restarts': np.array(get_sorted(stats, type='restart', recomputed=None))[:, 0], 'sweeps': np.array(get_sorted(stats, type='sweeps', recomputed=None))[:, 1], diff --git a/pySDC/projects/Resilience/vdp.py b/pySDC/projects/Resilience/vdp.py index 2a712d39ce..14e4ee27f4 100644 --- a/pySDC/projects/Resilience/vdp.py +++ b/pySDC/projects/Resilience/vdp.py @@ -8,7 +8,8 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity from pySDC.core.Errors import ProblemError -from pySDC.projects.Resilience.hook import log_error_estimates +from pySDC.projects.Resilience.hook import log_data +from pySDC.implementations.hooks.log_solution import log_solution def plot_step_sizes(stats, ax): @@ -85,7 +86,7 @@ def run_vdp( custom_description=None, num_procs=1, Tend=10.0, - hook_class=log_error_estimates, + hook_class=log_data, fault_stuff=None, custom_controller_params=None, custom_problem_params=None, @@ -138,7 +139,7 @@ def run_vdp( # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - controller_params['hook_class'] = hook_class + controller_params['hook_class'] = [hook_class, log_solution] controller_params['mssdc_jac'] = False if custom_controller_params is not None: @@ -212,7 +213,7 @@ def fetch_test_data(stats, comm=None, use_MPI=False): Returns: dict: Key values to perform tests on """ - types = ['e_embedded', 'restart', 'dt', 'sweeps', 'residual_post_step'] + types = ['error_embedded_estimate', 'restart', 'dt', 'sweeps', 'residual_post_step'] data = {} for type in types: if type not in get_list_of_types(stats): From 2be53b8c1060e77a74d4b9880064060ae5c751c8 Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Thu, 12 Jan 2023 16:18:02 +0100 Subject: [PATCH 09/23] Adhering to python standards for naming classes --- pySDC/core/Controller.py | 4 ++-- .../estimate_embedded_error.py | 4 ++-- .../estimate_extrapolation_error.py | 6 +++--- pySDC/implementations/hooks/default_hook.py | 4 ++-- pySDC/projects/Resilience/hook.py | 8 ++++---- pySDC/projects/Resilience/vdp.py | 7 +++---- 6 files changed, 16 insertions(+), 17 deletions(-) diff --git a/pySDC/core/Controller.py b/pySDC/core/Controller.py index 08d95ba4d6..e4d213e475 100644 --- a/pySDC/core/Controller.py +++ b/pySDC/core/Controller.py @@ -6,7 +6,7 @@ from pySDC.core.BaseTransfer import base_transfer from pySDC.helpers.pysdc_helper import FrozenClass from pySDC.implementations.convergence_controller_classes.check_convergence import CheckConvergence -from pySDC.implementations.hooks.default_hook import default_hooks +from pySDC.implementations.hooks.default_hook import DefaultHooks # short helper class to add params as attributes @@ -42,7 +42,7 @@ def __init__(self, controller_params, description): # check if we have a hook on this list. If not, use default class. 
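With the shared `hook_collection` defined in the Resilience project's hook.py above, the run scripts all follow the same pattern: the common logging hooks plus one problem-specific hook. A minimal sketch (the remaining controller and problem setup is elided):

    # Illustrative sketch of the pattern used in piline.py and vdp.py above.
    from pySDC.projects.Resilience.hook import hook_collection, log_data

    controller_params = dict()
    controller_params['logger_level'] = 30
    controller_params['mssdc_jac'] = False
    controller_params['hook_class'] = hook_collection + [log_data]  # shared loggers plus the problem hook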
self.__hooks = [] - hook_classes = [default_hooks] + hook_classes = [DefaultHooks] user_hooks = controller_params.get('hook_class', []) hook_classes += user_hooks if type(user_hooks) == list else [user_hooks] [self.add_hook(hook) for hook in hook_classes] diff --git a/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py b/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py index d1cb9711bc..eb96956b64 100644 --- a/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py +++ b/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py @@ -2,7 +2,7 @@ from pySDC.core.ConvergenceController import ConvergenceController, Pars from pySDC.implementations.convergence_controller_classes.store_uold import StoreUOld -from pySDC.implementations.hooks.log_embedded_error_estimate import log_embedded_error_estimate +from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimate from pySDC.implementations.sweeper_classes.Runge_Kutta import RungeKutta @@ -26,7 +26,7 @@ def __init__(self, controller, params, description, **kwargs): """ super(EstimateEmbeddedError, self).__init__(controller, params, description, **kwargs) self.buffers = Pars({'e_em_last': 0.0}) - controller.add_hook(log_embedded_error_estimate) + controller.add_hook(LogEmbeddedErrorEstimate) @classmethod def get_implementation(cls, flavor): diff --git a/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py b/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py index ec8ddb83dc..887761d982 100644 --- a/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py +++ b/pySDC/implementations/convergence_controller_classes/estimate_extrapolation_error.py @@ -4,7 +4,7 @@ from pySDC.core.ConvergenceController import ConvergenceController, Status from pySDC.core.Errors import DataError from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh -from pySDC.implementations.hooks.log_extrapolated_error_estimate import log_extrapolated_error_estimate +from pySDC.implementations.hooks.log_extrapolated_error_estimate import LogExtrapolationErrorEstimate class EstimateExtrapolationErrorBase(ConvergenceController): @@ -28,7 +28,7 @@ def __init__(self, controller, params, description, **kwargs): self.prev = Status(["t", "u", "f", "dt"]) # store solutions etc. 
of previous steps here self.coeff = Status(["u", "f", "prefactor"]) # store coefficients for extrapolation here super(EstimateExtrapolationErrorBase, self).__init__(controller, params, description) - controller.add_hook(log_extrapolated_error_estimate) + controller.add_hook(LogExtrapolationErrorEstimate) def setup(self, controller, params, description, **kwargs): """ @@ -254,7 +254,7 @@ class EstimateExtrapolationErrorNonMPI(EstimateExtrapolationErrorBase): def setup(self, controller, params, description, **kwargs): """ - Add a no parameter 'no_storage' which decides whether the standart or the no-memory-overhead version is run, + Add a no parameter 'no_storage' which decides whether the standard or the no-memory-overhead version is run, where only values are used for extrapolation which are in memory of other processes Args: diff --git a/pySDC/implementations/hooks/default_hook.py b/pySDC/implementations/hooks/default_hook.py index 2ddc4d05db..1f59b98f1f 100644 --- a/pySDC/implementations/hooks/default_hook.py +++ b/pySDC/implementations/hooks/default_hook.py @@ -2,7 +2,7 @@ from pySDC.core.Hooks import hooks -class default_hooks(hooks): +class DefaultHooks(hooks): """ Hook class to contain the functions called during the controller runs (e.g. for calling user-routines) @@ -27,7 +27,7 @@ class default_hooks(hooks): """ def __init__(self): - super(default_hooks, self).__init__() + super(DefaultHooks, self).__init__() self.__t0_setup = None self.__t0_run = None self.__t0_predict = None diff --git a/pySDC/projects/Resilience/hook.py b/pySDC/projects/Resilience/hook.py index 4212b90e93..c1a635e6dd 100644 --- a/pySDC/projects/Resilience/hook.py +++ b/pySDC/projects/Resilience/hook.py @@ -1,10 +1,10 @@ from pySDC.core.Hooks import hooks -from pySDC.implementations.hooks.log_solution import log_solution -from pySDC.implementations.hooks.log_embedded_error_estimate import log_embedded_error_estimate -from pySDC.implementations.hooks.log_extrapolated_error_estimate import log_extrapolated_error_estimate +from pySDC.implementations.hooks.log_solution import LogSolution +from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimate +from pySDC.implementations.hooks.log_extrapolated_error_estimate import LogExtrapolationErrorEstimate -hook_collection = [log_solution, log_embedded_error_estimate, log_extrapolated_error_estimate] +hook_collection = [LogSolution, LogEmbeddedErrorEstimate, LogExtrapolationErrorEstimate] class log_data(hooks): diff --git a/pySDC/projects/Resilience/vdp.py b/pySDC/projects/Resilience/vdp.py index 14e4ee27f4..de7de087c1 100644 --- a/pySDC/projects/Resilience/vdp.py +++ b/pySDC/projects/Resilience/vdp.py @@ -8,8 +8,7 @@ from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity from pySDC.core.Errors import ProblemError -from pySDC.projects.Resilience.hook import log_data -from pySDC.implementations.hooks.log_solution import log_solution +from pySDC.projects.Resilience.hook import log_data, hook_collection def plot_step_sizes(stats, ax): @@ -29,7 +28,7 @@ def plot_step_sizes(stats, ax): p = np.array([me[1][1] for me in get_sorted(stats, type='u', recomputed=False, sortby='time')]) t = np.array([me[0] for me in get_sorted(stats, type='u', recomputed=False, sortby='time')]) - e_em = np.array(get_sorted(stats, type='e_embedded', recomputed=False, sortby='time'))[:, 1] + e_em = np.array(get_sorted(stats, 
type='error_embedded_estimate', recomputed=False, sortby='time'))[:, 1] dt = np.array(get_sorted(stats, type='dt', recomputed=False, sortby='time')) restart = np.array(get_sorted(stats, type='restart', recomputed=None, sortby='time')) @@ -139,7 +138,7 @@ def run_vdp( # initialize controller parameters controller_params = dict() controller_params['logger_level'] = 30 - controller_params['hook_class'] = [hook_class, log_solution] + controller_params['hook_class'] = hook_collection + [hook_class] controller_params['mssdc_jac'] = False if custom_controller_params is not None: From 7c33bd0fd42e45d4aaed397d379d9d973658eae9 Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Thu, 12 Jan 2023 16:22:26 +0100 Subject: [PATCH 10/23] Forgot to add some files to the commit --- .../hooks/log_embedded_error_estimate.py | 31 ++++++++++++++++++ .../hooks/log_extrapolated_error_estimate.py | 31 ++++++++++++++++++ pySDC/implementations/hooks/log_solution.py | 32 +++++++++++++++++++ 3 files changed, 94 insertions(+) create mode 100644 pySDC/implementations/hooks/log_embedded_error_estimate.py create mode 100644 pySDC/implementations/hooks/log_extrapolated_error_estimate.py create mode 100644 pySDC/implementations/hooks/log_solution.py diff --git a/pySDC/implementations/hooks/log_embedded_error_estimate.py b/pySDC/implementations/hooks/log_embedded_error_estimate.py new file mode 100644 index 0000000000..fb41974411 --- /dev/null +++ b/pySDC/implementations/hooks/log_embedded_error_estimate.py @@ -0,0 +1,31 @@ +from pySDC.core.Hooks import hooks + + +class LogEmbeddedErrorEstimate(hooks): + """ + Store the embedded error estimate at the end of each step as "error_embedded_estimate". + """ + + def post_step(self, step, level_number): + """ + Record embedded error estimate + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + + Returns: + None + """ + # some abbreviations + L = step.levels[level_number] + + self.add_to_stats( + process=step.status.slot, + time=L.time + L.dt, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='error_embedded_estimate', + value=L.status.get('error_embedded_estimate'), + ) diff --git a/pySDC/implementations/hooks/log_extrapolated_error_estimate.py b/pySDC/implementations/hooks/log_extrapolated_error_estimate.py new file mode 100644 index 0000000000..5eea52bca2 --- /dev/null +++ b/pySDC/implementations/hooks/log_extrapolated_error_estimate.py @@ -0,0 +1,31 @@ +from pySDC.core.Hooks import hooks + + +class LogExtrapolationErrorEstimate(hooks): + """ + Store the extrapolated error estimate at the end of each step as "error_extrapolation_estimate". + """ + + def post_step(self, step, level_number): + """ + Record extrapolated error estimate + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + + Returns: + None + """ + # some abbreviations + L = step.levels[level_number] + + self.add_to_stats( + process=step.status.slot, + time=L.time + L.dt, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='error_extrapolation_estimate', + value=L.status.get('error_extrapolation_estimate'), + ) diff --git a/pySDC/implementations/hooks/log_solution.py b/pySDC/implementations/hooks/log_solution.py new file mode 100644 index 0000000000..560985d0e5 --- /dev/null +++ b/pySDC/implementations/hooks/log_solution.py @@ -0,0 +1,32 @@ +from pySDC.core.Hooks import hooks + + +class LogSolution(hooks): + """ + Store the solution at the end of each step as "u". 
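Once the standalone hooks above are registered, their records are retrieved from the statistics object by the new type names. A minimal sketch, assuming `stats` is the statistics object returned by a controller run:

    # Illustrative sketch: reading back what the new hooks record. The type names are the
    # ones used in the hook implementations above; the run itself is elided.
    from pySDC.helpers.stats_helper import get_sorted

    u = get_sorted(stats, type='u')                                 # solution hook
    e_em = get_sorted(stats, type='error_embedded_estimate')       # embedded error estimate hook
    e_ex = get_sorted(stats, type='error_extrapolation_estimate')  # extrapolated error estimate hook
    t_run = get_sorted(stats, type='timing_run')                   # default timing hook
    # every entry is a (time, value) pair, so e.g. [me[1] for me in u] gives the solutions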
+ """ + + def post_step(self, step, level_number): + """ + Record solution at the end of the step + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + + Returns: + None + """ + # some abbreviations + L = step.levels[level_number] + L.sweep.compute_end_point() + + self.add_to_stats( + process=step.status.slot, + time=L.time + L.dt, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='u', + value=L.uend, + ) From 0de2f8dfc83dffee5ba9c2b5a67926f9a8d5cdcf Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Thu, 12 Jan 2023 16:28:53 +0100 Subject: [PATCH 11/23] Added a step size hook --- .../adaptivity.py | 2 ++ .../estimate_embedded_error.py | 2 +- pySDC/implementations/hooks/log_step_size.py | 31 +++++++++++++++++++ pySDC/projects/Resilience/hook.py | 12 ++----- 4 files changed, 36 insertions(+), 11 deletions(-) create mode 100644 pySDC/implementations/hooks/log_step_size.py diff --git a/pySDC/implementations/convergence_controller_classes/adaptivity.py b/pySDC/implementations/convergence_controller_classes/adaptivity.py index 06fc27f073..c39bf5fae2 100644 --- a/pySDC/implementations/convergence_controller_classes/adaptivity.py +++ b/pySDC/implementations/convergence_controller_classes/adaptivity.py @@ -7,6 +7,7 @@ BasicRestartingNonMPI, ) from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI +from pySDC.implementations.hooks.log_step_size import LogStepSize class AdaptivityBase(ConvergenceController): @@ -35,6 +36,7 @@ def setup(self, controller, params, description, **kwargs): "control_order": -50, "beta": 0.9, } + controller.add_hook(LogStepSize) return {**defaults, **params} def dependencies(self, controller, description, **kwargs): diff --git a/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py b/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py index eb96956b64..f33f272e29 100644 --- a/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py +++ b/pySDC/implementations/convergence_controller_classes/estimate_embedded_error.py @@ -44,7 +44,7 @@ def get_implementation(cls, flavor): elif flavor == 'nonMPI': return EstimateEmbeddedErrorNonMPI else: - raise NotImplementedError(f'Flavor {flavor} of EmstimateEmbeddedError is not implemented!') + raise NotImplementedError(f'Flavor {flavor} of EstimateEmbeddedError is not implemented!') def setup(self, controller, params, description, **kwargs): """ diff --git a/pySDC/implementations/hooks/log_step_size.py b/pySDC/implementations/hooks/log_step_size.py new file mode 100644 index 0000000000..90e4fbb6de --- /dev/null +++ b/pySDC/implementations/hooks/log_step_size.py @@ -0,0 +1,31 @@ +from pySDC.core.Hooks import hooks + + +class LogStepSize(hooks): + """ + Store the step size at the end of each step as "dt". 
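Because `Adaptivity` now registers `LogStepSize` itself, runs with adaptive step size expose `dt` in the statistics without any user-side hook. In the sketch below, the `convergence_controllers` entry and the `e_tol` value follow common pySDC usage and are not taken from this patch, so treat them as assumptions; `description` and `stats` are placeholders for a complete setup and run.

    # Illustrative sketch; the description entry below is assumed from usual pySDC usage,
    # it does not appear in this patch.
    from pySDC.implementations.convergence_controller_classes.adaptivity import Adaptivity
    from pySDC.helpers.stats_helper import get_sorted

    description['convergence_controllers'] = {Adaptivity: {'e_tol': 1e-7}}
    # ... build and run the controller as usual ...
    dt = get_sorted(stats, type='dt', recomputed=False)  # (time, step size) pairs from the step size hook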
+ """ + + def post_step(self, step, level_number): + """ + Record step size + + Args: + step (pySDC.Step.step): the current step + level_number (int): the current level number + + Returns: + None + """ + # some abbreviations + L = step.levels[level_number] + + self.add_to_stats( + process=step.status.slot, + time=L.time, + level=L.level_index, + iter=step.status.iter, + sweep=L.status.sweep, + type='dt', + value=L.dt, + ) diff --git a/pySDC/projects/Resilience/hook.py b/pySDC/projects/Resilience/hook.py index c1a635e6dd..e84ecd21c5 100644 --- a/pySDC/projects/Resilience/hook.py +++ b/pySDC/projects/Resilience/hook.py @@ -2,9 +2,10 @@ from pySDC.implementations.hooks.log_solution import LogSolution from pySDC.implementations.hooks.log_embedded_error_estimate import LogEmbeddedErrorEstimate from pySDC.implementations.hooks.log_extrapolated_error_estimate import LogExtrapolationErrorEstimate +from pySDC.implementations.hooks.log_step_size import LogStepSize -hook_collection = [LogSolution, LogEmbeddedErrorEstimate, LogExtrapolationErrorEstimate] +hook_collection = [LogSolution, LogEmbeddedErrorEstimate, LogExtrapolationErrorEstimate, LogStepSize] class log_data(hooks): @@ -28,15 +29,6 @@ def post_step(self, step, level_number): L.sweep.compute_end_point() - self.add_to_stats( - process=step.status.slot, - time=L.time, - level=L.level_index, - iter=0, - sweep=L.status.sweep, - type='dt', - value=L.dt, - ) self.add_to_stats( process=step.status.slot, time=L.time, From 02dc5d428b48f5c5d4623f3a5aea2e8cbe2974fd Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Thu, 12 Jan 2023 17:17:03 +0100 Subject: [PATCH 12/23] Adapted controller in project --- .../matrixPFASST/controller_matrix_nonMPI.py | 35 ++++++++++++------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/pySDC/projects/matrixPFASST/controller_matrix_nonMPI.py b/pySDC/projects/matrixPFASST/controller_matrix_nonMPI.py index afdcfed8a0..081ca420d4 100644 --- a/pySDC/projects/matrixPFASST/controller_matrix_nonMPI.py +++ b/pySDC/projects/matrixPFASST/controller_matrix_nonMPI.py @@ -133,7 +133,8 @@ def run(self, u0, t0, Tend): # some initializations and reset of statistics uend = None num_procs = len(self.MS) - self.hooks.reset_stats() + for hook in self.hooks: + hook.reset_stats() assert ( (Tend - t0) / self.dt @@ -152,7 +153,8 @@ def run(self, u0, t0, Tend): # call pre-run hook for S in self.MS: - self.hooks.pre_run(step=S, level_number=0) + for hook in self.hooks: + hook.pre_run(step=S, level_number=0) nblocks = int((Tend - t0) / self.dt / num_procs) @@ -169,9 +171,10 @@ def run(self, u0, t0, Tend): # call post-run hook for S in self.MS: - self.hooks.post_run(step=S, level_number=0) + for hook in self.hooks: + hook.post_run(step=S, level_number=0) - return uend, self.hooks.return_stats() + return uend, self.return_stats() def build_propagation_matrix(self, niter): """ @@ -302,7 +305,8 @@ def pfasst(self, MS): MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='PRE_STEP') for S in MS: - self.hooks.pre_step(step=S, level_number=0) + for hook in self.hooks: + hook.pre_step(step=S, level_number=0) while np.linalg.norm(self.res, np.inf) > self.tol and niter < self.maxiter: @@ -310,14 +314,16 @@ def pfasst(self, MS): MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='PRE_ITERATION') for S in MS: - self.hooks.pre_iteration(step=S, level_number=0) + for hook in self.hooks: + hook.pre_iteration(step=S, level_number=0) if self.nlevels > 1: for _ in 
range(MS[0].levels[1].params.nsweeps): MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=1, stage='PRE_COARSE_SWEEP') for S in MS: - self.hooks.pre_sweep(step=S, level_number=1) + for hook in self.hooks: + hook.pre_sweep(step=S, level_number=1) self.u += self.Tcf.dot(np.linalg.solve(self.Pc, self.Tfc.dot(self.res))) self.res = self.u0 - self.C.dot(self.u) @@ -326,27 +332,32 @@ def pfasst(self, MS): MS=MS, u=self.u, res=self.res, niter=niter, level=1, stage='POST_COARSE_SWEEP' ) for S in MS: - self.hooks.post_sweep(step=S, level_number=1) + for hook in self.hooks: + hook.post_sweep(step=S, level_number=1) for _ in range(MS[0].levels[0].params.nsweeps): MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='PRE_FINE_SWEEP') for S in MS: - self.hooks.pre_sweep(step=S, level_number=0) + for hook in self.hooks: + hook.pre_sweep(step=S, level_number=0) self.u += np.linalg.solve(self.P, self.res) self.res = self.u0 - self.C.dot(self.u) MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='POST_FINE_SWEEP') for S in MS: - self.hooks.post_sweep(step=S, level_number=0) + for hook in self.hooks: + hook.post_sweep(step=S, level_number=0) MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='POST_ITERATION') for S in MS: - self.hooks.post_iteration(step=S, level_number=0) + for hook in self.hooks: + hook.post_iteration(step=S, level_number=0) MS = self.update_data(MS=MS, u=self.u, res=self.res, niter=niter, level=0, stage='POST_STEP') for S in MS: - self.hooks.post_step(step=S, level_number=0) + for hook in self.hooks: + hook.post_step(step=S, level_number=0) return MS From 226d9dd6e1a5cdf0d5a2fe8f1c7e4f1d6b8381d5 Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Fri, 13 Jan 2023 10:38:27 +0100 Subject: [PATCH 13/23] Dealt with dangerous business of restarts a little bit --- pySDC/core/Hooks.py | 8 ++++++-- pySDC/implementations/hooks/default_hook.py | 19 +++++++++++++++---- .../hooks/log_embedded_error_estimate.py | 3 ++- .../hooks/log_extrapolated_error_estimate.py | 2 ++ pySDC/implementations/hooks/log_solution.py | 3 ++- pySDC/implementations/hooks/log_step_size.py | 3 ++- pySDC/projects/Resilience/hook.py | 7 ++++--- 7 files changed, 33 insertions(+), 12 deletions(-) diff --git a/pySDC/core/Hooks.py b/pySDC/core/Hooks.py index 0f1f597a3f..dfff1045af 100644 --- a/pySDC/core/Hooks.py +++ b/pySDC/core/Hooks.py @@ -7,9 +7,13 @@ class hooks(object): """ Hook class to contain the functions called during the controller runs (e.g. for calling user-routines) + When deriving a custom hook from this class make sure to always call the parent method using e.g. + `super().post_step(step, level_number)`. Otherwise bugs may arise when using `filer_recomputed` from the stats + helper for post processing. 
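The docstring above states the new contract for derived hooks: always chain up to the parent method so that the restart bookkeeping (the '_recomputed' entries) remains usable for filtering recomputed values. A minimal sketch of a conforming hook follows; the class name and the logged quantity are made up, while the call signatures follow the patch.

    # Illustrative sketch of a hook that respects the new requirement to call super().
    from pySDC.core.Hooks import hooks

    class LogSomething(hooks):  # hypothetical example hook
        def post_step(self, step, level_number):
            super().post_step(step, level_number)  # required: keeps restart bookkeeping consistent
            L = step.levels[level_number]
            self.add_to_stats(
                process=step.status.slot,
                time=L.time + L.dt,
                level=L.level_index,
                iter=step.status.iter,
                sweep=L.status.sweep,
                type='something',
                value=L.status.residual,
            )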
+ Attributes: - __num_restarts (int): number of restarts of the current step logger: logger instance for output + __num_restarts (int): number of restarts of the current step __stats (dict): dictionary for gathering the statistics of a run __entry (namedtuple): statistics entry containing all information to identify the value """ @@ -120,7 +124,7 @@ def pre_predict(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ - pass + self.__num_restarts = step.status.get('restarts_in_a_row') if step is not None else 0 def pre_step(self, step, level_number): """ diff --git a/pySDC/implementations/hooks/default_hook.py b/pySDC/implementations/hooks/default_hook.py index 1f59b98f1f..dcdd236421 100644 --- a/pySDC/implementations/hooks/default_hook.py +++ b/pySDC/implementations/hooks/default_hook.py @@ -21,13 +21,10 @@ class DefaultHooks(hooks): __t1_sweep (float): private variable to get end time of the sweep __t1_setup (float): private variable to get end time of setup __t1_comm (list): private variable to hold timing of the communication (!) - logger: logger instance for output - __stats (dict): dictionary for gathering the statistics of a run - __entry (namedtuple): statistics entry containing all information to identify the value """ def __init__(self): - super(DefaultHooks, self).__init__() + super().__init__() self.__t0_setup = None self.__t0_run = None self.__t0_predict = None @@ -51,6 +48,7 @@ def pre_setup(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().pre_setup(step, level_number) self.__t0_setup = time.perf_counter() def pre_run(self, step, level_number): @@ -61,6 +59,7 @@ def pre_run(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().pre_run(step, level_number) self.__t0_run = time.perf_counter() def pre_predict(self, step, level_number): @@ -71,6 +70,7 @@ def pre_predict(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().pre_predict(step, level_number) self.__t0_predict = time.perf_counter() def pre_step(self, step, level_number): @@ -81,6 +81,7 @@ def pre_step(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().pre_step(step, level_number) self.__t0_step = time.perf_counter() def pre_iteration(self, step, level_number): @@ -91,6 +92,7 @@ def pre_iteration(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().pre_iteration(step, level_number) self.__t0_iteration = time.perf_counter() def pre_sweep(self, step, level_number): @@ -101,6 +103,7 @@ def pre_sweep(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().pre_sweep(step, level_number) self.__t0_sweep = time.perf_counter() def pre_comm(self, step, level_number): @@ -111,6 +114,7 @@ def pre_comm(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().pre_comm(step, level_number) if len(self.__t0_comm) >= level_number + 1: self.__t0_comm[level_number] = time.perf_counter() else: @@ -131,6 +135,7 @@ def post_comm(self, step, level_number, add_to_stats=False): level_number (int): the current level number add_to_stats (bool): set if result should go to stats 
object """ + super().post_comm(step, level_number) assert len(self.__t1_comm) >= level_number + 1 self.__t1_comm[level_number] += time.perf_counter() - self.__t0_comm[level_number] @@ -156,6 +161,7 @@ def post_sweep(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().post_sweep(step, level_number) self.__t1_sweep = time.perf_counter() L = step.levels[level_number] @@ -198,6 +204,7 @@ def post_iteration(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().post_iteration(step, level_number) self.__t1_iteration = time.perf_counter() L = step.levels[level_number] @@ -229,6 +236,7 @@ def post_step(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().post_step(step, level_number) self.__t1_step = time.perf_counter() L = step.levels[level_number] @@ -275,6 +283,7 @@ def post_predict(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().post_predict(step, level_number) self.__t1_predict = time.perf_counter() L = step.levels[level_number] @@ -297,6 +306,7 @@ def post_run(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().post_run(step, level_number) self.__t1_run = time.perf_counter() L = step.levels[level_number] @@ -319,6 +329,7 @@ def post_setup(self, step, level_number): step (pySDC.Step.step): the current step level_number (int): the current level number """ + super().post_setup(step, level_number) self.__t1_setup = time.perf_counter() self.add_to_stats( diff --git a/pySDC/implementations/hooks/log_embedded_error_estimate.py b/pySDC/implementations/hooks/log_embedded_error_estimate.py index fb41974411..60fa0c6ea7 100644 --- a/pySDC/implementations/hooks/log_embedded_error_estimate.py +++ b/pySDC/implementations/hooks/log_embedded_error_estimate.py @@ -17,7 +17,8 @@ def post_step(self, step, level_number): Returns: None """ - # some abbreviations + super().post_step(step, level_number) + L = step.levels[level_number] self.add_to_stats( diff --git a/pySDC/implementations/hooks/log_extrapolated_error_estimate.py b/pySDC/implementations/hooks/log_extrapolated_error_estimate.py index 5eea52bca2..1530db9e18 100644 --- a/pySDC/implementations/hooks/log_extrapolated_error_estimate.py +++ b/pySDC/implementations/hooks/log_extrapolated_error_estimate.py @@ -17,6 +17,8 @@ def post_step(self, step, level_number): Returns: None """ + super().post_step(step, level_number) + # some abbreviations L = step.levels[level_number] diff --git a/pySDC/implementations/hooks/log_solution.py b/pySDC/implementations/hooks/log_solution.py index 560985d0e5..9d2d72d2e6 100644 --- a/pySDC/implementations/hooks/log_solution.py +++ b/pySDC/implementations/hooks/log_solution.py @@ -17,7 +17,8 @@ def post_step(self, step, level_number): Returns: None """ - # some abbreviations + super().post_step(step, level_number) + L = step.levels[level_number] L.sweep.compute_end_point() diff --git a/pySDC/implementations/hooks/log_step_size.py b/pySDC/implementations/hooks/log_step_size.py index 90e4fbb6de..62dada9ab0 100644 --- a/pySDC/implementations/hooks/log_step_size.py +++ b/pySDC/implementations/hooks/log_step_size.py @@ -17,7 +17,8 @@ def post_step(self, step, level_number): Returns: None """ - # some abbreviations + super().post_step(step, level_number) + L = 
step.levels[level_number] self.add_to_stats( diff --git a/pySDC/projects/Resilience/hook.py b/pySDC/projects/Resilience/hook.py index e84ecd21c5..a980e0223c 100644 --- a/pySDC/projects/Resilience/hook.py +++ b/pySDC/projects/Resilience/hook.py @@ -17,6 +17,8 @@ def pre_run(self, step, level_number): """ Record los conditiones initiales """ + super().pre_run(step, level_number) + L = step.levels[level_number] self.add_to_stats(process=0, time=0, level=0, iter=0, sweep=0, type='u0', value=L.u[0]) @@ -24,10 +26,9 @@ def post_step(self, step, level_number): """ Record final solutions as well as step size and error estimates """ - # some abbreviations - L = step.levels[level_number] + super().post_step(step, level_number) - L.sweep.compute_end_point() + L = step.levels[level_number] self.add_to_stats( process=step.status.slot, From f14c47dcc23bad45e047dc0ceb6d04d3dada1f2e Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Fri, 13 Jan 2023 14:03:56 +0100 Subject: [PATCH 14/23] Changed CI pipeline so use secrets from target repository upon PR --- .github/workflows/ci_pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml index 6515e8d272..cca64b4a01 100644 --- a/.github/workflows/ci_pipeline.yml +++ b/.github/workflows/ci_pipeline.yml @@ -2,7 +2,7 @@ name: CI pipeline for pySDC on: push: - pull_request: + pull_request_target: schedule: - cron: '1 5 * * 1' From f6b465e6069ac15110d4de1da66da768e18917fe Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Fri, 13 Jan 2023 14:24:25 +0100 Subject: [PATCH 15/23] Added types for `on: pull_request_target` --- .github/workflows/ci_pipeline.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml index cca64b4a01..f552fba70c 100644 --- a/.github/workflows/ci_pipeline.yml +++ b/.github/workflows/ci_pipeline.yml @@ -3,6 +3,7 @@ name: CI pipeline for pySDC on: push: pull_request_target: + types: [opened, edited, closed, reopened, synchronize, ready_for_review, review_requested , review_request_removed, auto_merge_enabled, auto_merge_disabled] schedule: - cron: '1 5 * * 1' From 4a8a5b6b6efd7ac336b6f52dcb61a7164d2f7359 Mon Sep 17 00:00:00 2001 From: Thomas Baumann Date: Fri, 13 Jan 2023 14:27:48 +0100 Subject: [PATCH 16/23] Reverted previous commit because it didn't help --- .github/workflows/ci_pipeline.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml index f552fba70c..cca64b4a01 100644 --- a/.github/workflows/ci_pipeline.yml +++ b/.github/workflows/ci_pipeline.yml @@ -3,7 +3,6 @@ name: CI pipeline for pySDC on: push: pull_request_target: - types: [opened, edited, closed, reopened, synchronize, ready_for_review, review_requested , review_request_removed, auto_merge_enabled, auto_merge_disabled] schedule: - cron: '1 5 * * 1' From 81d594433d39d2e1cba0430b1d1184d9e0902003 Mon Sep 17 00:00:00 2001 From: Robert Speck Date: Sun, 15 Jan 2023 09:38:18 +0100 Subject: [PATCH 17/23] Update ci_pipeline.yml --- .github/workflows/ci_pipeline.yml | 74 +++++++++++++++---------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml index 6515e8d272..47cd672dc8 100644 --- a/.github/workflows/ci_pipeline.yml +++ b/.github/workflows/ci_pipeline.yml @@ -35,23 +35,23 @@ jobs: run: | flakeheaven lint --benchmark pySDC - mirror_to_gitlab: +# mirror_to_gitlab: - runs-on: 
ubuntu-latest +# runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v1 +# steps: +# - name: Checkout +# uses: actions/checkout@v1 - - name: Mirror - uses: jakob-fritz/github2lab_action@main - env: - MODE: 'mirror' # Either 'mirror', 'get_status', or 'both' - GITLAB_TOKEN: ${{ secrets.GITLAB_SECRET_H }} - FORCE_PUSH: "true" - GITLAB_HOSTNAME: "codebase.helmholtz.cloud" - GITLAB_PROJECT_ID: "3525" - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} +# - name: Mirror +# uses: jakob-fritz/github2lab_action@main +# env: +# MODE: 'mirror' # Either 'mirror', 'get_status', or 'both' +# GITLAB_TOKEN: ${{ secrets.GITLAB_SECRET_H }} +# FORCE_PUSH: "true" +# GITLAB_HOSTNAME: "codebase.helmholtz.cloud" +# GITLAB_PROJECT_ID: "3525" +# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} user_cpu_tests_linux: runs-on: ubuntu-latest @@ -121,31 +121,31 @@ jobs: pytest --continue-on-collection-errors -v --durations=0 pySDC/tests -m ${{ matrix.env }} - wait_for_gitlab: - runs-on: ubuntu-latest +# wait_for_gitlab: +# runs-on: ubuntu-latest - needs: - - mirror_to_gitlab +# needs: +# - mirror_to_gitlab - steps: - - name: Wait - uses: jakob-fritz/github2lab_action@main - env: - MODE: 'get_status' # Either 'mirror', 'get_status', or 'both' - GITLAB_TOKEN: ${{ secrets.GITLAB_SECRET_H }} - FORCE_PUSH: "true" - GITLAB_HOSTNAME: "codebase.helmholtz.cloud" - GITLAB_PROJECT_ID: "3525" - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} +# steps: +# - name: Wait +# uses: jakob-fritz/github2lab_action@main +# env: +# MODE: 'get_status' # Either 'mirror', 'get_status', or 'both' +# GITLAB_TOKEN: ${{ secrets.GITLAB_SECRET_H }} +# FORCE_PUSH: "true" +# GITLAB_HOSTNAME: "codebase.helmholtz.cloud" +# GITLAB_PROJECT_ID: "3525" +# GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} -# - name: Get and prepare artifacts -# run: | -# pipeline_id=$(curl --header "PRIVATE-TOKEN: ${{ secrets.GITLAB_SECRET_H }}" --silent "https://gitlab.hzdr.de/api/v4/projects/3525/repository/commits/${{ github.head_ref || github.ref_name }}" | jq '.last_pipeline.id') -# job_id=$(curl --header "PRIVATE-TOKEN: ${{ secrets.GITLAB_SECRET_H }}" --silent "https://gitlab.hzdr.de/api/v4/projects/3525/pipelines/$pipeline_id/jobs" | jq '.[] | select( .name == "bundle" ) | select( .status == "success" ) | .id') -# curl --output artifacts.zip "https://gitlab.hzdr.de/api/v4/projects/3525/jobs/$job_id/artifacts" -# rm -rf data -# unzip artifacts.zip -# ls -ratl +# # - name: Get and prepare artifacts +# # run: | +# # pipeline_id=$(curl --header "PRIVATE-TOKEN: ${{ secrets.GITLAB_SECRET_H }}" --silent "https://gitlab.hzdr.de/api/v4/projects/3525/repository/commits/${{ github.head_ref || github.ref_name }}" | jq '.last_pipeline.id') +# # job_id=$(curl --header "PRIVATE-TOKEN: ${{ secrets.GITLAB_SECRET_H }}" --silent "https://gitlab.hzdr.de/api/v4/projects/3525/pipelines/$pipeline_id/jobs" | jq '.[] | select( .name == "bundle" ) | select( .status == "success" ) | .id') +# # curl --output artifacts.zip "https://gitlab.hzdr.de/api/v4/projects/3525/jobs/$job_id/artifacts" +# # rm -rf data +# # unzip artifacts.zip +# # ls -ratl post-processing: @@ -156,7 +156,7 @@ jobs: needs: - lint - user_cpu_tests_linux - - wait_for_gitlab +# - wait_for_gitlab defaults: run: From a83fa6bc65a0b90356ae6eeaea53a0866be041f2 Mon Sep 17 00:00:00 2001 From: Robert Speck Date: Mon, 16 Jan 2023 09:22:49 +0100 Subject: [PATCH 18/23] Update ci_pipeline.yml --- .github/workflows/ci_pipeline.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci_pipeline.yml 
index 47cd672dc8..b45cf0b402 100644
--- a/.github/workflows/ci_pipeline.yml
+++ b/.github/workflows/ci_pipeline.yml
@@ -188,7 +188,10 @@ jobs:
       run: |
         pip install genbadge[all]
         genbadge coverage -i coverage.xml -o htmlcov/coverage-badge.svg
-
+
+    - name: Upload coverage reports to Codecov
+      uses: codecov/codecov-action@v3
+
 #    - name: Generate benchmark report
 #      uses: pancetta/github-action-benchmark@v1
 #      if: ${{ (!contains(github.event.head_commit.message, '[CI-no-benchmarks]')) && (github.event_name == 'push') }}

From 41a1113dc37edd29c636fb2182ee92bbda02fdf2 Mon Sep 17 00:00:00 2001
From: Robert Speck
Date: Mon, 16 Jan 2023 09:23:41 +0100
Subject: [PATCH 19/23] Update pyproject.toml

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index ede34d02c1..f8bbeb3e87 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -78,7 +78,7 @@ pyflakes = [ '-E203', '-E741', '-E402', '-W504', '-W605', '-F401' ]
 #flake8-black = ["+*"]
-flake8-bugbear = ["+*", '-B023']
+flake8-bugbear = ["+*", '-B023', '-B028']
 flake8-comprehensions = ["+*", '-C408', '-C417']

 [tool.black]

From 515936815a0350261e9b843c9ecc2c04b257555c Mon Sep 17 00:00:00 2001
From: Robert Speck
Date: Mon, 16 Jan 2023 09:51:52 +0100
Subject: [PATCH 20/23] Update README.rst

---
 README.rst | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index 41ce82469e..96a16259d2 100644
--- a/README.rst
+++ b/README.rst
@@ -1,5 +1,6 @@
 |badge-ga|
 |badge-ossf|
+|badge-cc|

 Welcome to pySDC!
 =================
@@ -82,4 +83,6 @@ The project also received help from the `Helmholtz Platform for Research Softwar
 .. |badge-ga| image:: https://github.com/Parallel-in-Time/pySDC/actions/workflows/ci_pipeline.yml/badge.svg
    :target: https://github.com/Parallel-in-Time/pySDC/actions/workflows/ci_pipeline.yml
 .. |badge-ossf| image:: https://bestpractices.coreinfrastructure.org/projects/6909/badge
-   :target: https://bestpractices.coreinfrastructure.org/projects/6909
\ No newline at end of file
+   :target: https://bestpractices.coreinfrastructure.org/projects/6909
+.. |badge-cc| image:: https://codecov.io/gh/Parallel-in-Time/pySDC/branch/master/graph/badge.svg?token=hpP18dmtgS
+   :target: https://codecov.io/gh/Parallel-in-Time/pySDC

From 1de8d2f8a0958d0d7f35b4df19971fd2c8ebdadd Mon Sep 17 00:00:00 2001
From: Robert Speck
Date: Mon, 16 Jan 2023 09:53:38 +0100
Subject: [PATCH 21/23] Update README.rst

---
 README.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.rst b/README.rst
index 96a16259d2..3cb5437fbf 100644
--- a/README.rst
+++ b/README.rst
@@ -80,7 +80,7 @@ This project also received funding from the `German Federal Ministry of Educatio

 The project also received help from the `Helmholtz Platform for Research Software Engineering - Preparatory Study (HiRSE_PS) `_.

-.. |badge-ga| image:: https://github.com/Parallel-in-Time/pySDC/actions/workflows/ci_pipeline.yml/badge.svg
+.. |badge-ga| image:: https://github.com/Parallel-in-Time/pySDC/actions/workflows/ci_pipeline.yml/badge.svg?branch=master
    :target: https://github.com/Parallel-in-Time/pySDC/actions/workflows/ci_pipeline.yml
 .. |badge-ossf| image:: https://bestpractices.coreinfrastructure.org/projects/6909/badge
    :target: https://bestpractices.coreinfrastructure.org/projects/6909

From 23976b65601a0037c2df880db61eac382d279660 Mon Sep 17 00:00:00 2001
From: Thomas Baumann
Date: Mon, 16 Jan 2023 11:16:12 +0100
Subject: [PATCH 22/23] Removing any changes to the CI pipeline from this PR

---
 .github/workflows/ci_pipeline.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml
index cca64b4a01..6515e8d272 100644
--- a/.github/workflows/ci_pipeline.yml
+++ b/.github/workflows/ci_pipeline.yml
@@ -2,7 +2,7 @@ name: CI pipeline for pySDC

 on:
   push:
-  pull_request_target:
+  pull_request:
   schedule:
     - cron: '1 5 * * 1'

From e24debc2413d6cc142d5e3d25fbfc9c343948797 Mon Sep 17 00:00:00 2001
From: Robert Speck
Date: Mon, 16 Jan 2023 13:08:53 +0100
Subject: [PATCH 23/23] Update README.rst

---
 README.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/README.rst b/README.rst
index 3cb5437fbf..e420177c73 100644
--- a/README.rst
+++ b/README.rst
@@ -1,6 +1,7 @@
 |badge-ga|
 |badge-ossf|
 |badge-cc|
+|zenodo|

 Welcome to pySDC!
 =================