2 changes: 1 addition & 1 deletion pySDC/helpers/blocks.py
@@ -102,7 +102,7 @@ def ranks(self):
@property
def localBounds(self):
iLocList, nLocList = [], []
- for rank, nPoints, nBlocks in zip(self.ranks, self.gridSizes, self.nBlocks):
+ for rank, nPoints, nBlocks in zip(self.ranks, self.gridSizes, self.nBlocks, strict=True):
n0 = nPoints // nBlocks
nRest = nPoints - nBlocks * n0
nLoc = n0 + 1 * (rank < nRest)
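For context on the pattern repeated throughout this diff: zip() normally stops at the shortest iterable and silently drops surplus items, whereas strict=True (available since Python 3.10) raises a ValueError on a length mismatch. A minimal sketch with hypothetical lists, not taken from the code above:

gridSizes = [16, 16]   # hypothetical example data
nBlocks = [4, 4, 2]    # one entry too many

# Default zip(): the surplus entry is silently ignored.
print(list(zip(gridSizes, nBlocks)))  # [(16, 4), (16, 4)]

# zip(..., strict=True): the mismatch is reported instead of hidden.
try:
    list(zip(gridSizes, nBlocks, strict=True))
except ValueError as err:
    print(err)  # e.g. "zip() argument 2 is longer than argument 1"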
2 changes: 1 addition & 1 deletion pySDC/helpers/fieldsIO.py
@@ -714,7 +714,7 @@ def writeFields_MPI(fileName, dtypeIdx, algo, nSteps, nVar, gridSizes):

iLoc, nLoc = blocks.localBounds
Rectilinear.setupMPI(comm, iLoc, nLoc)
- s = [slice(i, i + n) for i, n in zip(iLoc, nLoc)]
+ s = [slice(i, i + n) for i, n in zip(iLoc, nLoc, strict=True)]
u0 = u0[(slice(None), *s)]

f1 = Rectilinear(DTYPES[dtypeIdx], fileName)
18 changes: 9 additions & 9 deletions pySDC/helpers/transfer_helper.py
@@ -27,7 +27,7 @@ def next_neighbors_periodic(p, ps, k):

# zip it
value_index = []
- for d, i in zip(distance_to_p, range(distance_to_p.size)):
+ for d, i in zip(distance_to_p, range(distance_to_p.size), strict=True):
value_index.append((d, i))
# sort by distance
value_index_sorted = sorted(value_index, key=lambda s: s[0])
@@ -53,7 +53,7 @@ def next_neighbors(p, ps, k):
distance_to_p = np.abs(ps - p)
# zip it
value_index = []
- for d, i in zip(distance_to_p, range(distance_to_p.size)):
+ for d, i in zip(distance_to_p, range(distance_to_p.size), strict=True):
value_index.append((d, i))
# sort by distance
value_index_sorted = sorted(value_index, key=lambda s: s[0])
@@ -80,7 +80,7 @@ def continue_periodic_array(arr, nn):
else:
cont_arr = [arr[nn[0]]]
shift = 0.0
- for n, d in zip(nn[1:], d_nn):
+ for n, d in zip(nn[1:], d_nn, strict=True):
if d != 1:
shift = -1
cont_arr.append(arr[n] + shift)
@@ -107,7 +107,7 @@ def restriction_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1):

if periodic:
M = np.zeros((coarse_grid.size, fine_grid.size))
- for i, p in zip(range(n_g), coarse_grid):
+ for i, p in zip(range(n_g), coarse_grid, strict=True):
nn = next_neighbors_periodic(p, fine_grid, k)
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
cont_arr = continue_periodic_array(fine_grid, nn)
@@ -120,7 +120,7 @@ def restriction_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1):
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))
else:
M = np.zeros((coarse_grid.size, fine_grid.size + 2 * pad))
- for i, p in zip(range(n_g), coarse_grid):
+ for i, p in zip(range(n_g), coarse_grid, strict=True):
padded_f_grid = border_padding(fine_grid, pad, pad)
nn = next_neighbors(p, padded_f_grid, k)
# construct the lagrange polynomials for the k neighbors
@@ -158,7 +158,7 @@ def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1,
M = np.zeros((fine_grid.size, coarse_grid.size))

if equidist_nested:
- for i, p in zip(range(n_f), fine_grid):
+ for i, p in zip(range(n_f), fine_grid, strict=True):
if i % 2 == 0:
M[i, int(i / 2)] = 1.0
else:
@@ -189,7 +189,7 @@ def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1,
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))

else:
- for i, p in zip(range(n_f), fine_grid):
+ for i, p in zip(range(n_f), fine_grid, strict=True):
nn = next_neighbors_periodic(p, coarse_grid, k)
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
cont_arr = continue_periodic_array(coarse_grid, nn)
@@ -208,7 +208,7 @@ def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1,
padded_c_grid = border_padding(coarse_grid, pad, pad)

if equidist_nested:
- for i, p in zip(range(n_f), fine_grid):
+ for i, p in zip(range(n_f), fine_grid, strict=True):
if i % 2 != 0:
M[i, int((i - 1) / 2) + 1] = 1.0
else:
@@ -231,7 +231,7 @@ def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, periodic=False, pad=1,
M[i, nn] = np.asarray(list(map(lambda x: x(p), bary_pol)))

else:
- for i, p in zip(range(n_f), fine_grid):
+ for i, p in zip(range(n_f), fine_grid, strict=True):
nn = next_neighbors(p, padded_c_grid, k)
# construct the lagrange polynomials for the k neighbors
circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
2 changes: 1 addition & 1 deletion pySDC/helpers/vtkIO.py
@@ -52,7 +52,7 @@ def field(u):
return numpy_support.numpy_to_vtk(num_array=u.ravel(order='F'), deep=True, array_type=vtk.VTK_FLOAT)

pointData = vtr.GetPointData()
- for name, u in zip(varNames, data):
+ for name, u in zip(varNames, data, strict=True):
uVTK = field(u)
uVTK.SetName(name)
pointData.AddArray(uVTK)
@@ -134,7 +134,7 @@ def __init__(
N = self.fft.global_shape()
k = [np.fft.fftfreq(n, 1.0 / n).astype(int) for n in N[:-1]]
k.append(np.fft.rfftfreq(N[-1], 1.0 / N[-1]).astype(int))
- K = [ki[si] for ki, si in zip(k, s)]
+ K = [ki[si] for ki, si in zip(k, s, strict=True)]
Ks = list(np.meshgrid(*K, indexing='ij', sparse=True))
Lp = 2 * np.pi / L
for i in range(ndim):
2 changes: 1 addition & 1 deletion pySDC/implementations/problem_classes/Brusselator.py
@@ -178,7 +178,7 @@ def plot(self, u, t=None, fig=None): # pragma: no cover

vmin = u.min()
vmax = u.max()
- for i, label in zip([self.iU, self.iV], [r'$u$', r'$v$']):
+ for i, label in zip([self.iU, self.iV], [r'$u$', r'$v$'], strict=True):
im = axs[i].pcolormesh(self.X[0], self.X[1], u[i], vmin=vmin, vmax=vmax)
axs[i].set_aspect(1)
axs[i].set_title(label)
2 changes: 1 addition & 1 deletion pySDC/implementations/problem_classes/Burgers.py
@@ -324,7 +324,7 @@ def plot(self, u, t=None, fig=None, vmin=None, vmax=None): # pragma: no cover
imV = axs[1].pcolormesh(self.X, self.Z, u[iv].real, vmin=vmin, vmax=vmax)
imVort = axs[2].pcolormesh(self.X, self.Z, self.compute_vorticity(u).real)

- for i, label in zip([0, 1, 2], [r'$u$', '$v$', 'vorticity']):
+ for i, label in zip([0, 1, 2], [r'$u$', '$v$', 'vorticity'], strict=True):
axs[i].set_aspect(1)
axs[i].set_title(label)

2 changes: 1 addition & 1 deletion pySDC/implementations/problem_classes/GrayScott_MPIFFT.py
@@ -343,7 +343,7 @@ def plot(self, u, t=None, fig=None): # pragma: no cover

vmin = u.min()
vmax = u.max()
- for i, label in zip([self.iU, self.iV], [r'$u$', r'$v$']):
+ for i, label in zip([self.iU, self.iV], [r'$u$', r'$v$'], strict=True):
im = axs[i].pcolormesh(self.X[0], self.X[1], u[i], vmin=vmin, vmax=vmax)
axs[i].set_aspect(1)
axs[i].set_title(label)
3 changes: 2 additions & 1 deletion pySDC/implementations/problem_classes/RayleighBenard.py
@@ -363,7 +363,7 @@ def plot(self, u, t=None, fig=None, quantity='T'): # pragma: no cover

imT = axs[0].pcolormesh(self.X, self.Z, u[self.index(quantity)].real)

- for i, label in zip([0, 1], [rf'${quantity}$', 'vorticity']):
+ for i, label in zip([0, 1], [rf'${quantity}$', 'vorticity'], strict=True):
axs[i].set_aspect(1)
axs[i].set_title(label)

@@ -630,6 +630,7 @@ def post_step(self, step, level_number):
for key, value in zip(
['Nusselt', 'buoyancy_production', 'viscous_dissipation'],
[Nusselt, buoyancy_production, viscous_dissipation],
+ strict=True,
):
self.add_to_stats(
process=step.status.slot,
6 changes: 3 additions & 3 deletions pySDC/implementations/problem_classes/RayleighBenard3D.py
@@ -255,7 +255,7 @@ def eval_f(self, u, *args, **kwargs):

fexpl_pad = self.xp.zeros_like(u_pad)
for i in derivative_indices:
- for i_vel, iD in zip([iu, iv, iw], range(self.ndim)):
+ for i_vel, iD in zip([iu, iv, iw], range(self.ndim), strict=True):
fexpl_pad[i] -= u_pad[i_vel] * derivatives[iD][i]

if self.spectral_space:
@@ -398,7 +398,7 @@ def get_frequency_spectrum(self, u):

# compute local spectrum
local_spectrum = self.xp.empty(shape=(2, energy.shape[3], n_k))
- for i, k in zip(range(n_k), unique_k):
+ for i, k in zip(range(n_k), unique_k, strict=True):
mask = xp.logical_or(abs_kx == k, abs_ky == k)
local_spectrum[..., i] = xp.sum(energy[indices, mask, :], axis=1)

@@ -411,7 +411,7 @@ def get_frequency_spectrum(self, u):

spectra = self.comm.allgather(local_spectrum)
spectrum = self.xp.zeros(shape=(2, self.axes[2].N, n_k_all))
- for ks, _spectrum in zip(k_all, spectra):
+ for ks, _spectrum in zip(k_all, spectra, strict=True):
ks = list(ks)
unique_k_all = list(unique_k_all)
for k in ks:
@@ -120,7 +120,7 @@ def getLaplacian(self):
s = self.fft.local_slice()
N = self.fft.global_shape()
k = [self.xp.fft.fftfreq(n, 1.0 / n).astype(int) for n in N]
- K = [ki[si] for ki, si in zip(k, s)]
+ K = [ki[si] for ki, si in zip(k, s, strict=True)]
Ks = list(self.xp.meshgrid(*K, indexing='ij', sparse=True))
Lp = 2 * np.pi / self.L
for i in range(self.ndim):
11 changes: 6 additions & 5 deletions pySDC/implementations/sweeper_classes/Runge_Kutta.py
@@ -279,16 +279,16 @@ def compute_end_point(self):
lvl.uend = lvl.prob.dtype_u(lvl.u[-1])
if type(self.coll) == ButcherTableauEmbedded:
self.u_secondary = lvl.prob.dtype_u(lvl.u[0])
- for w2, k in zip(self.coll.weights[1], lvl.f[1:]):
+ for w2, k in zip(self.coll.weights[1], lvl.f[1:], strict=True):
self.u_secondary += lvl.dt * w2 * k
else:
lvl.uend = lvl.prob.dtype_u(lvl.u[0])
if type(self.coll) == ButcherTableau:
- for w, k in zip(self.coll.weights, lvl.f[1:]):
+ for w, k in zip(self.coll.weights, lvl.f[1:], strict=True):
lvl.uend += lvl.dt * w * k
elif type(self.coll) == ButcherTableauEmbedded:
self.u_secondary = lvl.prob.dtype_u(lvl.u[0])
- for w1, w2, k in zip(self.coll.weights[0], self.coll.weights[1], lvl.f[1:]):
+ for w1, w2, k in zip(self.coll.weights[0], self.coll.weights[1], lvl.f[1:], strict=True):
lvl.uend += lvl.dt * w1 * k
self.u_secondary += lvl.dt * w2 * k

@@ -459,12 +459,12 @@ def compute_end_point(self):
lvl.uend = lvl.u[-1]
if type(self.coll) == ButcherTableauEmbedded:
self.u_secondary = lvl.prob.dtype_u(lvl.u[0])
- for w2, w2E, k in zip(self.coll.weights[1], self.coll_explicit.weights[1], lvl.f[1:]):
+ for w2, w2E, k in zip(self.coll.weights[1], self.coll_explicit.weights[1], lvl.f[1:], strict=True):
self.u_secondary += lvl.dt * (w2 * k.impl + w2E * k.expl)
else:
lvl.uend = lvl.prob.dtype_u(lvl.u[0])
if type(self.coll) == ButcherTableau:
- for w, wE, k in zip(self.coll.weights, self.coll_explicit.weights, lvl.f[1:]):
+ for w, wE, k in zip(self.coll.weights, self.coll_explicit.weights, lvl.f[1:], strict=True):
lvl.uend += lvl.dt * (w * k.impl + wE * k.expl)
elif type(self.coll) == ButcherTableauEmbedded:
self.u_secondary = lvl.u[0].copy()
@@ -474,6 +474,7 @@ def compute_end_point(self):
self.coll_explicit.weights[0],
self.coll_explicit.weights[1],
lvl.f[1:],
+ strict=True,
):
lvl.uend += lvl.dt * (w1 * k.impl + w1E * k.expl)
self.u_secondary += lvl.dt * (w2 * k.impl + w2E * k.expl)
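An aside on the Runge-Kutta hunks above: the zipped sequences are the Butcher tableau weights and the stored stage right-hand sides, so strict=True turns any mismatch between the number of weights and the number of stages into an immediate error instead of a silently truncated quadrature sum. A rough, self-contained sketch of that final update step, with made-up data standing in for coll.weights and lvl.f[1:]:

import numpy as np

dt = 0.1
u0 = np.array([1.0, 0.0])               # hypothetical initial value
weights = [1 / 6, 1 / 3, 1 / 3, 1 / 6]  # classical RK4 weights b_i
stages = [np.array([0.0, 1.0])] * 4     # placeholder stage values k_i

# u_end = u0 + dt * sum_i b_i * k_i; strict=True fails loudly if weights and
# stages ever disagree in length, rather than dropping stages from the sum.
u_end = u0.copy()
for w, k in zip(weights, stages, strict=True):
    u_end += dt * w * k
print(u_end)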
@@ -29,7 +29,7 @@ def __init__(self, fine_prob, coarse_prob, params):

Nf = list(self.fine_prob.fft.global_shape())
Nc = list(self.coarse_prob.fft.global_shape())
- self.ratio = [int(nf / nc) for nf, nc in zip(Nf, Nc)]
+ self.ratio = [int(nf / nc) for nf, nc in zip(Nf, Nc, strict=True)]
axes = tuple(range(len(Nf)))

fft_args = {}
@@ -124,7 +124,7 @@ def run_simulation(name='', spectral=None, nprocs_space=None):
# print and store radii and error over time
err_test = 0.0
results = dict()
- for cr, er, cv, ev in zip(computed_radii, exact_radii, computed_vol, exact_vol):
+ for cr, er, cv, ev in zip(computed_radii, exact_radii, computed_vol, exact_vol, strict=True):
if name == 'AC-test-noforce':
exrad = er[1]
exvol = ev[1]
2 changes: 1 addition & 1 deletion pySDC/projects/AllenCahn_Bayreuth/visualize.py
@@ -21,7 +21,7 @@ def plot_data(name=''):
json_files = sorted(glob.glob(f'./data/{name}_*.json'))
data_files = sorted(glob.glob(f'./data/{name}_*.dat'))

- for json_file, data_file in zip(json_files, data_files):
+ for json_file, data_file in zip(json_files, data_files, strict=True):
with open(json_file, 'r') as fp:
obj = json.load(fp)

4 changes: 2 additions & 2 deletions pySDC/projects/AllenCahn_Bayreuth/visualize_temp.py
@@ -27,7 +27,7 @@ def plot_data(path='./data', name='', output='.'):
json_files = sorted(glob.glob(f'{path}/{name}_*.json'))
data_files = sorted(glob.glob(f'{path}/{name}_*.dat'))

- for json_file, data_file in zip(json_files, data_files):
+ for json_file, data_file in zip(json_files, data_files, strict=True):
with open(json_file, 'r') as fp:
obj = json.load(fp)

@@ -74,7 +74,7 @@ def make_movie(path='./data', name='', output='.'):
data_files = sorted(glob.glob(f'{path}/{name}_*.dat'))

img_list = []
- for json_file, data_file in zip(json_files, data_files):
+ for json_file, data_file in zip(json_files, data_files, strict=True):
with open(json_file, 'r') as fp:
obj = json.load(fp)

2 changes: 1 addition & 1 deletion pySDC/projects/AsympConv/conv_test_to0.py
@@ -33,7 +33,7 @@ def compute_and_plot_specrad(Nnodes, lam):
color_list = ['red', 'blue']
marker_list = ['s', 'o']

- setup_list = zip(Nsteps_list, color_list, marker_list)
+ setup_list = zip(Nsteps_list, color_list, marker_list, strict=True)

xlist = [0.1**i for i in range(11)]

2 changes: 1 addition & 1 deletion pySDC/projects/AsympConv/conv_test_toinf.py
@@ -35,7 +35,7 @@ def compute_and_plot_specrad(Nnodes, lam):
color_list = ['red', 'blue', 'green']
marker_list = ['s', 'o', 'd']

- setup_list = zip(Nsweep_list, color_list, marker_list)
+ setup_list = zip(Nsweep_list, color_list, marker_list, strict=True)

xlist = [10**i for i in range(11)]

3 changes: 2 additions & 1 deletion pySDC/projects/DAE/plotting/loglog_plot.py
@@ -68,7 +68,7 @@ def plot_convergence(): # pragma: no cover
ylim = (sys.float_info.max, sys.float_info.min)

for num_nodes, color, shape, style, order in zip(
- num_nodes_list, color_list, shape_list, style_list, order_list
+ num_nodes_list, color_list, shape_list, style_list, order_list, strict=True
):
# Plot convergence data
ax.loglog(
@@ -99,6 +99,7 @@ def plot_convergence(): # pragma: no cover
data[qd_type][num_nodes]['dt'],
data[qd_type][num_nodes]['position'],
data[qd_type][num_nodes]['offset'],
+ strict=True,
):
ax.annotate(
niter,
2 changes: 1 addition & 1 deletion pySDC/projects/DAE/plotting/semilogy_plot.py
@@ -29,7 +29,7 @@ def plot_convergence(): # pragma: no cover
lns1 = list()
lns2 = list()

- for num_nodes, color, shape in zip(num_nodes_list, color_list, shape_list):
+ for num_nodes, color, shape in zip(num_nodes_list, color_list, shape_list, strict=True):
# Plot convergence data
lns1.append(
ax1.semilogy(
2 changes: 1 addition & 1 deletion pySDC/projects/DAE/problems/wscc9BusSystem.py
@@ -866,7 +866,7 @@ def __init__(self, newton_tol=1e-10):
self.IC6 = [row[3] for row in self.bus] # Column 4 in MATLAB is indexed as 3 in Python
self.IC6 = [val / self.baseMVA for val in self.IC6]

- self.IC = list(zip(self.IC1, self.IC2, self.IC3, self.IC4, self.IC5, self.IC6))
+ self.IC = list(zip(self.IC1, self.IC2, self.IC3, self.IC4, self.IC5, self.IC6, strict=True))

self.PL = [row[4] for row in self.IC] # Column 5 in MATLAB is indexed as 4 in Python
self.QL = [row[5] for row in self.IC] # Column 6 in MATLAB is indexed as 5 in Python
2 changes: 1 addition & 1 deletion pySDC/projects/DAE/run/synchronous_machine_playground.py
@@ -98,7 +98,7 @@ def main():
sol_data = np.array(
[
[(sol[j][1].diff[id], sol[j][1].alg[ia]) for j in range(len(sol))]
- for id, ia in zip(range(len(uend.diff)), range(len(uend.alg)))
+ for id, ia in zip(range(len(uend.diff)), range(len(uend.alg)), strict=True)
]
)
niter = filter_stats(stats, type='niter')
2 changes: 1 addition & 1 deletion pySDC/projects/GPU/analysis_scripts/plot_RBC_matrix.py
@@ -15,7 +15,7 @@ def plot_preconditioners(): # pragma: no cover

fig, axs = plt.subplots(1, 4, figsize=figsize_by_journal('TUHH_thesis', 1, 0.4), sharex=True, sharey=True)

- for M, ax in zip([A, A_b, A_r, A_l], axs):
+ for M, ax in zip([A, A_b, A_r, A_l], axs, strict=True):
ax.imshow((M / abs(M)).real + (M / abs(M)).imag, rasterized=False, cmap='Spectral')

for ax in axs:
6 changes: 3 additions & 3 deletions pySDC/projects/GPU/analysis_scripts/plot_large_simulations.py
@@ -65,7 +65,7 @@ def prob(self):

def plot_work(self): # pragma: no cover
fig, ax = self.get_fig()
- for key, label in zip(['factorizations', 'rhs'], ['LU decompositions', 'rhs evaluations']):
+ for key, label in zip(['factorizations', 'rhs'], ['LU decompositions', 'rhs evaluations'], strict=True):
work = get_sorted(self.stats, type=f'work_{key}')
ax.plot([me[0] for me in work], np.cumsum([4 * me[1] for me in work]), label=fr'\#{label}')
ax.set_yscale('log')
@@ -191,7 +191,7 @@ def get_CFL_limit(self, recompute=False):

def plot_work(self): # pragma: no cover
fig, ax = self.get_fig()
- for key, label in zip(['factorizations', 'rhs'], ['LU decompositions', 'rhs evaluations']):
+ for key, label in zip(['factorizations', 'rhs'], ['LU decompositions', 'rhs evaluations'], strict=True):
work = get_sorted(self.stats, type=f'work_{key}')
ax.plot([me[0] for me in work], np.cumsum([4 * me[1] for me in work]), label=fr'\#{label}')
ax.set_yscale('log')
@@ -251,7 +251,7 @@ def plot_single(idx, ax): # pragma: no cover
im = ax.pcolormesh(X[r], Z[r], data['u'][2], vmin=0, vmax=2, cmap='plasma', rasterized=True), data['t']
return im

- for i, ax in zip(indices, axs):
+ for i, ax in zip(indices, axs, strict=True):
im, t = plot_single(i, ax)
fig.colorbar(im, caxs[ax], label=f'$T(t={{{t:.1f}}})$')

2 changes: 1 addition & 1 deletion pySDC/projects/GPU/run_experiment.py
@@ -124,7 +124,7 @@ def plot_series(args, config): # pragma: no cover

idxs = np.linspace(0, config.num_frames * 0.9, 9, dtype=int)

- for idx, ax in zip(idxs, axs.flatten()):
+ for idx, ax in zip(idxs, axs.flatten(), strict=True):
try:
_fig = config.plot(P=P, idx=idx, n_procs_list=args['procs'], ax=ax)
except FileNotFoundError: