Apply formatting changes from black v23 (#325)
dnerini committed Mar 17, 2023
1 parent 18d87c8 commit 16d9920
Showing 39 changed files with 38 additions and 131 deletions.
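
Note: the dominant changes visible in the hunks below are the removal of the blank line that used to sit directly under a block opener (for/if/def), an extra blank line enforced before comment-led function definitions, and quote/spacing normalization in the notebook. A minimal, hypothetical snippet (not taken from the repository) written in the resulting style:

values = [0.5, 1.2, 3.4]


# Compute a simple running sum of the values above
# (two blank lines now precede this comment-led function).
def running_sum(data):
    total = 0.0
    for value in data:
        total += value  # no blank line is kept directly under the "for" opener
    return total


print(running_sum(values))  # prints 5.1
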
1 change: 0 additions & 1 deletion examples/advection_correction.py
@@ -102,7 +102,6 @@ def advection_correction(R, T=5, t=1):
         np.arange(R[0].shape[1], dtype=float), np.arange(R[0].shape[0], dtype=float)
     )
     for i in range(t, T + t, t):
-
         pos1 = (y - i / T * V[1], x - i / T * V[0])
         R1 = map_coordinates(R[0], pos1, order=1)
 
1 change: 1 addition & 0 deletions examples/anvil_nowcast.py
@@ -123,6 +123,7 @@
 refobs_field, metadata = utils.to_rainrate(refobs_field[-1], metadata)
 refobs_field[refobs_field < 0.5] = 0.0
 
+
 ###############################################################################
 # Plot the extrapolation, S-PROG and ANVIL nowcasts.
 # --------------------------------------------------
1 change: 0 additions & 1 deletion examples/blended_forecast.py
@@ -194,7 +194,6 @@
 leadtimes_min = [30, 60, 90, 120, 150, 180]
 n_leadtimes = len(leadtimes_min)
 for n, leadtime in enumerate(leadtimes_min):
-
     # Nowcast with blending into NWP
     plt.subplot(n_leadtimes, 2, n * 2 + 1)
     plot_precip_field(
2 changes: 1 addition & 1 deletion examples/data_transformations.py
@@ -70,10 +70,10 @@
 # Test data transformations
 # -------------------------
 
+
 # Define method to visualize the data distribution with boxplots and plot the
 # corresponding skewness
 def plot_distribution(data, labels, skw):
-
     N = len(data)
     fig, ax1 = plt.subplots()
     ax2 = ax1.twinx()
64 changes: 34 additions & 30 deletions examples/my_first_nowcast.ipynb
@@ -46,7 +46,7 @@
 },
 "outputs": [],
 "source": [
-"# These libraries are needed for the pygrib library in Colab. \n",
+"# These libraries are needed for the pygrib library in Colab.\n",
 "# Note that is needed if you install pygrib using pip.\n",
 "# If you use conda, the libraries will be installed automatically.\n",
 "! apt-get install libeccodes-dev libproj-dev\n",
@@ -68,7 +68,7 @@
 "source": [
 "# Uninstall existing shapely\n",
 "# We will re-install shapely in the next step by ignoring the binary\n",
-"# wheels to make it compatible with other modules that depend on \n",
+"# wheels to make it compatible with other modules that depend on\n",
 "# GEOS, such as Cartopy (used here).\n",
 "!pip uninstall --yes shapely"
 ]
@@ -83,7 +83,7 @@
 },
 "outputs": [],
 "source": [
-"# To install cartopy in Colab using pip, we need to install the library \n",
+"# To install cartopy in Colab using pip, we need to install the library\n",
 "# dependencies first.\n",
 "\n",
 "!apt-get install -qq libgdal-dev libgeos-dev\n",
@@ -170,9 +170,9 @@
 },
 "outputs": [],
 "source": [
-"# If the configuration file is placed in one of the default locations \n",
-"# (https://pysteps.readthedocs.io/en/latest/user_guide/set_pystepsrc.html#configuration-file-lookup) \n",
-"# it will be loaded automatically when pysteps is imported. \n",
+"# If the configuration file is placed in one of the default locations\n",
+"# (https://pysteps.readthedocs.io/en/latest/user_guide/set_pystepsrc.html#configuration-file-lookup)\n",
+"# it will be loaded automatically when pysteps is imported.\n",
 "config_file_path = create_default_pystepsrc(\"pysteps_data\")"
 ]
 },
@@ -198,6 +198,7 @@
 "source": [
 "# Import pysteps and load the new configuration file\n",
 "import pysteps\n",
+"\n",
 "_ = pysteps.load_config_file(config_file_path, verbose=True)"
 ]
 },
@@ -224,7 +225,8 @@
 "source": [
 "# The default parameters are stored in pysteps.rcparams.\n",
 "from pprint import pprint\n",
-"pprint(pysteps.rcparams.data_sources['mrms'])"
+"\n",
+"pprint(pysteps.rcparams.data_sources[\"mrms\"])"
 ]
 },
 {
@@ -272,7 +274,9 @@
 "start_time = time.time()\n",
 "\n",
 "# Import the data\n",
-"precipitation, metadata, timestep = load_dataset('mrms',frames=35) # precipitation in mm/h\n",
+"precipitation, metadata, timestep = load_dataset(\n",
+" \"mrms\", frames=35\n",
+") # precipitation in mm/h\n",
 "\n",
 "end_time = time.time()\n",
@@ -330,7 +334,7 @@
 },
 "outputs": [],
 "source": [
-"timestep # In minutes"
+"timestep  # In minutes"
 ]
 },
 {
@@ -450,25 +454,25 @@
 "\n",
 "# Let's define some plotting default parameters for the next plots\n",
 "# Note: This is not strictly needed.\n",
-"plt.rc('figure', figsize=(4,4))\n",
-"plt.rc('figure', dpi=100)\n",
-"plt.rc('font', size=14) # controls default text sizes\n",
-"plt.rc('axes', titlesize=14) # fontsize of the axes title\n",
-"plt.rc('axes', labelsize=14) # fontsize of the x and y labels\n",
-"plt.rc('xtick', labelsize=14) # fontsize of the tick labels\n",
-"plt.rc('ytick', labelsize=14) # fontsize of the tick labels\n",
+"plt.rc(\"figure\", figsize=(4, 4))\n",
+"plt.rc(\"figure\", dpi=100)\n",
+"plt.rc(\"font\", size=14)  # controls default text sizes\n",
+"plt.rc(\"axes\", titlesize=14)  # fontsize of the axes title\n",
+"plt.rc(\"axes\", labelsize=14)  # fontsize of the x and y labels\n",
+"plt.rc(\"xtick\", labelsize=14)  # fontsize of the tick labels\n",
+"plt.rc(\"ytick\", labelsize=14)  # fontsize of the tick labels\n",
 "\n",
 "# Let's use the last available composite for nowcasting from the \"training\" data (train_precip[-1])\n",
 "# Also, we will discard any invalid value.\n",
 "valid_precip_values = train_precip[-1][~np.isnan(train_precip[-1])]\n",
 "\n",
 "# Plot the histogram\n",
-"bins= np.concatenate( ([-0.01,0.01], np.linspace(1,40,39)))\n",
-"plt.hist(valid_precip_values,bins=bins,log=True, edgecolor='black')\n",
-"plt.autoscale(tight=True, axis='x')\n",
+"bins = np.concatenate(([-0.01, 0.01], np.linspace(1, 40, 39)))\n",
+"plt.hist(valid_precip_values, bins=bins, log=True, edgecolor=\"black\")\n",
+"plt.autoscale(tight=True, axis=\"x\")\n",
 "plt.xlabel(\"Rainfall intensity [mm/h]\")\n",
 "plt.ylabel(\"Counts\")\n",
-"plt.title('Precipitation rain rate histogram in mm/h units')\n",
+"plt.title(\"Precipitation rain rate histogram in mm/h units\")\n",
 "plt.show()"
 ]
 },
@@ -507,11 +511,11 @@
 "source": [
 "from pysteps.utils import transformation\n",
 "\n",
-"# Log-transform the data to dBR. \n",
+"# Log-transform the data to dBR.\n",
 "# The threshold of 0.1 mm/h sets the fill value to -15 dBR.\n",
-"train_precip_dbr, metadata_dbr = transformation.dB_transform(train_precip, metadata, \n",
-" threshold=0.1, \n",
-" zerovalue=-15.0)"
+"train_precip_dbr, metadata_dbr = transformation.dB_transform(\n",
+"    train_precip, metadata, threshold=0.1, zerovalue=-15.0\n",
+")"
 ]
 },
 {
@@ -554,7 +558,7 @@
 "\n",
 "# We will only use one composite to fit the function to speed up things.\n",
 "# First, remove the no precip areas.\"\n",
-"precip_to_fit = valid_precip_dbr[valid_precip_dbr > -15] \n",
+"precip_to_fit = valid_precip_dbr[valid_precip_dbr > -15]\n",
 "\n",
 "fit_params = scipy.stats.lognorm.fit(precip_to_fit)\n",
 "\n",
@@ -617,7 +621,7 @@
 "plt.title(\"Estimated motion field with the Lukas-Kanade algorithm\")\n",
 "\n",
 "# Plot the last rainfall field in the \"training\" data.\n",
-"# Remember to use the mm/h precipitation data since plot_precip_field assumes \n",
+"# Remember to use the mm/h precipitation data since plot_precip_field assumes\n",
 "# mm/h by default. You can change this behavior using the \"units\" keyword.\n",
 "plot_precip_field(train_precip[-1], geodata=metadata, axis=\"off\")\n",
 "\n",
@@ -662,7 +666,7 @@
 "\n",
 "last_observation[~np.isfinite(last_observation)] = metadata[\"zerovalue\"]\n",
 "\n",
-"# We set the number of leadtimes (the length of the forecast horizon) to the \n",
+"# We set the number of leadtimes (the length of the forecast horizon) to the\n",
 "# length of the observed/verification preipitation data. In this way, we'll get\n",
 "# a forecast that covers these time intervals.\n",
 "n_leadtimes = observed_precip.shape[0]\n",
@@ -746,7 +750,7 @@
 " 64,\n",
 "] # In grid points.\n",
 "\n",
-"scales_in_km = np.array(scales)*4\n",
+"scales_in_km = np.array(scales) * 4\n",
 "\n",
 "# Set the threshold\n",
 "thr = 1.0 # in mm/h\n",
@@ -764,13 +768,13 @@
 "\n",
 "# Now plot it\n",
 "plt.figure()\n",
-"x = np.arange(1, n_leadtimes+1) * timestep\n",
+"x = np.arange(1, n_leadtimes + 1) * timestep\n",
 "plt.plot(x, score, lw=2.0)\n",
 "plt.xlabel(\"Lead time [min]\")\n",
 "plt.ylabel(\"FSS ( > 1.0 mm/h ) \")\n",
 "plt.title(\"Fractions Skill Score\")\n",
 "plt.legend(\n",
-" scales_in_km, \n",
+" scales_in_km,\n",
 " title=\"Scale [km]\",\n",
 " loc=\"center left\",\n",
 " bbox_to_anchor=(1.01, 0.5),\n",
1 change: 1 addition & 0 deletions examples/optical_flow_methods_convergence.py
@@ -116,6 +116,7 @@
 # The "precipitation region" includes the precipitation pattern plus a margin of
 # approximately 20 grid points.
 
+
 ################################################################################
 # Let's create a function to construct different motion fields.
 def create_motion_field(input_precip, motion_type):
1 change: 0 additions & 1 deletion examples/plot_linear_blending.py
@@ -210,7 +210,6 @@
 leadtimes_min = [30, 60, 80, 100, 120]
 n_leadtimes = len(leadtimes_min)
 for n, leadtime in enumerate(leadtimes_min):
-
     # Extrapolation
     plt.subplot(n_leadtimes, 4, n * 4 + 1)
     plot_precip_field(
1 change: 0 additions & 1 deletion examples/rainfarm_downscale.py
@@ -89,7 +89,6 @@
 # half or double the estimated slope.
 alpha = None
 for n in range(num_realizations):
-
     # Spectral slope estimated from the upscaled field
     precip_hr, alpha = rainfarm.downscale(
         precip_lr, ds_factor=scale_factor, alpha=alpha, return_alpha=True
1 change: 0 additions & 1 deletion pysteps/__init__.py
@@ -94,7 +94,6 @@ def config_fname():
 
     file_name = None
     for file_name in _fconfig_candidates_generator():
-
         if file_name is not None:
             if os.path.exists(file_name):
                 st_mode = os.stat(file_name).st_mode
1 change: 0 additions & 1 deletion pysteps/blending/linear_blending.py
@@ -41,7 +41,6 @@ def forecast(
     saliency=False,
     nowcast_kwargs=None,
 ):
-
     """Generate a forecast by linearly or saliency-based blending of nowcasts with NWP data
     Parameters
3 changes: 0 additions & 3 deletions pysteps/datasets.py
@@ -114,7 +114,6 @@ def _print(self, msg):
         sys.stdout.write(msg)
 
     def __call__(self, count, block_size, total_size, exact=True):
-
         self._clear_line()
 
         downloaded_size = count * block_size / (1024**2)
@@ -245,7 +244,6 @@ def delay(_counter):
 
     counter = 0
     while current_date <= final_date:
-
         counter = delay(counter)
 
         sub_dir = os.path.join(dir_path, datetime.strftime(current_date, "%Y/%m/%d"))
@@ -393,7 +391,6 @@ def create_default_pystepsrc(
     dest_path = os.path.join(config_dir, file_name)
 
     if not dryrun:
-
         if not os.path.isdir(config_dir):
             os.makedirs(config_dir)
 
1 change: 0 additions & 1 deletion pysteps/decorators.py
@@ -66,7 +66,6 @@ def postprocess_import(fillna=np.nan, dtype="double"):
     def _postprocess_import(importer):
         @wraps(importer)
         def _import_with_postprocessing(*args, **kwargs):
-
             precip, *other_args = importer(*args, **kwargs)
 
             _dtype = kwargs.get("dtype", dtype)
4 changes: 0 additions & 4 deletions pysteps/io/importers.py
@@ -156,7 +156,6 @@ def _check_coords_range(selected_range, coordinate, full_range):
         return sorted(full_range)
 
     if not isinstance(selected_range, (list, tuple)):
-
         if len(selected_range) != 2:
             raise ValueError(
                 f"The {coordinate} range must be None or a two-element tuple or list"
@@ -943,7 +942,6 @@ def import_mch_gif(filename, product, unit, accutime, **kwargs):
     img = Image.open(filename)
 
     if product.lower() in ["azc", "rzc", "precip"]:
-
         # convert 8-bit GIF colortable to RGB values
         img_rgb = img.convert("RGB")
 
@@ -974,7 +972,6 @@ def import_mch_gif(filename, product, unit, accutime, **kwargs):
         precip[precip > 9999] = np.nan
 
     elif product.lower() in ["aqc", "cpc", "acquire ", "combiprecip"]:
-
         # convert digital numbers to physical values
         img = np.array(img).astype(int)
 
@@ -1546,7 +1543,6 @@ def import_saf_crri(filename, extent=None, **kwargs):
         metadata["y2"] = ycoord[idx_y].max() + metadata["ypixelsize"] / 2
 
     else:
-
         idx_x = None
         idx_y = None
 
1 change: 0 additions & 1 deletion pysteps/motion/lucaskanade.py
@@ -205,7 +205,6 @@ def dense_lucaskanade(
     xy = np.empty(shape=(0, 2))
     uv = np.empty(shape=(0, 2))
     for n in range(nr_fields - 1):
-
         # extract consecutive images
         prvs_img = input_images[n, :, :].copy()
         next_img = input_images[n + 1, :, :].copy()
3 changes: 0 additions & 3 deletions pysteps/motion/vet.py
@@ -506,7 +506,6 @@ def debug_print(*args, **kwargs):
     sectors = numpy.asarray(sectors, dtype="int", order="C")
 
     if sectors.ndim == 1:
-
         new_sectors = numpy.zeros(
             (2,) + sectors.shape, dtype="int", order="C"
         ) + sectors.reshape((1, sectors.shape[0]))
@@ -545,13 +544,11 @@ def debug_print(*args, **kwargs):
     previous_sectors_in_j = sectors[1, 0]
 
     for n, (sectors_in_i, sectors_in_j) in enumerate(zip(sectors[0, :], sectors[1, :])):
-
         # Minimize for each sector size
         pad_i = get_padding(input_images.shape[1], sectors_in_i)
         pad_j = get_padding(input_images.shape[2], sectors_in_j)
 
         if (pad_i != (0, 0)) or (pad_j != (0, 0)):
-
             _input_images = numpy.pad(input_images, ((0, 0), pad_i, pad_j), "edge")
 
             _mask = numpy.pad(mask, (pad_i, pad_j), "constant", constant_values=1)
6 changes: 0 additions & 6 deletions pysteps/noise/fftgenerators.py
@@ -132,7 +132,6 @@ def initialize_param_2d_fft_filter(field, **kwargs):
         tapering = np.ones((M, N))
 
     if model.lower() == "power-law":
-
         # compute average 2D PSD
         F = np.zeros((M, N), dtype=complex)
         for i in range(nr_fields):
@@ -526,7 +525,6 @@ def initialize_nonparam_2d_ssft_filter(field, **kwargs):
     for i in range(F.shape[0]):
         # loop columns
         for j in range(F.shape[1]):
-
             # compute indices of local window
             idxi[0] = int(np.max((i * win_size[0] - overlap * win_size[0], 0)))
             idxi[1] = int(
@@ -657,16 +655,13 @@ def initialize_nonparam_2d_nested_filter(field, gridres=1.0, **kwargs):
     # now loop levels and build composite spectra
     level = 0
     while level < max_level:
-
         for m in range(len(Idxi)):
-
             # the indices of rainfall field
             Idxinext, Idxjnext = _split_field(Idxi[m, :], Idxj[m, :], 2)
             # the indices of the field of fourier filters
             Idxipsdnext, Idxjpsdnext = _split_field(Idxipsd[m, :], Idxjpsd[m, :], 2)
 
             for n in range(len(Idxinext)):
-
                 mask = _get_mask(dim, Idxinext[n, :], Idxjnext[n, :], win_fun)
                 war = np.sum((field * mask[None, :, :]) > 0.01) / float(
                     (Idxinext[n, 1] - Idxinext[n, 0])
@@ -807,7 +802,6 @@ def generate_noise_2d_ssft_filter(F, randstate=None, seed=None, **kwargs):
     for i in range(F.shape[0]):
         # loop columns
         for j in range(F.shape[1]):
-
             # apply fourier filtering with local filter
             lF = F[i, j, :, :]
             flN = fN * lF