Commit

Merge 09cae1d into dd4b46b
rhugonnet committed May 10, 2024
2 parents dd4b46b + 09cae1d, commit 609038b
Showing 22 changed files with 125 additions and 132 deletions.

dev-environment.yml (4 changes: 2 additions & 2 deletions)

@@ -13,7 +13,7 @@ dependencies:
   - tqdm
   - scikit-image=0.*
   - scikit-gstat>=1.0
-  - geoutils>=0.1.4,<0.2
+  - geoutils>=0.1.5,<0.2

 # Development-specific, to mirror manually in setup.cfg [options.extras_require].
 - pip
@@ -51,4 +51,4 @@ dependencies:
   - opencv-contrib-python-headless

 # To run CI against latest GeoUtils
-# - git+https://github.com/rhugonnet/geoutils.git@fix_to_points
+# - git+https://github.com/rhugonnet/geoutils.git

doc/source/code/comparison_plot_spatial_interpolation.py (2 changes: 1 addition & 1 deletion)

@@ -13,7 +13,7 @@
 # The example DEMs are void-free, so let's make some random voids.
 ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)  # Reset the mask
 # Introduce 50000 nans randomly throughout the dDEM.
-ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
+ddem.data.mask.ravel()[np.random.default_rng(42).choice(ddem.data.size, 50000, replace=False)] = True

 ddem.interpolate(method="linear")
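
Note: the recurring change in this commit swaps NumPy's legacy global-seed API for the Generator API. A minimal standalone sketch of the before/after pattern (illustrative, not part of the diff):

    import numpy as np

    # Before: seeding mutates hidden global state shared by all np.random.* calls.
    np.random.seed(42)
    idx_legacy = np.random.choice(100, size=5, replace=False)

    # After: the seed lives in a local Generator object, so examples and tests
    # no longer depend on, or clobber, global state.
    rng = np.random.default_rng(42)
    idx_modern = rng.choice(100, size=5, replace=False)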

doc/source/code/spatialstats_standardizing.py (4 changes: 2 additions & 2 deletions)

@@ -5,8 +5,8 @@
 # Example x vector
 mu = 15
 sig = 5
-np.random.seed(42)
-y = np.random.normal(mu, sig, size=300)
+rng = np.random.default_rng(42)
+y = rng.normal(mu, sig, size=300)

 fig, ax1 = plt.subplots(figsize=(8, 3))

doc/source/code/spatialstats_stationarity_assumption.py (8 changes: 4 additions & 4 deletions)

@@ -8,10 +8,10 @@
 x = np.linspace(0, 1, 200)

 sig = 0.2
-np.random.seed(42)
-y_rand1 = np.random.normal(0, sig, size=len(x))
-y_rand2 = np.random.normal(0, sig, size=len(x))
-y_rand3 = np.random.normal(0, sig, size=len(x))
+rng = np.random.default_rng(42)
+y_rand1 = rng.normal(0, sig, size=len(x))
+y_rand2 = rng.normal(0, sig, size=len(x))
+y_rand3 = rng.normal(0, sig, size=len(x))


 y_mean = np.array([0.5 * xval - 0.25 if xval > 0.5 else 0.5 * (1 - xval) - 0.25 for xval in x])

environment.yml (4 changes: 2 additions & 2 deletions)

@@ -13,9 +13,9 @@ dependencies:
   - tqdm
   - scikit-image=0.*
   - scikit-gstat>=1.0
-  - geoutils>=0.1.4,<0.2
+  - geoutils>=0.1.5,<0.2
   - pip

 # To run CI against latest GeoUtils
 # - pip:
-#   - git+https://github.com/rhugonnet/geoutils.git@fix_to_points
+#   - git+https://github.com/rhugonnet/geoutils.git

examples/advanced/plot_norm_regional_hypso.py (5 changes: 3 additions & 2 deletions)

@@ -58,8 +58,9 @@
 # %%
 # To test the method, we can generate a semi-random mask to assign nans to glacierized areas.
 # Let's remove 30% of the data.
-np.random.seed(42)
-random_nans = (xdem.misc.generate_random_field(dem_1990.shape, corr_size=5) > 0.7) & (glacier_index_map > 0)
+random_nans = (xdem.misc.generate_random_field(dem_1990.shape, corr_size=5, random_state=42) > 0.7) & (
+    glacier_index_map > 0
+)

 random_nans.plot()
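
Note: here the seed moves into the call itself via a random_state argument instead of being set globally beforehand. A sketch of that idiom with a hypothetical stand-in for xdem.misc.generate_random_field:

    import numpy as np

    def generate_field(shape, random_state=None):
        """Hypothetical stand-in; the real function also adds spatial correlation."""
        # default_rng accepts None (fresh entropy), an int seed, or a Generator.
        rng = np.random.default_rng(random_state)
        return rng.random(shape)

    field = generate_field((100, 100), random_state=42)  # reproducible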

requirements.txt (2 changes: 1 addition & 1 deletion)

@@ -11,5 +11,5 @@ scipy>=1.0,<1.13
 tqdm
 scikit-image==0.*
 scikit-gstat>=1.0
-geoutils>=0.1.4,<0.2
+geoutils>=0.1.5,<0.2
 pip

tests/test_coreg/test_affine.py (2 changes: 1 addition & 1 deletion)

@@ -143,7 +143,7 @@ def test_coreg_example(self, verbose: bool = False) -> None:

         # Check the output metadata is always the same
         shifts = (nuth_kaab._meta["offset_east_px"], nuth_kaab._meta["offset_north_px"], nuth_kaab._meta["vshift"])
-        assert shifts == pytest.approx((-0.463, -0.133, -1.9876264671765433))
+        assert shifts == pytest.approx((-0.463, -0.1339999, -1.9922009))

     def test_gradientdescending(self, subsample: int = 10000, inlier_mask: bool = True, verbose: bool = False) -> None:
         """
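
Note: the expected shifts change, presumably because the coregistration's random subsample now comes from the new Generator stream, and the regenerated values are written with fewer digits. pytest.approx compares with a relative tolerance of 1e-6 by default, so truncated expectations still match a full-precision result. A minimal sketch (numbers illustrative):

    import pytest

    # Default relative tolerance is 1e-6: trailing digits beyond roughly
    # seven significant figures do not affect the comparison.
    assert -0.13399991234 == pytest.approx(-0.1339999)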

tests/test_coreg/test_base.py (7 changes: 4 additions & 3 deletions)

@@ -95,7 +95,8 @@ def test_error_method(self) -> None:
         assert vshiftcorr.error(dem1, dem2, transform=affine, crs=crs, error_type="median") == -2

         # Create random noise and see if the standard deviation is equal (it should)
-        dem3 = dem1.copy() + np.random.random(size=dem1.size).reshape(dem1.shape)
+        rng = np.random.default_rng(42)
+        dem3 = dem1.copy() + rng.random(size=dem1.size).reshape(dem1.shape)
         assert abs(vshiftcorr.error(dem1, dem3, transform=affine, crs=crs, error_type="std") - np.std(dem3)) < 1e-6

     @pytest.mark.parametrize("subsample", [10, 10000, 0.5, 1])  # type: ignore
@@ -104,8 +105,8 @@ def test_get_subsample_on_valid_mask(self, subsample: float | int) -> None:

         # Define a valid mask
         width = height = 50
-        np.random.seed(42)
-        valid_mask = np.random.randint(low=0, high=2, size=(width, height), dtype=bool)
+        rng = np.random.default_rng(42)
+        valid_mask = rng.integers(low=0, high=2, size=(width, height), dtype=bool)

         # Define a class with a subsample and random_state in the metadata
         coreg = Coreg(meta={"subsample": subsample, "random_state": 42})
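
Note: method names shift slightly between the two APIs. A quick reference sketch of the correspondences used throughout this commit:

    import numpy as np

    rng = np.random.default_rng(42)

    # np.random.randint(low, high, size)  ->  rng.integers(low, high, size)
    # np.random.random(size)              ->  rng.random(size)
    # np.random.normal(mu, sigma, size)   ->  rng.normal(mu, sigma, size)
    # np.random.choice(a, size)           ->  rng.choice(a, size)
    valid_mask = rng.integers(low=0, high=2, size=(50, 50), dtype=bool)
    noise = rng.random(100)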

tests/test_coreg/test_biascorr.py (8 changes: 4 additions & 4 deletions)

@@ -42,7 +42,6 @@ class TestBiasCorr:
     )

     # Convert DEMs to points with a bit of subsampling for speed-up
-    # TODO: Simplify once this GeoUtils issue is resolved: https://github.com/GlacioHack/geoutils/issues/499
     tba_pts = tba.to_pointcloud(data_column_name="z", subsample=50000, random_state=42).ds

     ref_pts = ref.to_pointcloud(data_column_name="z", subsample=50000, random_state=42).ds
@@ -328,6 +327,8 @@ def test_biascorr__bin_and_fit_1d(self, fit_args, fit_func, fit_optimizer, bin_s
         # Curve fit can be unhappy in certain circumstances for numerical estimation of covariance
         # We don't care for this test
         warnings.filterwarnings("ignore", message="Covariance of the parameters could not be estimated*")
+        # Applying the transform can create data exactly equal to the nodata value
+        warnings.filterwarnings("ignore", category=UserWarning, message="Unmasked values equal to the nodata value*")

         # Create a bias correction object
         bcorr = biascorr.BiasCorr(
@@ -421,7+422,6 @@ def test_directionalbias__synthetic(self, fit_args, angle, nb_freq) -> None:
         xx = gu.raster.get_xy_rotated(self.ref, along_track_angle=angle)[0]

         # Get random parameters (3 parameters needed per frequency)
-        np.random.seed(42)
         params = np.array([(5, 3000, np.pi), (1, 300, 0), (0.5, 100, np.pi / 2)]).flatten()
         nb_freq = 1
         params = params[0 : 3 * nb_freq]
@@ -510,8 +510,8 @@ def test_deramp__synthetic(self, fit_args, order: int) -> None:
         nb_params = int((order + 1) * (order + 1))

         # Get a random number of parameters
-        np.random.seed(42)
-        params = np.random.normal(size=nb_params)
+        rng = np.random.default_rng(42)
+        params = rng.normal(size=nb_params)

         # Create a synthetic bias and add to the DEM
         synthetic_bias = polynomial_2d((xx, yy), *params)
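
Note: in warnings.filterwarnings, the message argument is a regular expression matched against the start of the warning text, so a prefix alone is enough, and the trailing * in the diff is a regex quantifier rather than a glob wildcard. A small standalone sketch (the warning text is illustrative):

    import warnings

    # A prefix regex silences any warning whose text starts with it.
    warnings.filterwarnings("ignore", category=UserWarning, message="Unmasked values equal to the nodata value")
    warnings.warn("Unmasked values equal to the nodata value found in data.", UserWarning)  # silenced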

tests/test_ddem.py (6 changes: 4 additions & 2 deletions)

@@ -52,7 +52,8 @@ def test_regional_hypso(self) -> None:
         """Test the regional hypsometric approach."""
         ddem = self.ddem.copy()
         ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)
-        ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
+        rng = np.random.default_rng(42)
+        ddem.data.mask.ravel()[rng.choice(ddem.data.size, 50000, replace=False)] = True
         assert np.count_nonzero(ddem.data.mask) > 0

         assert ddem.filled_data is None
@@ -71,7 +72,8 @@ def test_local_hypso(self) -> None:
         ddem = self.ddem.copy()
         scott_1990 = self.outlines_1990.query("NAME == 'Scott Turnerbreen'")
         ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)
-        ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
+        rng = np.random.default_rng(42)
+        ddem.data.mask.ravel()[rng.choice(ddem.data.size, 50000, replace=False)] = True
         assert np.count_nonzero(ddem.data.mask) > 0

         assert ddem.filled_data is None

tests/test_demcollection.py (10 changes: 6 additions & 4 deletions)

@@ -60,10 +60,11 @@ def test_init(self) -> None:
         # Simple check that the dV number is of a greater magnitude than the dH number.
         assert abs(cumulative_dv.iloc[-1]) > abs(cumulative_dh.iloc[-1])

+        rng = np.random.default_rng(42)
         # Generate 10000 NaN values randomly in one of the dDEMs
         dems.ddems[0].data[
-            np.random.randint(0, dems.ddems[0].data.shape[0], 100),
-            np.random.randint(0, dems.ddems[0].data.shape[1], 100),
+            rng.integers(0, dems.ddems[0].data.shape[0], 100),
+            rng.integers(0, dems.ddems[0].data.shape[1], 100),
         ] = np.nan
         # Check that the cumulative_dh function warns for NaNs
         with warnings.catch_warnings():
@@ -108,9 +109,10 @@ def test_ddem_interpolation(self) -> None:
             raise exception

         # Generate 10000 NaN values randomly in one of the dDEMs
+        rng = np.random.default_rng(42)
         dems.ddems[0].data[
-            np.random.randint(0, dems.ddems[0].data.shape[0], 100),
-            np.random.randint(0, dems.ddems[0].data.shape[1], 100),
+            rng.integers(0, dems.ddems[0].data.shape[0], 100),
+            rng.integers(0, dems.ddems[0].data.shape[1], 100),
         ] = np.nan

         # Make sure that filled_data is not available anymore, since the data now has nans

tests/test_examples.py (18 changes: 9 additions & 9 deletions)

@@ -28,17 +28,17 @@ class TestExamples:
     @pytest.mark.parametrize(
         "rst_and_truevals",
         [
-            (ref_dem, np.array([868.6489, 623.42194, 180.57921, 267.30765, 601.67615], dtype=np.float32)),
-            (tba_dem, np.array([875.2358, 625.0544, 182.9936, 272.6586, 606.2897], dtype=np.float32)),
+            (ref_dem, np.array([465.11816, 207.3236, 208.30563, 748.7337, 797.28644], dtype=np.float32)),
+            (tba_dem, np.array([464.6715, 213.7554, 207.8788, 760.8192, 797.3268], dtype=np.float32)),
             (
                 ddem,
                 np.array(
                     [
-                        -0.012023926,
-                        -0.6956787,
-                        0.14024353,
-                        1.1026001,
-                        -5.9224243,
+                        1.3182373,
+                        -1.6629944,
+                        0.10473633,
+                        -10.096802,
+                        2.4724731,
                     ],
                     dtype=np.float32,
                 ),
@@ -50,8 +50,8 @@ def test_array_content(self, rst_and_truevals: tuple[Raster, NDArrayf]) -> None:

         rst = rst_and_truevals[0]
         truevals = rst_and_truevals[1]
-        np.random.seed(42)
-        values = np.random.choice(rst.data.data.flatten(), size=5, replace=False)
+        rng = np.random.default_rng(42)
+        values = rng.choice(rst.data.data.flatten(), size=5, replace=False)

         assert values == pytest.approx(truevals)
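
Note: the hard-coded truth values change because, for the same seed, the legacy RandomState (MT19937) and the new Generator (PCG64) produce different draws, so a different set of pixels is sampled. A sketch of the effect on illustrative data (not the repository's DEMs):

    import numpy as np

    data = np.arange(1000, dtype=np.float32)

    old_draw = np.random.RandomState(42).choice(data, size=5, replace=False)
    new_draw = np.random.default_rng(42).choice(data, size=5, replace=False)
    # The two draws differ, so expected values keyed to the old stream must be
    # regenerated once after the migration.
    print(old_draw)  # draw under the legacy stream
    print(new_draw)  # a different draw under the new stream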

tests/test_filters.py (10 changes: 6 additions & 4 deletions)

@@ -37,8 +37,9 @@ def test_gauss(self) -> None:

         # Test that it works with NaNs too
         nan_count = 1000
-        cols = np.random.randint(0, high=self.dem_1990.width - 1, size=nan_count, dtype=int)
-        rows = np.random.randint(0, high=self.dem_1990.height - 1, size=nan_count, dtype=int)
+        rng = np.random.default_rng(42)
+        cols = rng.integers(0, high=self.dem_1990.width - 1, size=nan_count, dtype=int)
+        rows = rng.integers(0, high=self.dem_1990.height - 1, size=nan_count, dtype=int)
         dem_with_nans = np.copy(self.dem_1990.data).squeeze()
         dem_with_nans[rows, cols] = np.nan
@@ -71,8 +72,9 @@ def test_dist_filter(self) -> None:

         # Add random outliers
         count = 1000
-        cols = np.random.randint(0, high=self.dem_1990.width - 1, size=count, dtype=int)
-        rows = np.random.randint(0, high=self.dem_1990.height - 1, size=count, dtype=int)
+        rng = np.random.default_rng(42)
+        cols = rng.integers(0, high=self.dem_1990.width - 1, size=count, dtype=int)
+        rows = rng.integers(0, high=self.dem_1990.height - 1, size=count, dtype=int)
         ddem.data[rows, cols] = 5000

         # Filter gross outliers

tests/test_fit.py (12 changes: 6 additions & 6 deletions)

@@ -53,15 +53,15 @@ def test_robust_norder_polynomial_fit_noise_and_outliers(self) -> None:
         # Ignore sklearn convergence warnings
         warnings.filterwarnings("ignore", category=UserWarning, message="lbfgs failed to converge")

-        np.random.seed(42)
+        rng = np.random.default_rng(42)

         # Define x vector
         x = np.linspace(1, 10, 1000)
         # Define an exact polynomial
         true_coefs = [-100, 5, 3, 2]
         y = np.polyval(np.flip(true_coefs), x).astype(np.float32)
         # Add some noise on top
-        y += np.random.normal(loc=0, scale=3, size=1000)
+        y += rng.normal(loc=0, scale=3, size=1000)
         # Add some outliers
         y[50:75] = 0.0
         y[900:925] = 1000.0
@@ -103,8 +103,8 @@ def test_robust_norder_polynomial_fit_noise_and_outliers(self) -> None:
         coefs4, deg4 = xdem.fit.robust_norder_polynomial_fit(x, y, estimator_name="Theil-Sen", random_state=42)
         assert deg4 == 3
         # High degree coefficients should be well constrained
-        assert coefs4[2] == pytest.approx(true_coefs[2], abs=1)
-        assert coefs4[3] == pytest.approx(true_coefs[3], abs=1)
+        assert coefs4[2] == pytest.approx(true_coefs[2], abs=1.5)
+        assert coefs4[3] == pytest.approx(true_coefs[3], abs=1.5)

         # RANSAC also works
         coefs5, deg5 = xdem.fit.robust_norder_polynomial_fit(x, y, estimator_name="RANSAC", random_state=42)
@@ -150,15 +150,15 @@ def test_robust_nfreq_sumsin_fit(self) -> None:
     def test_robust_nfreq_simsin_fit_noise_and_outliers(self) -> None:

         # Check robustness to outliers
-        np.random.seed(42)
+        rng = np.random.default_rng(42)
         # Define X vector
         x = np.linspace(0, 10, 1000)
         # Define exact sum of sinusoid signal
         true_coefs = np.array([(5, 3, np.pi), (3, 0.5, 0)]).flatten()
         y = xdem.fit.sumsin_1d(x, *true_coefs)

         # Add some noise
-        y += np.random.normal(loc=0, scale=0.25, size=1000)
+        y += rng.normal(loc=0, scale=0.25, size=1000)
         # Add some outliers
         y[50:75] = -10
         y[900:925] = 10

(The 7 remaining changed files are not shown in this view.)
