Prepare some more for NumPy 2.0 #439
Merged: 7 commits, Jun 19, 2024
2 changes: 1 addition & 1 deletion .github/workflows/full_test.yml
@@ -15,7 +15,7 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest, windows-latest]
-        python-version: ["3.8", "3.9", "3.10","3.11"]
+        python-version: ["3.9", "3.10","3.11"]

     steps:
       - uses: actions/checkout@v3
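Dropping Python 3.8 from the test matrix is consistent with the NumPy 2.0 theme of this PR: NumPy 1.25 and later (and therefore NumPy 2.0) require Python 3.9 or newer, so a 3.8 job could never install NumPy 2.x and would not exercise these changes. A throwaway sanity check of what a CI job sees (illustrative only, not part of the PR):

import sys

import numpy as np

# NumPy 2.x wheels are only published for Python 3.9+, so an interpreter older
# than 3.9 can only ever resolve NumPy 1.x.
print("Python", ".".join(map(str, sys.version_info[:2])), "NumPy", np.__version__)
assert sys.version_info >= (3, 9), "NumPy 2.x requires Python >= 3.9"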
4 changes: 2 additions & 2 deletions modelskill/comparison/_utils.py
@@ -22,8 +22,8 @@ def _add_spatial_grid_to_df(
         bins_y = bins
     else:
         # bins from binsize
-        x_ptp = df.x.values.ptp()  # type: ignore
-        y_ptp = df.y.values.ptp()  # type: ignore
+        x_ptp = np.ptp(df.x.values)  # type: ignore
+        y_ptp = np.ptp(df.y.values)  # type: ignore
         nx = int(np.ceil(x_ptp / binsize))
         ny = int(np.ceil(y_ptp / binsize))
         x_mean = np.round(df.x.mean())
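The change above is needed because NumPy 2.0 removed the ndarray.ptp() method; the module-level np.ptp() function is the replacement and behaves the same on NumPy 1.x and 2.x. A minimal sketch of the difference (the sample DataFrame below is illustrative, not taken from the PR):

import numpy as np
import pandas as pd

# Illustrative point data; the real helper receives a DataFrame with x/y coordinates.
df = pd.DataFrame({"x": [0.0, 2.5, 10.0], "y": [1.0, 4.0, 7.0]})

# NumPy 1.x accepted the method form:
#     x_ptp = df.x.values.ptp()
# On NumPy 2.0 that raises AttributeError, so the function form is used instead.
x_ptp = np.ptp(df.x.values)  # peak-to-peak range (max - min) -> 10.0
y_ptp = np.ptp(df.y.values)  # -> 6.0
print(x_ptp, y_ptp)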
78 changes: 39 additions & 39 deletions modelskill/metrics.py
@@ -36,33 +36,33 @@
 >>> obs = np.array([0.3, 2.1, -1.0])
 >>> mod = np.array([0.0, 2.3, 1.0])
 >>> bias(obs, mod)
-0.6333333333333332
+np.float64(0.6333333333333332)
 >>> max_error(obs, mod)
-2.0
+np.float64(2.0)
 >>> rmse(obs, mod)
-1.173314393786536
+np.float64(1.173314393786536)
 >>> urmse(obs, mod)
-0.9877021593352702
+np.float64(0.9877021593352702)
 >>> mae(obs, mod)
-0.8333333333333331
+np.float64(0.8333333333333331)
 >>> mape(obs, mod)
-103.17460317460316
+np.float64(103.17460317460316)
 >>> nse(obs, mod)
-0.14786795048143053
+np.float64(0.14786795048143053)
 >>> r2(obs, mod)
-0.14786795048143053
+np.float64(0.14786795048143053)
 >>> mef(obs, mod)
-0.9231099877688299
+np.float64(0.9231099877688299)
 >>> si(obs, mod)
-0.8715019052958266
+np.float64(0.8715019052958266)
 >>> spearmanr(obs, mod)
-0.5
+np.float64(0.5)
 >>> willmott(obs, mod)
-0.7484604452865941
+np.float64(0.7484604452865941)
 >>> hit_ratio(obs, mod, a=0.5)
-0.6666666666666666
+np.float64(0.6666666666666666)
 >>> ev(obs, mod)
-0.39614855570839064
+np.float64(0.39614855570839064)
 """

 from __future__ import annotations
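The doctest updates in this hunk (and the similar ones further down) follow from NumPy 2.0's new scalar representation (NEP 51): NumPy scalars now repr as np.float64(0.5) rather than a bare 0.5, and doctest compares printed reprs verbatim, so every expected value has to be rewritten. A small sketch of the difference, reusing the obs/mod arrays from the docstring above; the bias value is reproduced here as a plain mean difference, which matches the number documented above:

import numpy as np

obs = np.array([0.3, 2.1, -1.0])
mod = np.array([0.0, 2.3, 1.0])

b = np.mean(mod - obs)  # a np.float64 scalar
print(repr(b))
# NumPy >= 2.0: np.float64(0.6333333333333332)
# NumPy <  2.0: 0.6333333333333332

# A project that wants to keep the old doctest output on NumPy 2.x can opt in
# to the legacy printing mode instead of rewriting the expected values:
np.set_printoptions(legacy="1.25")
print(repr(b))  # prints 0.6333333333333332 again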
@@ -332,7 +332,7 @@ def r2(obs: np.ndarray, model: np.ndarray) -> float:
     >>> obs = np.array([1.0,1.1,1.2,1.3,1.4])
     >>> model = np.array([1.09, 1.16, 1.3 , 1.38, 1.49])
     >>> r2(obs,model)
-    0.6379999999999998
+    np.float64(0.6379999999999998)
     """
     assert obs.size == model.size
     if len(obs) == 0:
@@ -429,9 +429,9 @@ def spearmanr(obs: np.ndarray, model: np.ndarray) -> float:
     >>> obs = np.linspace(-20, 20, 100)
     >>> mod = np.tanh(obs)
     >>> rho(obs, mod)
-    0.9999759973116955
+    np.float64(0.9999759973116955)
     >>> spearmanr(obs, mod)
-    0.9999759973116955
+    np.float64(0.9999759973116955)

     See Also
     --------
@@ -533,7 +533,7 @@ def pr(
     model: np.ndarray,
     inter_event_level: float = 0.7,
     AAP: Union[int, float] = 2,
-    inter_event_time: str ="36h",
+    inter_event_time: str = "36h",
 ) -> float:
     """alias for peak_ratio"""
     assert obs.size == model.size
@@ -544,8 +544,8 @@ def peak_ratio(
     obs: pd.Series,
     model: np.ndarray,
     inter_event_level: float = 0.7,
-    AAP: Union[int, float] = 2,
-    inter_event_time: str="36h",
+    AAP: Union[int, float] = 2,
+    inter_event_time: str = "36h",
 ) -> float:
     r"""Peak Ratio

@@ -578,24 +578,24 @@ def peak_ratio(
     # Calculate number of years
     dt_int = time[1:].values - time[0:-1].values
     dt_int_mode = float(stats.mode(dt_int, keepdims=False)[0]) / 1e9  # in seconds
-    N_years = dt_int_mode / 24 / 3600 / 365.25 * len(time)
+    N_years = dt_int_mode / 24 / 3600 / 365.25 * len(time)
     peak_index, AAP_ = _partial_duration_series(
-        time,
-        obs,
-        inter_event_level=inter_event_level,
-        AAP=AAP,
-        inter_event_time=inter_event_time,
-    )
+        time,
+        obs,
+        inter_event_level=inter_event_level,
+        AAP=AAP,
+        inter_event_time=inter_event_time,
+    )
     peaks = obs[peak_index]
     found_peaks_obs = peaks.sort_values(ascending=False)

     peak_index, _ = _partial_duration_series(
-        time,
-        model,
-        inter_event_level=inter_event_level,
-        AAP=AAP,
-        inter_event_time=inter_event_time,
-    )
+        time,
+        model,
+        inter_event_level=inter_event_level,
+        AAP=AAP,
+        inter_event_time=inter_event_time,
+    )
     peaks = model[peak_index]
     found_peaks_mod = peaks.sort_values(ascending=False)

@@ -650,7 +650,7 @@ def willmott(obs: np.ndarray, model: np.ndarray) -> float:
     >>> obs = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.4, 1.3])
     >>> model = np.array([1.02, 1.16, 1.3, 1.38, 1.49, 1.45, 1.32])
     >>> willmott(obs, model)
-    0.9501403174479723
+    np.float64(0.9501403174479723)

     References
     ----------
@@ -682,11 +682,11 @@ def hit_ratio(obs: np.ndarray, model: np.ndarray, a=0.1) -> float:
     >>> obs = np.array([1.0, 1.1, 1.2, 1.3, 1.4, 1.4, 1.3])
     >>> model = np.array([1.02, 1.16, 1.3, 1.38, 1.49, 1.45, 1.32])
     >>> hit_ratio(obs, model, a=0.05)
-    0.2857142857142857
+    np.float64(0.2857142857142857)
     >>> hit_ratio(obs, model, a=0.1)
-    0.8571428571428571
+    np.float64(0.8571428571428571)
     >>> hit_ratio(obs, model, a=0.15)
-    1.0
+    np.float64(1.0)
     """
     assert obs.size == model.size

@@ -916,7 +916,7 @@ def c_bias(obs: np.ndarray, model: np.ndarray) -> float:
    >>> obs = np.array([10., 355., 170.])
    >>> mod = np.array([20., 5., -180.])
    >>> c_bias(obs, mod)
-    10.0
+    np.float64(10.0)
     """
     from scipy.stats import circmean

@@ -946,7 +946,7 @@ def c_max_error(obs: np.ndarray, model: np.ndarray) -> float:
    >>> obs = np.array([10., 350., 10.])
    >>> mod = np.array([20., 10., 350.])
    >>> c_max_error(obs, mod)
-    20.0
+    np.float64(20.0)
     """

     resi = _c_residual(obs, model)