
Commit

[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Dec 18, 2023
1 parent f6f2379 commit c87d18f
Showing 10 changed files with 13 additions and 46 deletions.
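The diffs that follow show the two kinds of fixes the hooks made here: string literals in the YAML configs rewritten from single to double quotes, and blank lines removed immediately after block openers in the Python sources and the notebook. For reference, a pre-commit configuration roughly like the sketch below could produce fixes of this kind; the specific hooks and revs are assumptions for illustration, not necessarily what this repository pins in its .pre-commit-config.yaml.

# Hypothetical .pre-commit-config.yaml — illustrative only; the repo's
# actual hook set and pinned revs may differ.
repos:
  - repo: https://github.com/psf/black
    rev: 23.12.0
    hooks:
      - id: black          # newer/preview black styles drop blank lines at the start of a block
      - id: black-jupyter  # applies the same formatting inside .ipynb code cells
  - repo: https://github.com/pre-commit/mirrors-prettier
    rev: v3.1.0
    hooks:
      - id: prettier       # prettier's YAML formatter prefers double-quoted strings
        types_or: [yaml]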
6 changes: 3 additions & 3 deletions .github/dependabot.yml
@@ -4,8 +4,8 @@ updates:
   #   directory: "/"
   #   schedule:
   #     interval: daily
-  - package-ecosystem: 'github-actions'
-    directory: '/'
+  - package-ecosystem: "github-actions"
+    directory: "/"
     schedule:
       # Check for updates once a week
-      interval: 'weekly'
+      interval: "weekly"
14 changes: 7 additions & 7 deletions .github/workflows/ci.yaml
@@ -2,12 +2,12 @@ name: CI
 on:
   push:
     branches:
-      - '*'
+      - "*"
   pull_request:
     branches:
-      - '*'
+      - "*"
   schedule:
-    - cron: '0 0 * * *' # Daily “At 00:00” UTC
+    - cron: "0 0 * * *" # Daily “At 00:00” UTC
   workflow_dispatch: # allows you to trigger the workflow run manually

 jobs:
@@ -22,8 +22,8 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        os: ['ubuntu-latest']
-        python-version: ['3.7', '3.8', '3.9']
+        os: ["ubuntu-latest"]
+        python-version: ["3.7", "3.8", "3.9"]
     steps:
       - name: Cancel previous runs
         uses: styfle/cancel-workflow-action@0.11.0
@@ -34,7 +34,7 @@ jobs:
         with:
           channels: conda-forge
           channel-priority: strict
-          mamba-version: '*'
+          mamba-version: "*"
           activate-environment: ldcpy # Defined in ci/environment.yml
           auto-update-conda: false
           python-version: ${{ matrix.python-version }}
@@ -76,7 +76,7 @@ jobs:
         with:
           channels: conda-forge
           channel-priority: strict
-          mamba-version: '*'
+          mamba-version: "*"
           activate-environment: ldcpy # Defined in ci/environment.yml
           auto-update-conda: false
           python-version: 3.9
4 changes: 2 additions & 2 deletions .github/workflows/linting.yaml
@@ -2,9 +2,9 @@ name: code-style

 on:
   push:
-    branches: '*'
+    branches: "*"
   pull_request:
-    branches: '*'
+    branches: "*"

 jobs:
   linting:
2 changes: 1 addition & 1 deletion .github/workflows/pypi.yaml
@@ -14,7 +14,7 @@ jobs:
       - name: Set up Python
         uses: actions/setup-python@v4
         with:
-          python-version: '3.x'
+          python-version: "3.x"
       - name: Install dependencies
         run: |
           python -m pip install --upgrade pip
1 change: 0 additions & 1 deletion docs/source/notebooks/MutualInformation_V2.ipynb
@@ -299,7 +299,6 @@
     "\n",
     "    I = []\n",
     "    for bcount, bit_pos_dict in enumerate(dict_list_H):\n",
-    "\n",
     "        p00 = np.divide(bit_pos_dict[\"00\"], num - 1, dtype=np.float64)\n",
     "        p01 = np.divide(bit_pos_dict[\"01\"], num - 1, dtype=np.float64)\n",
     "        p10 = np.divide(bit_pos_dict[\"10\"], num - 1, dtype=np.float64)\n",
11 changes: 0 additions & 11 deletions ldcpy/calcs.py
@@ -169,7 +169,6 @@ def _is_memoized(self, calc_name: str) -> bool:
         return hasattr(self, calc_name) and (self.__getattribute__(calc_name) is not None)

     def _con_var(self, dir, dataset) -> xr.DataArray:
-
         if dir == 'ns':
             tt = dataset.diff(self._lat_dim_name, 1)

@@ -417,7 +416,6 @@ def entropy(self) -> xr.DataArray:
         # lower is better (1.0 means random - no compression possible)
         """
         if not self._is_memoized('_entropy'):
-
             a1 = self._ds.data
             if dask.is_dask_collection(a1):
                 a1 = a1.compute()
@@ -1052,7 +1050,6 @@ def vfftmax(self) -> xr.DataArray:
     @property
     def stft(self) -> xr.DataArray:
         if not self._is_memoized('_stft'):
-
             f, t, self._stft = np.abs(
                 stft(
                     self.mean.isel({'lat': 100}).squeeze(), nperseg=1, nfft=382, detrend='constant'
@@ -1552,7 +1549,6 @@ def covariance(self) -> xr.DataArray:
             The covariance between the two datasets
         """
         if not self._is_memoized('_covariance'):
-
             # need to use unweighted means
             c1_mean = self._calcs1.get_calc('ds').mean(skipna=True)
             c2_mean = self._calcs2.get_calc('ds').mean(skipna=True)
@@ -1582,7 +1578,6 @@ def pearson_correlation_coefficient(self):
         returns the pearson correlation coefficient between the two datasets
         """
         if not self._is_memoized('_pearson_correlation_coefficient'):
-
             # we need to do this with unweighted data
             c1_std = float(self._calcs1.get_calc('ds').std(skipna=True))
             c2_std = float(self._calcs2.get_calc('ds').std(skipna=True))
@@ -1659,7 +1654,6 @@ def spatial_rel_error(self):
             self._spatial_rel_error = 0
             self._max_spatial_rel_error = 0
         else:
-
             if z.size > 0:
                 m_t1_denom = np.ma.masked_invalid(t1_denom).compressed()
             else:
@@ -1799,7 +1793,6 @@ def ssim_value(self):
             ssim_mats_array = []

             for this_lev in range(nlevels):
-
                 with tempfile.TemporaryDirectory() as tmpdirname:
                     filename_1, filename_2 = (
                         f'{tmpdirname}/t_ssim1.png',
@@ -1908,7 +1901,6 @@ def ssim_value_fp_slow(self):
         """

         if not self._is_memoized('_ssim_value_fp_slow'):
-
             # if this is a 3D variable, we will do each level seperately
             if self._calcs1._vert_dim_name is not None:
                 vname = self._calcs1._vert_dim_name
@@ -1977,13 +1969,11 @@ def ssim_value_fp_slow(self):
             # go through 2D arrays - each grid point x0, y0 has
             # a 2D window [x0 - k, x0+k] [y0 - k, y0 + k]
             for i in range(X):
-
                 # don't go over boundaries
                 imin = max(0, i - k)
                 imax = min(X - 1, i + k)

                 for j in range(Y):
-
                     if np.isnan(sc_a1[i, j]):
                         # SKIP IF gridpoint is nan
                         ssim_mat[i, j] = np.nan
@@ -2079,7 +2069,6 @@ def ssim_value_fp_fast(self):
         from astropy.convolution import Gaussian2DKernel, convolve, interpolate_replace_nans

         if not self._is_memoized('_ssim_value_fp_fast'):
-
             # if this is a 3D variable, we will do each level separately
             if self._calcs1._vert_dim_name is not None:
                 vname = self._calcs1._vert_dim_name
3 changes: 0 additions & 3 deletions ldcpy/comp_checker.py
@@ -51,7 +51,6 @@ def __init__(
         comp_mode='p',
         accept_first=False,
     ):
-
         self._calc_type = calc_type
         self._calc_tol = calc_tol
         self._tol_greater_than = True
@@ -73,7 +72,6 @@ def reset_checker(self):  # call before doing the next timestep
         self._opt_level = None

     def eval_comp_level(self, orig_da, comp_da, comp_level):
-
         dc = lm.Diffcalcs(orig_da, comp_da)
         val = dc.get_diff_calc(self._calc_type)

@@ -166,7 +164,6 @@ def _comp_rules(self, comp_level, level_passed):
         return new_level

     def _zfp_rules(self, comp_level, level_passed):
-
         if self._comp_mode == 'p':  # precision
             pmax = 28
             pmin = 6
5 changes: 0 additions & 5 deletions ldcpy/derived_vars.py
@@ -10,7 +10,6 @@


 def _preprocess(set_labels, list_of_cols):
-
     contU = True
     num_sets = len(set_labels)

@@ -26,7 +25,6 @@ def _preprocess(set_labels, list_of_cols):

 # top of the model radiation budget
 def cam_restom(all_col, sets):
-
     col = []

     fsnt = all_col['FSNT']
@@ -64,7 +62,6 @@ def cam_restom(all_col, sets):

 # global precipitation
 def cam_precip(all_col, sets):
-
     col = []

     precc = all_col['PRECC']
@@ -101,7 +98,6 @@ def cam_precip(all_col, sets):

 # evaporation-precipitation
 def cam_ep(all_col, sets):
-
     # QFLX is "kg/m2/s or mm/s
     # PRECC and PRECL are m/s
     # 1 kg/m2/s = 86400 mm/day.
@@ -148,7 +144,6 @@ def cam_ep(all_col, sets):

 # surface energy balance
 def cam_ressurf(all_col, sets):
-
     col = []

     # all in W/m^2
7 changes: 0 additions & 7 deletions ldcpy/plot.py
@@ -80,7 +80,6 @@ def __init__(
         cmax=None,
         cmin=None,
     ):
-
         self._ds = ds

         self._cmax = cmax
@@ -209,7 +208,6 @@ def get_plot_data(self, raw_data_1, raw_data_2=None):
         return plot_data

     def get_title(self, calc_name, c_name=None):
-
         if c_name is not None:
             das = f'{c_name}'
         else:
@@ -238,7 +236,6 @@ def get_title(self, calc_name, c_name=None):
             title = f'{title} {self._calc_type}'

         if self._group_by is not None:
-
             title = f'{title} by {self._group_by}'

         if self.title_lat is not None:
@@ -282,7 +279,6 @@ def update_label(event_axes):
         return

     def spatial_plot(self, da_sets, titles, data_type):
-
         if self.vert_plot:
             nrows = int((da_sets.sets.size))
         else:
@@ -326,7 +322,6 @@ def spatial_plot(self, da_sets, titles, data_type):
             central = 300.0

         for i in range(da_sets.sets.size):
-
             if self.vert_plot:
                 axs[i] = plt.subplot(
                     nrows, 1, i + 1, projection=ccrs.Robinson(central_longitude=central)
@@ -563,7 +558,6 @@ def time_series_plot(
         if da_sets.size / da_sets.sets.size == 1:
             tick_interval = 1
         if self._group_by == 'time.dayofyear':
-
             group_string = 'dayofyear'
             xlabel = 'Day of Year'
         elif self._group_by == 'time.month':
@@ -704,7 +698,6 @@ def get_calc_label(self, calc, data, data_type):
             calc_name = f'{calc}: cutoff {zscore_cutoff[0]:.2e}, % sig: {percent_sig:.2f}'

         elif calc == 'mean' and self._plot_type == 'spatial' and self._calc_type == 'raw':
-
             if self._weighted:
                 a1_data = (
                     lm.Datasetcalcs(data, data_type, ['time'], weighted=self._weighted)
6 changes: 0 additions & 6 deletions ldcpy/util.py
@@ -526,7 +526,6 @@ def check_metrics(
     # Pearson less than pcc_tol means fail
     pcc = diff_calcs.get_diff_calc('pearson_correlation_coefficient')
     if pcc < pcc_tol:
-
         print('    *FAILED pearson correlation coefficient test...(pcc = {0:.5f}'.format(pcc), ')')
         num_fail = num_fail + 1
     else:
@@ -581,7 +580,6 @@ def subset_data(
     if lon_coord_name is None:
         lon_coord_name = ds.cf.coordinates['longitude'][0]
     if lat_coord_name is None:
-
         lat_coord_name = ds.cf.coordinates['latitude'][0]
     if vertical_dim_name is None:
         try:
@@ -624,19 +622,16 @@ def subset_data(
         ds_subset = ds_subset.isel({vertical_dim_name: lev})

     if latdim == 1:
-
         if lat is not None:
             ds_subset = ds_subset.sel(**{lat_coord_name: [lat], 'method': 'nearest'})
         if lon is not None:
             ds_subset = ds_subset.sel(**{lon_coord_name: [lon + 180], 'method': 'nearest'})

     elif latdim == 2:
-
         # print(ds_subset)

         if lat is not None:
             if lon is not None:
-
                 # lat is -90 to 90
                 # lon should be 0- 360
                 ad_lon = lon
@@ -665,7 +660,6 @@


 def var_and_wt_coords(varname, ds_col):
-
     ca_coord = ds_col.coords['cell_area']
     if dask.is_dask_collection(ca_coord):
         ca_coord = ca_coord.compute()
