[pre-commit.ci] auto fixes from pre-commit.com hooks
for more information, see https://pre-commit.ci
pre-commit-ci[bot] committed Dec 7, 2023
1 parent e211182 commit c516f2d
Showing 36 changed files with 57 additions and 94 deletions.
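
The diffs below are mechanical modernizations of the kind pre-commit hooks apply automatically: "# coding=utf-8" pragma comments are dropped, str.format() calls are rewritten as f-strings, and super(Class, self) calls become argument-free super(). As a rough sketch only (the repository's actual .pre-commit-config.yaml is not shown here, so the hook revisions and arguments below are assumptions), a configuration that would let pre-commit.ci produce auto-fixes like these might look like:

    repos:
      - repo: https://github.com/asottile/pyupgrade
        rev: v3.15.0                # assumed revision
        hooks:
          - id: pyupgrade
            args: [--py36-plus]     # rewrites str.format() to f-strings and super(Class, self) to super()
      - repo: https://github.com/pre-commit/pre-commit-hooks
        rev: v4.5.0                 # assumed revision
        hooks:
          - id: fix-encoding-pragma
            args: [--remove]        # with --remove, strips coding pragma comments

    ci:
      autofix_prs: true             # allows pre-commit.ci to push auto-fix commits like this one

With a configuration along these lines, pre-commit.ci runs the hooks on each push and, when a hook rewrites a file, commits the result back using the default auto-fix message shown above. In the per-file diffs that follow, removed lines are prefixed with "-" and added lines with "+".
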
1 change: 0 additions & 1 deletion docs/conf.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
#
# pocean-core documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 10 16:09:19 2017.
1 change: 0 additions & 1 deletion pocean/__init__.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8

# Package level logger
import logging
9 changes: 4 additions & 5 deletions pocean/cf.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
import itertools
import os
import re
@@ -46,7 +45,7 @@ def load(cls, path):
try:
dsg = cls(path)
for klass in subs:
-logger.debug('Trying {}...'.format(klass.__name__))
+logger.debug(f'Trying {klass.__name__}...')
if hasattr(klass, 'is_mine'):
if klass.is_mine(dsg):
return klass(path)
@@ -65,7 +64,7 @@ def load(cls, path):
)

def axes(self, name):
-return getattr(self, '{}_axes'.format(name.lower()))()
+return getattr(self, f'{name.lower()}_axes')()

def t_axes(self):

@@ -197,7 +196,7 @@ def cf_safe_name(name):
if isinstance(name, str):
if re.match('^[0-9_]', name):
# Add a letter to the front
name = "v_{}".format(name)
name = f"v_{name}"
return re.sub(r'[^_a-zA-Z0-9]', "_", name)

-raise ValueError('Could not convert "{}" to a safe name'.format(name))
+raise ValueError(f'Could not convert "{name}" to a safe name')
11 changes: 5 additions & 6 deletions pocean/dataset.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
import warnings
from collections import OrderedDict

@@ -48,7 +47,7 @@ def close(self):
if not self.isopen():
return

-super(EnhancedDataset, self).close()
+super().close()

def vatts(self, vname):
d = {}
@@ -169,7 +168,7 @@ def apply_json(self, meta, create_vars=True, create_dims=True, create_data=True)
continue

if 'shape' not in vvalue and 'type' not in vvalue:
L.debug("Skipping {} creation, no shape or no type defined".format(vname))
L.debug(f"Skipping {vname} creation, no shape or no type defined")
continue
shape = vvalue.get('shape', []) # Dimension names
vardtype = string_to_dtype(vvalue.get('type'))
@@ -257,19 +256,19 @@ def update_attributes(self, attributes):
try:
self.setncattr(k, v)
except BaseException:
-L.warning('Could not set global attribute {}: {}'.format(k, v))
+L.warning(f'Could not set global attribute {k}: {v}')

for k, v in attributes.items():
if k in self.variables:
for n, z in v.items():

# Don't re-assign fill value attributes
if n in ['_FillValue', 'missing_value']:
-L.warning('Refusing to set {} on {}'.format(n, k))
+L.warning(f'Refusing to set {n} on {k}')
continue

try:
self.variables[k].setncattr(n, z)
except BaseException:
-L.warning('Could not set attribute {} on {}'.format(n, k))
+L.warning(f'Could not set attribute {n} on {k}')
self.sync()
1 change: 0 additions & 1 deletion pocean/dsg/__init__.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8

# Profile
from .profile.im import IncompleteMultidimensionalProfile
1 change: 0 additions & 1 deletion pocean/dsg/profile/__init__.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from collections import namedtuple

from shapely.geometry import LineString, Point
9 changes: 4 additions & 5 deletions pocean/dsg/profile/im.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from collections import OrderedDict
from copy import copy

@@ -92,7 +91,7 @@ def from_dataframe(cls, df, output, **kwargs):
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
# which is not support in xarray
-changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
+changed_axes = { k: f'{v}_dim' for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)

# Downcast anything from int64 to int32
@@ -235,10 +234,10 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
L.warning(f"Skipping variable {dnam} that is completely masked")
continue
else:
L.warning("Skipping variable {} since it didn't match any dimension sizes".format(dnam))
L.warning(f"Skipping variable {dnam} since it didn't match any dimension sizes")
continue

# Mark rows with data so we don't remove them with clear_rows
@@ -264,7 +263,7 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
return df

def nc_attributes(self, axes, daxes):
-atts = super(IncompleteMultidimensionalProfile, self).nc_attributes()
+atts = super().nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'profile',
5 changes: 2 additions & 3 deletions pocean/dsg/profile/om.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from collections import OrderedDict
from copy import copy

@@ -173,10 +172,10 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
L.warning(f"Skipping variable {dnam} that is completely masked")
continue
else:
L.warning("Skipping variable {} since it didn't match any dimension sizes".format(dnam))
L.warning(f"Skipping variable {dnam} since it didn't match any dimension sizes")
continue

# Mark rows with data so we don't remove them with clear_rows
1 change: 0 additions & 1 deletion pocean/dsg/timeseries/cr.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from pocean import logger # noqa
from pocean.cf import CFDataset

1 change: 0 additions & 1 deletion pocean/dsg/timeseries/im.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from pocean import logger # noqa
from pocean.cf import CFDataset

1 change: 0 additions & 1 deletion pocean/dsg/timeseries/ir.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from pocean import logger # noqa
from pocean.cf import CFDataset

11 changes: 5 additions & 6 deletions pocean/dsg/timeseries/om.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from collections import OrderedDict
from copy import copy

@@ -85,7 +84,7 @@ def from_dataframe(cls, df, output, **kwargs):
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
# which is not support in xarray
-changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
+changed_axes = { k: f'{v}_dim' for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)

# Downcast anything from int64 to int32
@@ -169,7 +168,7 @@ def ts(i):
try:
v[ts(i)] = vvalues
except BaseException:
-L.debug('{} was not written. Likely a metadata variable'.format(v.name))
+L.debug(f'{v.name} was not written. Likely a metadata variable')

# Set global attributes
nc.update_attributes(attributes)
@@ -234,11 +233,11 @@ def to_dataframe(self, clean_cols=False, clean_rows=False, **kwargs):
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
L.warning(f"Skipping variable {dnam} that is completely masked")
continue
else:
if dvar[:].flatten().size != t.size:
L.warning("Variable {} is not the correct size, skipping.".format(dnam))
L.warning(f"Variable {dnam} is not the correct size, skipping.")
continue

# Mark rows with data so we don't remove them with clear_rows
@@ -264,7 +263,7 @@ def to_dataframe(self, clean_cols=False, clean_rows=False, **kwargs):
return df

def nc_attributes(self, axes, daxes):
-atts = super(OrthogonalMultidimensionalTimeseries, self).nc_attributes()
+atts = super().nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'timeseries',
1 change: 0 additions & 1 deletion pocean/dsg/timeseriesProfile/im.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from pocean.cf import CFDataset


13 changes: 6 additions & 7 deletions pocean/dsg/timeseriesProfile/om.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from collections import OrderedDict
from copy import copy

@@ -76,7 +75,7 @@ def from_dataframe(cls, df, output, **kwargs):
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
# which is not supported in xarray
-changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
+changed_axes = { k: f'{v}_dim' for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)

# Downcast anything from int64 to int32
@@ -167,7 +166,7 @@ def from_dataframe(cls, df, output, **kwargs):
try:
v[:] = vvalues.reshape(v.shape)
except BaseException:
-L.exception('Failed to add {}'.format(c))
+L.exception(f'Failed to add {c}')
continue

full_columns = [ f for f in data_columns if f not in detach_z_columnms ]
@@ -253,7 +252,7 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
L.warning(f"Skipping variable {dnam} that is completely masked")
continue

# Carry through profile only variables
@@ -264,12 +263,12 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
vdata[:, 1:] = np.ma.masked
vdata = vdata.flatten()
if vdata.size != t.size:
L.warning("Variable {} is not the correct size, skipping.".format(dnam))
L.warning(f"Variable {dnam} is not the correct size, skipping.")
continue

else:
if vdata.size != t.size:
L.warning("Variable {} is not the correct size, skipping.".format(dnam))
L.warning(f"Variable {dnam} is not the correct size, skipping.")
continue

# Mark rows with data so we don't remove them with clear_rows
@@ -295,7 +294,7 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
return df

def nc_attributes(self, axes, daxes):
-atts = super(OrthogonalMultidimensionalTimeseriesProfile, self).nc_attributes()
+atts = super().nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'timeSeriesProfile',
11 changes: 5 additions & 6 deletions pocean/dsg/timeseriesProfile/r.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from collections import OrderedDict
from copy import copy

@@ -75,7 +74,7 @@ def from_dataframe(cls, df, output, **kwargs):
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
# which is not supported in xarray
-changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
+changed_axes = { k: f'{v}_dim' for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)

# Downcast anything from int64 to int32
@@ -155,7 +154,7 @@ def from_dataframe(cls, df, output, **kwargs):
else:
v[:] = vvalues.reshape(v.shape)
except BaseException:
-L.exception('Failed to add {}'.format(c))
+L.exception(f'Failed to add {c}')
continue

# Metadata variables
@@ -282,10 +281,10 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
L.warning(f"Skipping variable {dnam} that is completely masked")
continue
else:
L.warning("Skipping variable {} since it didn't match any dimension sizes".format(dnam))
L.warning(f"Skipping variable {dnam} since it didn't match any dimension sizes")
continue

# Mark rows with data so we don't remove them with clear_rows
@@ -311,7 +310,7 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
return df

def nc_attributes(self, axes, daxes):
-atts = super(RaggedTimeseriesProfile, self).nc_attributes()
+atts = super().nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'timeSeriesProfile',
1 change: 0 additions & 1 deletion pocean/dsg/trajectory/__init__.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from collections import namedtuple

from shapely.geometry import LineString, Point
13 changes: 6 additions & 7 deletions pocean/dsg/trajectory/cr.py
@@ -1,5 +1,4 @@
#!python
-# coding=utf-8
from collections import OrderedDict
from copy import copy

@@ -69,7 +68,7 @@ def from_dataframe(cls, df, output, **kwargs):
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
# which is not support in xarray
-changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
+changed_axes = { k: f'{v}_dim' for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)

# Downcast anything from int64 to int32
@@ -123,7 +122,7 @@ def from_dataframe(cls, df, output, **kwargs):
try:
v[i] = vvalues
except BaseException:
-L.exception('Failed to add {}'.format(c))
+L.exception(f'Failed to add {c}')
continue

# Add all of the columns based on the sample dimension. Take all columns and remove the
@@ -149,7 +148,7 @@ def from_dataframe(cls, df, output, **kwargs):
else:
v[:] = vvalues.reshape(v.shape)
except BaseException:
-L.exception('Failed to add {}'.format(c))
+L.exception(f'Failed to add {c}')
continue

# Metadata variables
@@ -218,10 +217,10 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
L.warning(f"Skipping variable {dnam} that is completely masked")
continue
else:
L.warning("Skipping variable {} since it didn't match any dimension sizes".format(dnam))
L.warning(f"Skipping variable {dnam} since it didn't match any dimension sizes")
continue

# Mark rows with data so we don't remove them with clear_rows
@@ -247,7 +246,7 @@ def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
return df

def nc_attributes(self, axes, daxes):
-atts = super(ContiguousRaggedTrajectory, self).nc_attributes()
+atts = super().nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'trajectory',
(Diffs for the remaining 19 of the 36 changed files did not load and are not shown.)
