add flake8 and pylint to lint workflow #1285

Merged: 13 commits, Jul 1, 2022
Changes from 11 commits
62 changes: 62 additions & 0 deletions .github/workflows/lint.yaml
@@ -0,0 +1,62 @@
name: lint

on:
pull_request:
paths:
- '**'

push:
paths:
- '**'
- '!.github/**'
- '.github/workflows/lint.yaml'
- '!docker/**'
- '!docs/**'
- '!contrib/**'

jobs:
flake8:
name: flake8
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.9
- run: python -m pip install flake8 pep8-naming
# ignore and ignore-names list as per setup.cfg
- name: flake8
uses: liskin/gh-problem-matcher-wrap@v1.0.1
with:
linters: flake8
run: |
flake8 . --exclude Dockerfile --ignore=E711,E226,W503,W504,E124,F841,W605 --ignore-names=W,H,A,S,R,T,WS,X,Y,Z,XX,YY,XY,B,M,N,L,NX,NY

pylint:
runs-on: ubuntu-latest
name: Pylint
steps:
- name: checkout git
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Install dependencies and run pylint
run: |
pip install --upgrade -e '.[test]'
pip install pylint
pylint -j 2 --reports no datacube


pycodestyle:
name: pycodestyle
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Run pycodestyle
run: |
pip install --upgrade -e '.[test]'
pip install pycodestyle
pycodestyle tests integration_tests examples --max-line-length 120
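Note on the flake8 step above: the --ignore list mirrors setup.cfg, and --ignore-names whitelists the short uppercase identifiers (X, Y, Z, NX, NY, ...) that pep8-naming would otherwise report. A minimal sketch of what those reports look like (hypothetical function, not part of this PR; X and Y are assumed to be numpy-like arrays):

def grid_shape(X, Y, cell_size):  # N803: argument name should be lowercase (X, Y)
    NX = int((X.max() - X.min()) / cell_size)  # N806: variable in function should be lowercase
    NY = int((Y.max() - Y.min()) / cell_size)  # both suppressed for these names by --ignore-names
    return NX, NY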
16 changes: 0 additions & 16 deletions .github/workflows/main.yml
@@ -81,22 +81,6 @@ jobs:
fi
EOF

- name: Check Code Style
run: |
docker run --rm \
-v $(pwd):/code \
-e SKIP_DB=yes \
${{ matrix.docker_image }} \
pycodestyle tests integration_tests examples --max-line-length 120

- name: Lint Code
run: |
docker run --rm \
-v $(pwd):/code \
-e SKIP_DB=yes \
${{ matrix.docker_image }} \
pylint -j 2 --reports no datacube

- name: Run Tests
run: |
cat <<EOF | docker run --rm -i -v $(pwd):/code ${{ matrix.docker_image }} bash -
27 changes: 15 additions & 12 deletions datacube/api/core.py
@@ -27,7 +27,7 @@
from ..drivers import new_datasource


class TerminateCurrentLoad(Exception):
class TerminateCurrentLoad(Exception): # noqa: N818
""" This exception is raised by user code from `progress_cbk`
to terminate currently running `.load`
"""
@@ -126,16 +126,16 @@ def list_products(self, with_pandas=True, dataset_count=False):

# Optionally compute dataset count for each product and add to row/cols
# Product lists are sorted by product name to ensure 1:1 match
if dataset_count:
if dataset_count:

# Load counts
counts = [(p.name, c) for p, c in self.index.datasets.count_by_product()]

# Sort both rows and counts by product name
from operator import itemgetter
rows = sorted(rows, key=itemgetter(0))
counts = sorted(counts, key=itemgetter(0))

# Add sorted count to each existing row
rows = [row + [count[1]] for row, count in zip(rows, counts)]
cols = cols + ['dataset_count']
@@ -181,10 +181,11 @@ def _list_measurements(self):

#: pylint: disable=too-many-arguments, too-many-locals
def load(self, product=None, measurements=None, output_crs=None, resolution=None, resampling=None,
skip_broken_datasets=False, dask_chunks=None, like=None, fuse_func=None, align=None,
datasets=None, dataset_predicate=None, progress_cbk=None, **query):
skip_broken_datasets=False, dask_chunks=None, like=None, fuse_func=None, align=None,
datasets=None, dataset_predicate=None, progress_cbk=None, **query):
"""
Load data as an ``xarray.Dataset`` object. Each measurement will be a data variable in the :class:`xarray.Dataset`.
Load data as an ``xarray.Dataset`` object.
Each measurement will be a data variable in the :class:`xarray.Dataset`.

See the `xarray documentation <http://xarray.pydata.org/en/stable/data-structures.html>`_ for usage of the
:class:`xarray.Dataset` and :class:`xarray.DataArray` objects.
@@ -279,7 +280,8 @@ def load(self, product=None, measurements=None, output_crs=None, resolution=None

:param list(str) measurements:
Measurements name or list of names to be included, as listed in :meth:`list_measurements`.
These will be loaded as individual ``xr.DataArray`` variables in the output ``xarray.Dataset`` object.
These will be loaded as individual ``xr.DataArray`` variables in
the output ``xarray.Dataset`` object.

If a list is specified, the measurements will be returned in the order requested.
By default all available measurements are included.
@@ -289,8 +291,8 @@ def load(self, product=None, measurements=None, output_crs=None, resolution=None
For example: ``'x', 'y', 'time', 'crs'``.

:param str output_crs:
The CRS of the returned data, for example ``EPSG:3577``. If no CRS is supplied, the CRS of the stored data is used
if available.
The CRS of the returned data, for example ``EPSG:3577``.
If no CRS is supplied, the CRS of the stored data is used if available.

This differs from the ``crs`` parameter described above, which is used to define the CRS
of the coordinates in the query itself.
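Taken together, `measurements`, `output_crs` and `resolution` control which variables come back and how they are gridded. A minimal usage sketch (the product and band names are hypothetical):

import datacube

dc = datacube.Datacube(app="load-example")
data = dc.load(
    product="ls8_example_product",          # hypothetical product name
    measurements=["red", "green", "blue"],  # data variables, returned in the order requested
    output_crs="EPSG:3577",                 # CRS of the returned data
    resolution=(-30, 30),                   # (y, x) pixel size in output_crs units
    x=(148.0, 148.2), y=(-35.2, -35.0),     # spatial query, in the query CRS
)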
@@ -358,7 +360,8 @@ def load(self, product=None, measurements=None, output_crs=None, resolution=None

:param function dataset_predicate:
Optional. A function that can be passed to restrict loaded datasets. A predicate function should
take a :class:`datacube.model.Dataset` object (e.g. as returned from :meth:`find_datasets`) and return a boolean.
take a :class:`datacube.model.Dataset` object (e.g. as returned from :meth:`find_datasets`) and
return a boolean.
For example, loaded data could be filtered to January observations only by passing the following
predicate function that returns True for datasets acquired in January::

2 changes: 0 additions & 2 deletions datacube/api/grid_workflow.py
@@ -3,9 +3,7 @@
# Copyright (c) 2015-2020 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import logging
import numpy
import xarray
from itertools import groupby
from collections import OrderedDict
import pandas as pd

2 changes: 1 addition & 1 deletion datacube/api/query.py
@@ -46,7 +46,7 @@ def __init__(self, group_by_func, dimension, units, sort_key=None, group_key=Non
self.sort_key = sort_key

if group_key is None:
group_key = lambda datasets: group_by_func(datasets[0])
group_key = lambda datasets: group_by_func(datasets[0]) # noqa: E731
self.group_key = group_key
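E731 is flake8's "do not assign a lambda expression, use a def" check; the `# noqa: E731` keeps the existing one-liner rather than restructuring it. For reference, a def-based equivalent that would satisfy the check (sketch, not applied in this PR):

def make_group_key(group_by_func):
    """def-based equivalent of the suppressed lambda: key a group by its first dataset."""
    def group_key(datasets):
        return group_by_func(datasets[0])
    return group_key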


2 changes: 1 addition & 1 deletion datacube/config.py
@@ -10,7 +10,7 @@
from pathlib import Path
import configparser
from urllib.parse import unquote_plus, urlparse, parse_qsl
from typing import Any, Dict, Iterable, MutableMapping, Optional, Tuple, Union, cast
from typing import Any, Dict, Iterable, MutableMapping, Optional, Tuple, Union

PathLike = Union[str, 'os.PathLike[Any]']

2 changes: 1 addition & 1 deletion datacube/drivers/netcdf/_safestrings.py
@@ -75,7 +75,7 @@ def __getitem__(self, name):
return _VariableProxy(var)

#: pylint: disable=invalid-name
def createVariable(self, *args, **kwargs):
def createVariable(self, *args, **kwargs): # noqa: N802
new_var = super(_NC4DatasetProxy, self).createVariable(*args, **kwargs)
return _VariableProxy(new_var)

2 changes: 1 addition & 1 deletion datacube/drivers/netcdf/writer.py
@@ -192,7 +192,7 @@ def _write_lcc2_params(crs_var, crs):

# e.g. http://spatialreference.org/ref/sr-org/mexico-inegi-lambert-conformal-conic/
crs_var.grid_mapping_name = cf['grid_mapping_name']
crs_var.standard_parallel = cf['standard_parallel']
crs_var.standard_parallel = cf['standard_parallel']
crs_var.latitude_of_projection_origin = cf['latitude_of_projection_origin']
crs_var.longitude_of_central_meridian = cf['longitude_of_central_meridian']
crs_var.false_easting = cf['false_easting']
2 changes: 1 addition & 1 deletion datacube/drivers/postgis/_connections.py
@@ -21,7 +21,7 @@

from sqlalchemy import event, create_engine, text
from sqlalchemy.engine import Engine
from sqlalchemy.engine.url import URL as EngineUrl
from sqlalchemy.engine.url import URL as EngineUrl # noqa: N811

import datacube
from datacube.index.exceptions import IndexSetupError
19 changes: 10 additions & 9 deletions datacube/drivers/postgis/_core.py
@@ -9,12 +9,12 @@
import logging

from datacube.drivers.postgis.sql import (INSTALL_TRIGGER_SQL_TEMPLATE,
SCHEMA_NAME, TYPES_INIT_SQL,
UPDATE_COLUMN_MIGRATE_SQL_TEMPLATE,
ADDED_COLUMN_MIGRATE_SQL_TEMPLATE,
UPDATE_TIMESTAMP_SQL,
escape_pg_identifier,
pg_column_exists, pg_exists)
SCHEMA_NAME, TYPES_INIT_SQL,
UPDATE_COLUMN_MIGRATE_SQL_TEMPLATE,
ADDED_COLUMN_MIGRATE_SQL_TEMPLATE,
UPDATE_TIMESTAMP_SQL,
escape_pg_identifier,
pg_column_exists)
from sqlalchemy import MetaData
from sqlalchemy.engine import Engine
from sqlalchemy.schema import CreateSchema
@@ -40,7 +40,7 @@

def install_timestamp_trigger(connection):
from . import _schema
TABLE_NAMES = [
TABLE_NAMES = [ # noqa: N806
_schema.METADATA_TYPE.name,
_schema.PRODUCT.name,
_schema.DATASET.name,
@@ -53,9 +53,10 @@ def install_timestamp_trigger(connection):
connection.execute(UPDATE_COLUMN_MIGRATE_SQL_TEMPLATE.format(schema=SCHEMA_NAME, table=name))
connection.execute(INSTALL_TRIGGER_SQL_TEMPLATE.format(schema=SCHEMA_NAME, table=name))


def install_added_column(connection):
from . import _schema
TABLE_NAME = _schema.DATASET_LOCATION.name
TABLE_NAME = _schema.DATASET_LOCATION.name # noqa: N806
connection.execute(ADDED_COLUMN_MIGRATE_SQL_TEMPLATE.format(schema=SCHEMA_NAME, table=TABLE_NAME))


@@ -113,7 +114,7 @@ def ensure_db(engine, with_permissions=True):
_LOG.info("Creating added column.")
install_added_column(c)
c.execute('commit')
except:
except: # noqa: E722
c.execute('rollback')
raise
finally:
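The `noqa` markers in this file flag deliberate style deviations rather than fixes: uppercase locals such as `TABLE_NAMES` stay as-is (N806), and `ensure_db` keeps its bare `except:` so the rollback runs for any error, including `KeyboardInterrupt`, before re-raising (E722). A minimal sketch of that transaction pattern, assuming a connection object with an `execute` method (not code from this PR):

def run_in_transaction(connection, statements):
    """Execute statements atomically; roll back on any failure, then re-raise."""
    connection.execute('begin')
    try:
        for sql in statements:
            connection.execute(sql)
        connection.execute('commit')
    except:  # noqa: E722 -- bare except so even KeyboardInterrupt triggers the rollback
        connection.execute('rollback')
        raise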
2 changes: 1 addition & 1 deletion datacube/drivers/postgis/_schema.py
@@ -72,7 +72,7 @@
# Typing note: sqlalchemy-stubs doesn't handle this legitimate calling pattern.
Column('metadata_type_ref', None, ForeignKey(METADATA_TYPE.c.id), nullable=False), # type: ignore[call-overload]
# Typing note: sqlalchemy-stubs doesn't handle this legitimate calling pattern.
Column('dataset_type_ref', None, ForeignKey(PRODUCT.c.id), index=True, nullable=False), # type: ignore[call-overload]
Column('dataset_type_ref', None, ForeignKey(PRODUCT.c.id), index=True, nullable=False), # type: ignore[call-overload] # noqa: E501

Column('metadata', postgres.JSONB, index=False, nullable=False),

1 change: 1 addition & 0 deletions datacube/drivers/postgis/sql.py
@@ -18,6 +18,7 @@

class CreateView(Executable, ClauseElement):
inherit_cache = True

def __init__(self, name, select):
self.name = name
self.select = select
2 changes: 1 addition & 1 deletion datacube/drivers/postgres/_connections.py
@@ -21,7 +21,7 @@

from sqlalchemy import event, create_engine, text
from sqlalchemy.engine import Engine
from sqlalchemy.engine.url import URL as EngineUrl
from sqlalchemy.engine.url import URL as EngineUrl # noqa: N811

import datacube
from datacube.index.exceptions import IndexSetupError
9 changes: 5 additions & 4 deletions datacube/drivers/postgres/_core.py
@@ -14,7 +14,7 @@
ADDED_COLUMN_MIGRATE_SQL_TEMPLATE,
UPDATE_TIMESTAMP_SQL,
escape_pg_identifier,
pg_column_exists, pg_exists)
pg_column_exists)
from sqlalchemy import MetaData
from sqlalchemy.engine import Engine
from sqlalchemy.schema import CreateSchema
@@ -40,7 +40,7 @@

def install_timestamp_trigger(connection):
from . import _schema
TABLE_NAMES = [
TABLE_NAMES = [ # noqa: N806
_schema.METADATA_TYPE.name,
_schema.PRODUCT.name,
_schema.DATASET.name,
@@ -53,9 +53,10 @@ def install_timestamp_trigger(connection):
connection.execute(UPDATE_COLUMN_MIGRATE_SQL_TEMPLATE.format(schema=SCHEMA_NAME, table=name))
connection.execute(INSTALL_TRIGGER_SQL_TEMPLATE.format(schema=SCHEMA_NAME, table=name))


def install_added_column(connection):
from . import _schema
TABLE_NAME = _schema.DATASET_LOCATION.name
TABLE_NAME = _schema.DATASET_LOCATION.name # noqa: N806
connection.execute(ADDED_COLUMN_MIGRATE_SQL_TEMPLATE.format(schema=SCHEMA_NAME, table=TABLE_NAME))


@@ -113,7 +114,7 @@ def ensure_db(engine, with_permissions=True):
_LOG.info("Creating added column.")
install_added_column(c)
c.execute('commit')
except:
except: # noqa: E722
c.execute('rollback')
raise
finally:
2 changes: 1 addition & 1 deletion datacube/drivers/postgres/_schema.py
@@ -72,7 +72,7 @@
# Typing note: sqlalchemy-stubs doesn't handle this legitimate calling pattern.
Column('metadata_type_ref', None, ForeignKey(METADATA_TYPE.c.id), nullable=False), # type: ignore[call-overload]
# Typing note: sqlalchemy-stubs doesn't handle this legitimate calling pattern.
Column('dataset_type_ref', None, ForeignKey(PRODUCT.c.id), index=True, nullable=False), # type: ignore[call-overload]
Column('dataset_type_ref', None, ForeignKey(PRODUCT.c.id), index=True, nullable=False), # type: ignore[call-overload] # noqa: E501

Column('metadata', postgres.JSONB, index=False, nullable=False),

1 change: 1 addition & 0 deletions datacube/drivers/postgres/sql.py
@@ -18,6 +18,7 @@

class CreateView(Executable, ClauseElement):
inherit_cache = True

def __init__(self, name, select):
self.name = name
self.select = select
2 changes: 0 additions & 2 deletions datacube/helpers.py
@@ -9,8 +9,6 @@
"""

import numpy as np
import rasterio # type: ignore[import]
import warnings

DEFAULT_PROFILE = {
'blockxsize': 256,