diff --git a/.flake8 b/.flake8
new file mode 100644
index 000000000..6781ba03b
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,23 @@
+[flake8]
+extend-ignore =
+    E114,
+    E115,
+    E116,
+    E201,
+    E202,
+    E203,
+    E204,
+    E231,
+    E265,
+    E266,
+    E303,
+    E402,
+    E501,
+exclude =
+    ; __init__.py # totally ignore __init__.py files
+    setup.py # ignore setup.py file
+    docs/
+#F401 ignore unused imports in __init__.py files
+#F403 ignore unable to detect undefined names from import *
+per-file-ignores =
+    __init__.py:F401,F403
\ No newline at end of file
diff --git a/.github/linters/.flake8 b/.github/linters/.flake8
deleted file mode 100644
index e30e76b99..000000000
--- a/.github/linters/.flake8
+++ /dev/null
@@ -1,2 +0,0 @@
-[flake8]
-select: "F402,F404,F812,F823,F831,F821,F822,E112,E113,E901,E902,E999"
diff --git a/.github/workflows/build-flake.yml b/.github/workflows/build-flake.yml
index 3bf56d698..e69de29bb 100644
--- a/.github/workflows/build-flake.yml
+++ b/.github/workflows/build-flake.yml
@@ -1,40 +0,0 @@
-# This workflow will install Python dependencies, run tests and lint with a single version of Python
-# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-
-name: Check module can be imported
-
-on:
-  push:
-    branches: [ "dev" ]
-  pull_request:
-    branches: [ "dev" ]
-
-permissions:
-  contents: read
-
-jobs:
-  build:
-
-    runs-on: ubuntu-latest
-
-    steps:
-    - uses: actions/checkout@v4
-    - name: Set up Python 3.10
-      uses: actions/setup-python@v5
-      with:
-        python-version: "3.10"
-    - name: Install dependencies
-      run: |
-        python -m pip install --upgrade pip
-        pip install flake8 pytest
-        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
-    - name: Lint with flake8
-      run: |
-        # stop the build if there are Python syntax errors or undefined names
-        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
-        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
-    - name: Test that the module imports
-      run: |
-        pip install .
-        python -c "import py4DSTEM; print(py4DSTEM.__version__)"
diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml
index a83e35d30..e86e49ed8 100644
--- a/.github/workflows/linter.yml
+++ b/.github/workflows/linter.yml
@@ -1,4 +1,4 @@
-name: Check for errors with flake8
+name: Lint with super-linter@v5-slim

 on:
   push:
@@ -17,9 +17,14 @@ jobs:
           fetch-depth: 0

       - name: Lint Code Base
-        uses: github/super-linter@v5
+        uses: super-linter/super-linter/slim@v5 # updated to latest slim as quicker to download
         env:
-          VALIDATE_ALL_CODEBASE: false
-          VALIDATE_PYTHON_FLAKE8: true
-          DEFAULT_BRANCH: "dev"
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          VALIDATE_ALL_CODEBASE: false # only check changes
+          VALIDATE_PYTHON_FLAKE8: true # lint with flake8
+          DEFAULT_BRANCH: "dev" # set default branch to dev
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # for github things
+          # FILTER_REGEX_EXCLUDE: .*test/.* # exclude test dirs
+          FILTER_REGEX_EXCLUDE: .*__init__.py/.* # exclude test dirs
+          FILTER_REGEX_INCLUDE: .*py4DSTEM/.* # only look for py4DSTEM
+          LINTER_RULES_PATH: / # set toplevel dir as the path to look for rules
+          PYTHON_FLAKE8_CONFIG_FILE: .flake8 # set specific config file
diff --git a/py4DSTEM/braggvectors/braggvectors.py b/py4DSTEM/braggvectors/braggvectors.py
index 6ccb46d1f..3c60c6e36 100644
--- a/py4DSTEM/braggvectors/braggvectors.py
+++ b/py4DSTEM/braggvectors/braggvectors.py
@@ -1,7 +1,7 @@
 # Defines the BraggVectors class

 from py4DSTEM.data import Data
-from emdfile import Custom, PointListArray, PointList, Metadata
+from emdfile import Custom, PointListArray, Metadata
 from py4DSTEM.braggvectors.braggvector_methods import BraggVectorMethods
 from os.path import basename
 import numpy as np
diff --git a/py4DSTEM/braggvectors/diskdetection.py b/py4DSTEM/braggvectors/diskdetection.py
index 99818b75e..8789724d7 100644
--- a/py4DSTEM/braggvectors/diskdetection.py
+++ b/py4DSTEM/braggvectors/diskdetection.py
@@ -221,7 +221,7 @@ def find_Bragg_disks(
             mode = "dp"
         elif data.ndim == 3:
             mode = "dp_stack"
-    except:
+    except Exception:
        er = f"entry {data} for `data` could not be parsed"
        raise Exception(er)

diff --git a/py4DSTEM/braggvectors/diskdetection_aiml.py b/py4DSTEM/braggvectors/diskdetection_aiml.py
index 8e3607edb..d61fcab5f 100644
--- a/py4DSTEM/braggvectors/diskdetection_aiml.py
+++ b/py4DSTEM/braggvectors/diskdetection_aiml.py
@@ -4,18 +4,15 @@
 """

 import os
-import glob
 import json
 import shutil
 import numpy as np
 from pathlib import Path
-from scipy.ndimage import gaussian_filter
 from time import time
-from numbers import Number

-from emdfile import tqdmnd, PointList, PointListArray
+from emdfile import tqdmnd, PointListArray
 from py4DSTEM.braggvectors.braggvectors import BraggVectors
 from py4DSTEM.data import QPoints
 from py4DSTEM.process.utils import get_maxima_2D
@@ -105,12 +102,14 @@ def find_Bragg_disks_aiml_single_DP(
     """
     try:
         import crystal4D
-    except:
-        raise ImportError("Import Error: Please install crystal4D before proceeding")
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(
+            "Import Error: Please install crystal4D before proceeding"
+        )
     try:
         import tensorflow as tf
-    except:
-        raise ImportError(
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(
             "Please install tensorflow before proceeding - please check "
             + "https://www.tensorflow.org/install"
             + "for more information"
@@ -258,8 +257,10 @@ find_Bragg_disks_aiml_selected(

     try:
         import crystal4D
-    except:
-        raise ImportError("Import Error: Please install crystal4D before proceeding")
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(
+            "Import Error: Please install crystal4D before proceeding"
+        )

     assert len(Rx) == len(Ry)
     peaks = []
@@ -435,8 +436,10 @@ def find_Bragg_disks_aiml_serial(

     try:
         import crystal4D
-    except:
-        raise ImportError("Import Error: Please install crystal4D before proceeding")
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(
+            "Import Error: Please install crystal4D before proceeding"
+        )

     # Make the peaks PointListArray
     dtype = [("qx", float), ("qy", float), ("intensity", float)]
@@ -645,8 +648,10 @@ def find_Bragg_disks_aiml(
     """
     try:
         import crystal4D
-    except:
-        raise ImportError("Please install crystal4D before proceeding")
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(
+            "Import Error: Please install crystal4D before proceeding"
+        )

     def _parse_distributed(distributed):
         import os
@@ -842,7 +847,8 @@ def _integrate_disks(DP, maxima_x, maxima_y, maxima_int, int_window_radius=1):
         disks.append(np.average(disk))
     try:
         disks = disks / max(disks)
-    except:
+    # possibly a ZeroDivisionError
+    except Exception:
         pass
     return (maxima_x, maxima_y, disks)
@@ -880,8 +886,8 @@ def _get_latest_model(model_path=None):

     try:
         import tensorflow as tf
-    except:
-        raise ImportError(
+    except ModuleNotFoundError:
+        raise ModuleNotFoundError(
             "Please install tensorflow before proceeding - please check "
             + "https://www.tensorflow.org/install"
             + "for more information"
@@ -893,8 +899,11 @@ def _get_latest_model(model_path=None):
     if model_path is None:
         try:
             os.mkdir("./tmp")
-        except:
+        except FileExistsError:
+            pass
+        except Exception as e:
             pass
+            # raise e
         # download the json file with the meta data
         gdrive_download(
             "FCU-Net",
@@ -912,7 +921,8 @@ def _get_latest_model(model_path=None):
         with open("./tmp/model_metadata_old.json") as f_old:
             metaold = json.load(f_old)
             file_id_old = metaold["file_id"]
-    except:
+    # I think just FileNotFoundError
+    except (FileNotFoundError, Exception):
         file_id_old = file_id

     if os.path.exists(file_path) and file_id == file_id_old:
@@ -929,7 +939,7 @@ def _get_latest_model(model_path=None):
         gdrive_download(file_id, destination="./tmp", filename=filename.name)
         try:
             shutil.unpack_archive(filename, "./tmp", format="zip")
-        except:
+        except Exception:
             pass
         model_path = file_path
         os.rename("./tmp/model_metadata.json", "./tmp/model_metadata_old.json")
diff --git a/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py b/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py
index bd2736719..14eb5d83a 100644
--- a/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py
+++ b/py4DSTEM/braggvectors/diskdetection_aiml_cuda.py
@@ -9,7 +9,6 @@
 from emdfile import tqdmnd
 from py4DSTEM.braggvectors.braggvectors import BraggVectors
 from emdfile import PointList, PointListArray
-from py4DSTEM.data import QPoints
 from py4DSTEM.braggvectors.kernels import kernels
 from py4DSTEM.braggvectors.diskdetection_aiml import _get_latest_model
@@ -23,8 +22,8 @@
 try:
     import tensorflow as tf
-except:
-    raise ImportError(
+except ModuleNotFoundError:
+    raise ModuleNotFoundError(
         "Please install tensorflow before proceeding - please check "
         + "https://www.tensorflow.org/install"
         + "for more information"
@@ -637,7 +636,8 @@ def upsampled_correlation_cp(imageCorr, upsampleFactor, xyShift):
         )
         dx = (icc[2, 1] - icc[0, 1]) / (4 * icc[1, 1] - 2 * icc[2, 1] - 2 * icc[0, 1])
         dy = (icc[1, 2] - icc[1, 0]) / (4 * icc[1, 1] - 2 * icc[1, 2] - 2 * icc[1, 0])
-    except:
+    # I think this is just the IndexError
+    except Exception:
         dx, dy = (
             0,
             0,
@@ -733,6 +733,7 @@ def _integrate_disks_cp(DP, maxima_x, maxima_y, maxima_int, int_window_radius=1)
         disks.append(np.average(disk))
     try:
         disks = disks / max(disks)
-    except:
+    # Possibly ZeroDivisionError
+    except Exception:
         pass
     return (maxima_x, maxima_y, disks)
diff --git a/py4DSTEM/braggvectors/diskdetection_cuda.py b/py4DSTEM/braggvectors/diskdetection_cuda.py
index ddea4d9ad..8b0120f3a 100644
--- a/py4DSTEM/braggvectors/diskdetection_cuda.py
+++ b/py4DSTEM/braggvectors/diskdetection_cuda.py
@@ -621,7 +621,8 @@ def upsampled_correlation(imageCorr, upsampleFactor, xyShift):
             dy = (icc[1, 2] - icc[1, 0]) / (
                 4 * icc[1, 1] - 2 * icc[1, 2] - 2 * icc[1, 0]
             )
-        except:
+        # TODO Work out what exception to use IndexError
+        except Exception:
             dx, dy = (
                 0,
                 0,
diff --git a/py4DSTEM/braggvectors/diskdetection_parallel_new.py b/py4DSTEM/braggvectors/diskdetection_parallel_new.py
index dccc0dd4b..aacb61575 100644
--- a/py4DSTEM/braggvectors/diskdetection_parallel_new.py
+++ b/py4DSTEM/braggvectors/diskdetection_parallel_new.py
@@ -1,7 +1,4 @@
 import numpy as np
-import matplotlib.pyplot as plt
-import h5py
-import time
 import dill

 import dask
@@ -9,16 +6,11 @@
 import dask.config
 from dask import delayed
 from dask.distributed import Client, LocalCluster
-from dask.diagnostics import ProgressBar

-# import dask.bag as db
-
-# import distributed
 from distributed.protocol.serialize import register_serialization_family
 import distributed

-import py4DSTEM
-from emdfile import PointListArray, PointList
+from emdfile import PointListArray
 from py4DSTEM.braggvectors.diskdetection import _find_Bragg_disks_single_DP_FK
diff --git a/py4DSTEM/braggvectors/threshold.py b/py4DSTEM/braggvectors/threshold.py
index 7e19404b1..7ae883180 100644
--- a/py4DSTEM/braggvectors/threshold.py
+++ b/py4DSTEM/braggvectors/threshold.py
@@ -25,7 +25,7 @@ def threshold_Braggpeaks(
         pattern
     """
     assert all(
-        [item in pointlistarray.dtype.fields for item in ["qx", "qy", "intensity"]]
+        item in pointlistarray.dtype.fields for item in ["qx", "qy", "intensity"]
     ), "pointlistarray must include the coordinates 'qx', 'qy', and 'intensity'."
     for Rx, Ry in tqdmnd(
         pointlistarray.shape[0],
@@ -112,7 +112,7 @@ def universal_threshold(
     assert isinstance(pointlistarray, PointListArray)
     assert metric in ("maximum", "average", "median", "manual")
     assert all(
-        [item in pointlistarray.dtype.fields for item in ["qx", "qy", "intensity"]]
+        item in pointlistarray.dtype.fields for item in ["qx", "qy", "intensity"]
     ), "pointlistarray must include the coordinates 'qx', 'qy', and 'intensity'."
     _pointlistarray = pointlistarray.copy()
     if name is None:
diff --git a/py4DSTEM/data/calibration.py b/py4DSTEM/data/calibration.py
index 408f977cc..84e1ec800 100644
--- a/py4DSTEM/data/calibration.py
+++ b/py4DSTEM/data/calibration.py
@@ -1,9 +1,7 @@
 # Defines the Calibration class, which stores calibration metadata

 import numpy as np
-from numbers import Number
 from typing import Optional
-from warnings import warn

 from emdfile import Metadata, Root
 from py4DSTEM.data.propagating_calibration import call_calibrate
@@ -505,7 +503,7 @@ def get_origin(self, rx=None, ry=None):
         qx0 = self._get_value("qx0", rx, ry)
         qy0 = self._get_value("qy0", rx, ry)
         ans = (qx0, qy0)
-        if any([x is None for x in ans]):
+        if any(x is None for x in ans):
             ans = None
         return ans
@@ -518,7 +516,7 @@ def get_origin_shift(self, rx=None, ry=None):
         qx0 = self._get_value("qx0_shift", rx, ry)
         qy0 = self._get_value("qy0_shift", rx, ry)
         ans = (qx0, qy0)
-        if any([x is None for x in ans]):
+        if any(x is None for x in ans):
             ans = None
         return ans
@@ -540,7 +538,7 @@ def get_origin_meas(self, rx=None, ry=None):
         qx0 = self._get_value("qx0_meas", rx, ry)
         qy0 = self._get_value("qy0_meas", rx, ry)
         ans = (qx0, qy0)
-        if any([x is None for x in ans]):
+        if any(x is None for x in ans):
             ans = None
         return ans
@@ -615,7 +613,7 @@ def get_ellipse(self, rx=None, ry=None):
         a = self.get_a(rx, ry)
         b = self.get_b(rx, ry)
         theta = self.get_theta(rx, ry)
         ans = (a, b, theta)
-        if any([x is None for x in ans]):
+        if any(x is None for x in ans):
             ans = None
         return ans
@@ -778,7 +776,7 @@ def get_probe_param(self):
         qx0 = self._get_value("qx0")
         qy0 = self._get_value("qy0")
         ans = (probe_semiangle, qx0, qy0)
-        if any([x is None for x in ans]):
+        if any(x is None for x in ans):
             ans = None
         return ans
diff --git a/py4DSTEM/datacube/datacube.py b/py4DSTEM/datacube/datacube.py
index 930dd4c13..1fbe25ffe 100644
--- a/py4DSTEM/datacube/datacube.py
+++ b/py4DSTEM/datacube/datacube.py
@@ -12,7 +12,7 @@
 )

 from typing import Optional, Union
-from emdfile import Array, Metadata, Node, Root, tqdmnd
+from emdfile import Array, Metadata, Node, tqdmnd
 from py4DSTEM.data import Data, Calibration
 from py4DSTEM.datacube.virtualimage import DataCubeVirtualImager
 from py4DSTEM.datacube.virtualdiffraction import DataCubeVirtualDiffraction
@@ -669,7 +669,7 @@ def get_probe_size(
                 "dp_mean" in self.treekeys
             ), "calculate .get_dp_mean() or pass a `dp` arg"
             DP = self.tree("dp_mean").data
-        elif type(dp) == str:
+        elif isinstance(dp, str):
             assert dp in self.treekeys, f"mode {dp} not found in the tree"
             DP = self.tree(dp)
         elif type(dp) == np.ndarray:
diff --git a/py4DSTEM/datacube/virtualimage.py b/py4DSTEM/datacube/virtualimage.py
index 627223d23..fa283788e 100644
--- a/py4DSTEM/datacube/virtualimage.py
+++ b/py4DSTEM/datacube/virtualimage.py
@@ -414,7 +414,7 @@ def position_detector(
                 try:
                     image = self.tree(k)
                     break
-                except:
+                except AssertionError:
                     pass
             if image is None:
                 image = self[0, 0]
diff --git a/py4DSTEM/io/filereaders/read_K2.py b/py4DSTEM/io/filereaders/read_K2.py
index 5df91f2dc..23bbc396e 100644
--- a/py4DSTEM/io/filereaders/read_K2.py
+++ b/py4DSTEM/io/filereaders/read_K2.py
@@ -124,7 +124,7 @@ def __init__(
             # this may be wrong for binned data... in which case the reader doesn't work anyway!
             Q_Nx = gtg.allTags[".SI Image Tags.Acquisition.Parameters.Detector.height"]
             Q_Ny = gtg.allTags[".SI Image Tags.Acquisition.Parameters.Detector.width"]
-        except:
+        except (ValueError, KeyError, AttributeError):
             print("Warning: diffraction pattern shape not detected!")
             print("Assuming 1920x1792 as the diffraction pattern size!")
             Q_Nx = 1792
diff --git a/py4DSTEM/io/filereaders/read_arina.py b/py4DSTEM/io/filereaders/read_arina.py
index 832499d3f..f3269a0f2 100644
--- a/py4DSTEM/io/filereaders/read_arina.py
+++ b/py4DSTEM/io/filereaders/read_arina.py
@@ -1,5 +1,5 @@
 import h5py
-import hdf5plugin
+import hdf5plugin  # noqa: F401
 import numpy as np
 from py4DSTEM.datacube import DataCube
 from py4DSTEM.preprocess.utils import bin2D
diff --git a/py4DSTEM/io/filereaders/read_dm.py b/py4DSTEM/io/filereaders/read_dm.py
index 617529708..118eff6ec 100644
--- a/py4DSTEM/io/filereaders/read_dm.py
+++ b/py4DSTEM/io/filereaders/read_dm.py
@@ -1,7 +1,6 @@
 # Reads a digital micrograph 4D-STEM dataset

 import numpy as np
-from pathlib import Path
 from ncempy.io import dm

 from emdfile import tqdmnd, Array
diff --git a/py4DSTEM/io/legacy/legacy12/read_v0_12.py b/py4DSTEM/io/legacy/legacy12/read_v0_12.py
index 44aa86b6a..6622a5a2b 100644
--- a/py4DSTEM/io/legacy/legacy12/read_v0_12.py
+++ b/py4DSTEM/io/legacy/legacy12/read_v0_12.py
@@ -2,7 +2,7 @@

 import h5py
 import numpy as np
-from os.path import splitext, exists
+from os.path import exists
 from py4DSTEM.io.legacy.read_utils import (
     is_py4DSTEM_file,
     get_py4DSTEM_topgroups,
@@ -101,7 +101,7 @@ def read_v0_12(fp, **kwargs):
         ), "Error: data must be specified with strings or integers only."
     if not isinstance(data_id, (int, np.int_, str)):
         assert all(
-            [isinstance(d, (int, np.int_, str)) for d in data_id]
+            isinstance(d, (int, np.int_, str)) for d in data_id
         ), "Error: data must be specified with strings or integers only."

     # Parse optional arguments
@@ -250,7 +250,7 @@ def get_data_from_str(filepath, tg, data_id, mem="RAM", binfactor=1, bindtype=No
 def get_data_from_list(filepath, tg, data_id, mem="RAM", binfactor=1, bindtype=None):
     """Accepts a filepath to a valid py4DSTEM file and a list or tuple specifying data, and returns the data."""
     assert isinstance(data_id, (list, tuple))
-    assert all([isinstance(d, (int, np.int_, str)) for d in data_id])
+    assert all(isinstance(d, (int, np.int_, str)) for d in data_id)
     data = []
     for el in data_id:
         if isinstance(el, (int, np.int_)):
diff --git a/py4DSTEM/io/legacy/legacy12/read_v0_5.py b/py4DSTEM/io/legacy/legacy12/read_v0_5.py
index de7108b02..3a19c5a4c 100644
--- a/py4DSTEM/io/legacy/legacy12/read_v0_5.py
+++ b/py4DSTEM/io/legacy/legacy12/read_v0_5.py
@@ -2,12 +2,10 @@

 import h5py
 import numpy as np
-from os.path import splitext
 from py4DSTEM.io.legacy.read_utils import (
     is_py4DSTEM_file,
     get_py4DSTEM_topgroups,
     get_py4DSTEM_version,
-    version_is_geq,
 )
 from py4DSTEM.io.legacy.legacy12.read_utils_v0_5 import get_py4DSTEM_dataobject_info
 from emdfile import PointList, PointListArray
@@ -97,7 +95,7 @@ def read_v0_5(fp, **kwargs):
         ), "Error: data must be specified with strings or integers only."
     if not isinstance(data_id, (int, str)):
         assert all(
-            [isinstance(d, (int, str)) for d in data_id]
+            isinstance(d, (int, str)) for d in data_id
         ), "Error: data must be specified with strings or integers only."
     # Parse optional arguments
@@ -248,7 +246,7 @@ def get_data_from_str(fp, tg, data_id, mem="RAM", binfactor=1, bindtype=None):
 def get_data_from_list(fp, tg, data_id, mem="RAM", binfactor=1, bindtype=None):
     """Accepts a fp to a valid py4DSTEM file and a list or tuple specifying data, and returns the data."""
     assert isinstance(data_id, (list, tuple))
-    assert all([isinstance(d, (int, str)) for d in data_id])
+    assert all(isinstance(d, (int, str)) for d in data_id)
     data = []
     for el in data_id:
         if isinstance(el, int):
diff --git a/py4DSTEM/io/legacy/legacy12/read_v0_6.py b/py4DSTEM/io/legacy/legacy12/read_v0_6.py
index f746548ca..4f0149473 100644
--- a/py4DSTEM/io/legacy/legacy12/read_v0_6.py
+++ b/py4DSTEM/io/legacy/legacy12/read_v0_6.py
@@ -2,12 +2,10 @@

 import h5py
 import numpy as np
-from os.path import splitext
 from py4DSTEM.io.legacy.read_utils import (
     is_py4DSTEM_file,
     get_py4DSTEM_topgroups,
     get_py4DSTEM_version,
-    version_is_geq,
 )
 from py4DSTEM.io.legacy.legacy12.read_utils_v0_6 import get_py4DSTEM_dataobject_info
 from emdfile import PointList, PointListArray
@@ -97,7 +95,7 @@ def read_v0_6(fp, **kwargs):
         ), "Error: data must be specified with strings or integers only."
     if not isinstance(data_id, (int, str)):
         assert all(
-            [isinstance(d, (int, str)) for d in data_id]
+            isinstance(d, (int, str)) for d in data_id
         ), "Error: data must be specified with strings or integers only."

     # Parse optional arguments
@@ -248,7 +246,7 @@ def get_data_from_str(fp, tg, data_id, mem="RAM", binfactor=1, bindtype=None):
 def get_data_from_list(fp, tg, data_id, mem="RAM", binfactor=1, bindtype=None):
     """Accepts a fp to a valid py4DSTEM file and a list or tuple specifying data, and returns the data."""
     assert isinstance(data_id, (list, tuple))
-    assert all([isinstance(d, (int, str)) for d in data_id])
+    assert all(isinstance(d, (int, str)) for d in data_id)
     data = []
     for el in data_id:
         if isinstance(el, int):
diff --git a/py4DSTEM/io/legacy/legacy12/read_v0_7.py b/py4DSTEM/io/legacy/legacy12/read_v0_7.py
index fac779d64..d94c55fab 100644
--- a/py4DSTEM/io/legacy/legacy12/read_v0_7.py
+++ b/py4DSTEM/io/legacy/legacy12/read_v0_7.py
@@ -2,12 +2,10 @@

 import h5py
 import numpy as np
-from os.path import splitext
 from py4DSTEM.io.legacy.read_utils import (
     is_py4DSTEM_file,
     get_py4DSTEM_topgroups,
     get_py4DSTEM_version,
-    version_is_geq,
 )
 from py4DSTEM.io.legacy.legacy12.read_utils_v0_7 import get_py4DSTEM_dataobject_info
 from emdfile import PointList, PointListArray
@@ -97,7 +95,7 @@ def read_v0_7(fp, **kwargs):
         ), "Error: data must be specified with strings or integers only."
     if not isinstance(data_id, (int, str)):
         assert all(
-            [isinstance(d, (int, str)) for d in data_id]
+            isinstance(d, (int, str)) for d in data_id
         ), "Error: data must be specified with strings or integers only."
     # Parse optional arguments
@@ -248,7 +246,7 @@ def get_data_from_str(fp, tg, data_id, mem="RAM", binfactor=1, bindtype=None):
 def get_data_from_list(fp, tg, data_id, mem="RAM", binfactor=1, bindtype=None):
     """Accepts a fp to a valid py4DSTEM file and a list or tuple specifying data, and returns the data."""
     assert isinstance(data_id, (list, tuple))
-    assert all([isinstance(d, (int, str)) for d in data_id])
+    assert all(isinstance(d, (int, str)) for d in data_id)
     data = []
     for el in data_id:
         if isinstance(el, int):
diff --git a/py4DSTEM/io/legacy/legacy12/read_v0_9.py b/py4DSTEM/io/legacy/legacy12/read_v0_9.py
index 0cf186ffd..75bf91233 100644
--- a/py4DSTEM/io/legacy/legacy12/read_v0_9.py
+++ b/py4DSTEM/io/legacy/legacy12/read_v0_9.py
@@ -2,7 +2,7 @@

 import h5py
 import numpy as np
-from os.path import splitext, exists
+from os.path import exists
 from py4DSTEM.io.legacy.read_utils import (
     is_py4DSTEM_file,
     get_py4DSTEM_topgroups,
@@ -103,7 +103,7 @@ def read_v0_9(fp, **kwargs):
         ), "Error: data must be specified with strings or integers only."
     if not isinstance(data_id, (int, np.int_, str)):
         assert all(
-            [isinstance(d, (int, np.int_, str)) for d in data_id]
+            isinstance(d, (int, np.int_, str)) for d in data_id
         ), "Error: data must be specified with strings or integers only."

     # Parse optional arguments
@@ -255,7 +255,7 @@ def get_data_from_str(filepath, tg, data_id, mem="RAM", binfactor=1, bindtype=No
 def get_data_from_list(filepath, tg, data_id, mem="RAM", binfactor=1, bindtype=None):
     """Accepts a filepath to a valid py4DSTEM file and a list or tuple specifying data, and returns the data."""
     assert isinstance(data_id, (list, tuple))
-    assert all([isinstance(d, (int, np.int_, str)) for d in data_id])
+    assert all(isinstance(d, (int, np.int_, str)) for d in data_id)
     data = []
     for el in data_id:
         if isinstance(el, (int, np.int_)):
diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/array.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/array.py
index a5192ffa6..23d94b77e 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/array.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/array.py
@@ -3,7 +3,6 @@

 from typing import Optional, Union
 import numpy as np
-import h5py
 from numbers import Number

 from py4DSTEM.io.legacy.legacy13.v13_emd_classes.tree import Tree
diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py
index ddbea9005..af65776f4 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/io.py
@@ -160,7 +160,7 @@ def Metadata_to_h5(metadata, group):
                 dset.attrs["type"] = np.string_("tuple")

             # of tuples
-            elif any([isinstance(v[i], tuple) for i in range(len(v))]):
+            elif any(isinstance(v[i], tuple) for i in range(len(v))):
                 dset_grp = grp.create_group(k)
                 dset_grp.attrs["type"] = np.string_("tuple_of_tuples")
                 dset_grp.attrs["length"] = len(v)
diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/metadata.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/metadata.py
index d430528e1..e8e756965 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/metadata.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/metadata.py
@@ -1,7 +1,5 @@
 import numpy as np
-from numbers import Number
 from typing import Optional
-import h5py

 from py4DSTEM.io.legacy.legacy13.v13_emd_classes.tree import Tree
diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlist.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlist.py
index c7f0c7fc1..4621211b4 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlist.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlist.py
@@ -3,8 +3,6 @@
 # structured arrays.

 import numpy as np
-import h5py
-from copy import copy
 from typing import Optional

 from py4DSTEM.io.legacy.legacy13.v13_emd_classes.tree import Tree
diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlistarray.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlistarray.py
index c246672bd..7e27f21ca 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlistarray.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/pointlistarray.py
@@ -1,7 +1,5 @@
 import numpy as np
-from copy import copy
 from typing import Optional
-import h5py

 from py4DSTEM.io.legacy.legacy13.v13_emd_classes.tree import Tree
 from py4DSTEM.io.legacy.legacy13.v13_emd_classes.metadata import Metadata
diff --git a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/root.py b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/root.py
index c5137d9f4..39a36ab5a 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_emd_classes/root.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_emd_classes/root.py
@@ -1,7 +1,4 @@
-import numpy as np
-from numbers import Number
 from typing import Optional
-import h5py

 from py4DSTEM.io.legacy.legacy13.v13_emd_classes.tree import Tree
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/braggvectors.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/braggvectors.py
index 4e51bdebf..91471bd83 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/braggvectors.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/braggvectors.py
@@ -1,9 +1,7 @@
 # Defines the BraggVectors class

-from typing import Optional, Union
 import numpy as np
-import h5py

 from py4DSTEM.io.legacy.legacy13.v13_emd_classes import PointListArray
 from py4DSTEM.io.legacy.legacy13.v13_emd_classes.tree import Tree
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/datacube.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/datacube.py
index 422d47bc6..2c9d697b0 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/datacube.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/datacube.py
@@ -6,7 +6,6 @@

 from typing import Optional, Union
 import numpy as np
-import h5py


 class DataCube(Array):
@@ -108,7 +107,7 @@ def R_pixel_size(self):

     @R_pixel_size.setter
     def R_pixel_size(self, x):
-        if type(x) is not list:
+        if not isinstance(x, list):
             x = [x, x]
         self.set_dim(0, [0, x[0]])
         self.set_dim(1, [0, x[1]])
@@ -120,7 +119,7 @@ def R_pixel_units(self):

     @R_pixel_units.setter
     def R_pixel_units(self, x):
-        if type(x) is not list:
+        if not isinstance(x, list):
             x = [x, x]
         self.dim_units[0] = x[0]
         self.dim_units[1] = x[1]
@@ -133,7 +132,7 @@ def Q_pixel_size(self):

     @Q_pixel_size.setter
     def Q_pixel_size(self, x):
-        if type(x) is not list:
+        if not isinstance(x, list):
             x = [x, x]
         self.set_dim(2, [0, x[0]])
         self.set_dim(3, [0, x[1]])
@@ -145,7 +144,7 @@ def Q_pixel_units(self):

     @Q_pixel_units.setter
     def Q_pixel_units(self, x):
-        if type(x) is not list:
+        if not isinstance(x, list):
             x = [x, x]
         self.dim_units[2] = x[0]
         self.dim_units[3] = x[1]
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/diffractionslice.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/diffractionslice.py
index b32877a4a..60e29af33 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/diffractionslice.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/diffractionslice.py
@@ -5,7 +5,6 @@

 from typing import Optional, Union
 import numpy as np
-import h5py

 class DiffractionSlice(Array):
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/io.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/io.py
index 2556ebe8f..23fd9fee6 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/io.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/io.py
@@ -1,6 +1,5 @@
 # Functions for reading and writing subclasses of the base EMD types

-import numpy as np
 import h5py
 from os.path import basename
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/probe.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/probe.py
index cd1c7d9d9..284616241 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/probe.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/probe.py
@@ -1,14 +1,13 @@
 # Defines the Probe class, which stores vacuum probes
 # and cross-correlation kernels derived from them

-from py4DSTEM.io.legacy.legacy13.v13_emd_classes.array import Array, Metadata
+from py4DSTEM.io.legacy.legacy13.v13_emd_classes.array import Metadata
 from py4DSTEM.io.legacy.legacy13.v13_py4dstem_classes.diffractionslice import (
     DiffractionSlice,
 )
-from typing import Optional, Union
+from typing import Optional
 import numpy as np
-import h5py


 class Probe(DiffractionSlice):
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/qpoints.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/qpoints.py
index 3429c4c8d..e9fa51723 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/qpoints.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/qpoints.py
@@ -2,9 +2,8 @@

 from py4DSTEM.io.legacy.legacy13.v13_emd_classes.pointlist import PointList

-from typing import Optional, Union
+from typing import Optional
 import numpy as np
-import h5py


 class QPoints(PointList):
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/realslice.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/realslice.py
index 367401055..e4d973fbc 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/realslice.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/realslice.py
@@ -2,7 +2,6 @@

 from typing import Optional, Union
 import numpy as np
-import h5py


 class RealSlice(Array):
@@ -30,9 +29,9 @@ def __init__(
             A new RealSlice instance
         """
         # expand pixel inputs to include 2 dimensions
-        if type(pixel_size) is not list:
+        if not isinstance(pixel_size, list):
             pixel_size = [pixel_size, pixel_size]
-        if type(pixel_units) is not list:
+        if not isinstance(pixel_units, list):
             pixel_units = [pixel_units, pixel_units]

         # initialize as an Array
@@ -63,7 +62,7 @@ def pixel_size(self):

     @pixel_size.setter
     def pixel_size(self, x):
-        if type(x) is not list:
+        if not isinstance(x, list):
             x = [x, x]
         self.set_dim(0, [0, x[0]])
         self.set_dim(1, [0, x[1]])
@@ -75,7 +74,7 @@ def pixel_units(self):

     @pixel_units.setter
     def pixel_units(self, x):
-        if type(x) is not list:
+        if not isinstance(x, list):
             x = [x, x]
         self.dim_units[0] = x[0]
         self.dim_units[1] = x[1]
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualdiffraction.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualdiffraction.py
index 188f1d646..7adc89a8e 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualdiffraction.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualdiffraction.py
@@ -8,7 +8,6 @@

 from typing import Optional, Union
 import numpy as np
-import h5py


 class VirtualDiffraction(DiffractionSlice):
diff --git a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualimage.py b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualimage.py
index 4d6c38845..2feb96c62 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualimage.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_py4dstem_classes/virtualimage.py
@@ -6,7 +6,6 @@

 from typing import Optional, Union
 import numpy as np
-import h5py


 class VirtualImage(RealSlice):
diff --git a/py4DSTEM/io/legacy/legacy13/v13_to_14.py b/py4DSTEM/io/legacy/legacy13/v13_to_14.py
index 650529b22..28e634eef 100644
--- a/py4DSTEM/io/legacy/legacy13/v13_to_14.py
+++ b/py4DSTEM/io/legacy/legacy13/v13_to_14.py
@@ -34,7 +34,6 @@
     Calibration,
     DiffractionSlice,
     RealSlice,
-    QPoints,
 )
 from py4DSTEM.datacube import (
     DataCube,
diff --git a/py4DSTEM/io/legacy/read_legacy_12.py b/py4DSTEM/io/legacy/read_legacy_12.py
index 40bfcfc94..68aafcada 100644
--- a/py4DSTEM/io/legacy/read_legacy_12.py
+++ b/py4DSTEM/io/legacy/read_legacy_12.py
@@ -1,8 +1,6 @@
 # File reader for py4DSTEM files

-import h5py
-import numpy as np
-from os.path import splitext, exists
+from os.path import exists
 from py4DSTEM.io.legacy.read_utils import is_py4DSTEM_file, get_py4DSTEM_topgroups
 from py4DSTEM.io.legacy.read_utils import get_py4DSTEM_version, version_is_geq
 from py4DSTEM.io.legacy.legacy12 import (
diff --git a/py4DSTEM/io/legacy/read_legacy_13.py b/py4DSTEM/io/legacy/read_legacy_13.py
index 04da1e65a..43f666ebd 100644
--- a/py4DSTEM/io/legacy/read_legacy_13.py
+++ b/py4DSTEM/io/legacy/read_legacy_13.py
@@ -1,7 +1,6 @@
 # File reader for py4DSTEM v13 files

 import h5py
-import numpy as np
 import warnings
 from os.path import exists, basename, dirname, join
 from typing import Optional, Union
diff --git a/py4DSTEM/io/legacy/read_utils.py b/py4DSTEM/io/legacy/read_utils.py
index 7cd48cde7..afdbe1384 100644
--- a/py4DSTEM/io/legacy/read_utils.py
+++ b/py4DSTEM/io/legacy/read_utils.py
@@ -1,7 +1,6 @@
 # Utility functions

 import h5py
-import numpy as np


 def get_py4DSTEM_topgroups(filepath):
@@ -20,9 +19,7 @@ def is_py4DSTEM_version13(filepath):
     for k in f.keys():
         if "emd_group_type" in f[k].attrs:
             if f[k].attrs["emd_group_type"] == "root":
-                if all(
-                    [x in f[k].attrs for x in ("version_major", "version_minor")]
-                ):
+                if all(x in f[k].attrs for x in ("version_major", "version_minor")):
                     if (
                         int(f[k].attrs["version_major"]),
                         int(f[k].attrs["version_minor"]),
@@ -100,7 +97,8 @@ def get_N_dataobjects(filepath, topgroup="4DSTEM_experiment"):
         N_pla = len(f[topgroup]["data/pointlistarrays"].keys())
         try:
             N_coords = len(f[topgroup]["data/coordinates"].keys())
-        except:
+        # TODO work out what exception will be raised ValueError, AttributeError, BS thinks KeyError
+        except Exception:
             N_coords = 0
         N_do = N_dc + N_cdc + N_ds + N_rs + N_pl + N_pla + N_coords
         return N_dc, N_cdc, N_ds, N_rs, N_pl, N_pla, N_coords, N_do
diff --git a/py4DSTEM/preprocess/electroncount.py b/py4DSTEM/preprocess/electroncount.py
index e3fc68e05..153e43d24 100644
--- a/py4DSTEM/preprocess/electroncount.py
+++ b/py4DSTEM/preprocess/electroncount.py
@@ -144,7 +144,6 @@ def electron_count_GPU(
     See electron_count() for additional documentation.
""" import torch - import dm assert isinstance(output, str), "output must be a str" assert output in [ @@ -417,7 +416,6 @@ def counted_pointlistarray_to_datacube(counted_pointlistarray, shape, subpixel=F if __name__ == "__main__": - from py4DSTEM.process.preprocess import get_darkreference from py4DSTEM.io import DataCube, save from ncempy.io import dm diff --git a/py4DSTEM/preprocess/preprocess.py b/py4DSTEM/preprocess/preprocess.py index 41ddd7fd4..5338de1a6 100644 --- a/py4DSTEM/preprocess/preprocess.py +++ b/py4DSTEM/preprocess/preprocess.py @@ -11,7 +11,6 @@ import numpy as np from py4DSTEM.preprocess.utils import bin2D, get_shifted_ar from emdfile import tqdmnd -from scipy.ndimage import median_filter ### Editing datacube shape ### @@ -166,7 +165,9 @@ def bin_data_diffraction(datacube, bin_factor, dtype=None): """ # validate inputs - assert type(bin_factor) is int, f"Error: binning factor {bin_factor} is not an int." + assert isinstance( + bin_factor, int + ), f"Error: binning factor {bin_factor} is not an int." if bin_factor == 1: return datacube if dtype is None: @@ -225,7 +226,9 @@ def bin_data_mmap(datacube, bin_factor, dtype=np.float32): """ # validate inputs - assert type(bin_factor) is int, f"Error: binning factor {bin_factor} is not an int." + assert isinstance( + bin_factor, int + ), f"Error: binning factor {bin_factor} is not an int." if bin_factor == 1: return datacube @@ -268,7 +271,9 @@ def bin_data_real(datacube, bin_factor): Performs diffraction space binning of data by bin_factor. """ # validate inputs - assert type(bin_factor) is int, f"Bin factor {bin_factor} is not an int." + assert isinstance( + bin_factor, int + ), f"Error: binning factor {bin_factor} is not an int." if bin_factor <= 1: return datacube diff --git a/py4DSTEM/process/calibration/ellipse.py b/py4DSTEM/process/calibration/ellipse.py index 2954de377..39266a7bb 100644 --- a/py4DSTEM/process/calibration/ellipse.py +++ b/py4DSTEM/process/calibration/ellipse.py @@ -199,7 +199,7 @@ def fit_ellipse_amorphous_ring(data, center, fitradii, p0=None, mask=None): A, B, C = convert_ellipse_params_r(R, R, 0) # Populate initial parameters - p0_guess = tuple([I0, I1, sigma0, sigma1, sigma2, c_bkgd, x0, y0, A, B, C]) + p0_guess = (I0, I1, sigma0, sigma1, sigma2, c_bkgd, x0, y0, A, B, C) if p0 is None: _p0 = p0_guess else: diff --git a/py4DSTEM/process/calibration/origin.py b/py4DSTEM/process/calibration/origin.py index 7f0c07a81..023fe6489 100644 --- a/py4DSTEM/process/calibration/origin.py +++ b/py4DSTEM/process/calibration/origin.py @@ -1,13 +1,11 @@ # Find the origin of diffraction space -import functools import numpy as np import matplotlib.pyplot as plt from scipy.ndimage import gaussian_filter from scipy.optimize import leastsq -import matplotlib.pyplot as plt -from emdfile import tqdmnd, PointListArray +from emdfile import tqdmnd from py4DSTEM.datacube import DataCube from py4DSTEM.process.calibration.probe import get_probe_size from py4DSTEM.process.fit import plane, parabola, bezier_two, fit_2D diff --git a/py4DSTEM/process/calibration/qpixelsize.py b/py4DSTEM/process/calibration/qpixelsize.py index d59d5a45c..12e4ad162 100644 --- a/py4DSTEM/process/calibration/qpixelsize.py +++ b/py4DSTEM/process/calibration/qpixelsize.py @@ -2,10 +2,6 @@ import numpy as np from scipy.optimize import leastsq -from typing import Union, Optional - -from emdfile import tqdmnd -from py4DSTEM.process.utils import get_CoM def get_Q_pixel_size(q_meas, q_known, units="A"): diff --git a/py4DSTEM/process/calibration/rotation.py 
index aaf8a49ce..eec4b3d4a 100644
--- a/py4DSTEM/process/calibration/rotation.py
+++ b/py4DSTEM/process/calibration/rotation.py
@@ -1,7 +1,6 @@
 # Rotational calibrations

 import numpy as np
-from typing import Optional
 import matplotlib.pyplot as plt

 from py4DSTEM import show
diff --git a/py4DSTEM/process/classification/braggvectorclassification.py b/py4DSTEM/process/classification/braggvectorclassification.py
index c3e36273f..1beca849a 100644
--- a/py4DSTEM/process/classification/braggvectorclassification.py
+++ b/py4DSTEM/process/classification/braggvectorclassification.py
@@ -856,7 +856,7 @@ def get_initial_classes(
     seed_new_class = True
     while seed_new_class:
         ind1, ind2 = np.unravel_index(np.argmax(n_point_function), (N, N))
-        BP_set = set([ind1, ind2])
+        BP_set = {ind1, ind2}
         grow_class = True
         while grow_class:
             frequencies = np.zeros(N)
@@ -910,7 +910,7 @@ def get_initial_classes(
     seed_new_class = True
     while seed_new_class:
         ind1, ind2, ind3 = np.unravel_index(np.argmax(n_point_function), (N, N, N))
-        BP_set = set([ind1, ind2, ind3])
+        BP_set = {ind1, ind2, ind3}
         grow_class = True
         while grow_class:
             frequencies = np.zeros(N)
diff --git a/py4DSTEM/process/classification/featurization.py b/py4DSTEM/process/classification/featurization.py
index 126583413..9a1353622 100644
--- a/py4DSTEM/process/classification/featurization.py
+++ b/py4DSTEM/process/classification/featurization.py
@@ -477,7 +477,8 @@ def get_class_DPs(self, datacube, method, thresh):
                 datacube.data.shape[2],
                 datacube.data.shape[3],
             )
-        except:
+        # TODO Work out if its a ValueError
+        except Exception:
             raise ValueError(
                 "Datacube must have same R_Nx and R_Ny dimensions as Featurization instance."
             )
@@ -586,7 +587,7 @@ def get_class_ims(self, classification_method):
         """
         class_maps = []
         if classification_method == "NMF":
-            if type(self.W) == list:
+            if isinstance(self.W, list):
                 for l in range(len(self.W)):
                     small_class_maps = []
                     for k in range(self.W[l].shape[1]):
@@ -599,7 +600,7 @@ def get_class_ims(self, classification_method):
                     class_maps.append(self.W[:, l].reshape(self.R_Nx, self.R_Ny))
             class_maps = [class_maps]
         elif classification_method == "GMM":
-            if type(self.gmm_labels) == list:
+            if isinstance(self.gmm_labels, list):
                 for l in range(len(self.gmm_labels)):
                     small_class_maps = []
                     for k in range(np.max(self.gmm_labels[l])):
diff --git a/py4DSTEM/process/diffraction/crystal.py b/py4DSTEM/process/diffraction/crystal.py
index aa1eb8555..9d512f894 100644
--- a/py4DSTEM/process/diffraction/crystal.py
+++ b/py4DSTEM/process/diffraction/crystal.py
@@ -35,7 +35,6 @@ class Crystal:
         save_ang_file,
         symmetry_reduce_directions,
         orientation_map_to_orix_CrystalMap,
-        save_ang_file,
     )

     from py4DSTEM.process.diffraction.crystal_viz import (
@@ -520,41 +519,31 @@ def from_unitcell_parameters(
         elif lattice_type == "hexagonal":
             assert (
                 len(latt_params) == 2
-            ), "2 lattice parametere are expected for hexagonal: a, c, but given {len(latt_params)}".format(
-                len(latt_params)
-            )
+            ), f"2 lattice parameters are expected for hexagonal: a, c, but given {len(latt_params)}"
             lattice = mg.core.Lattice.hexagonal(latt_params[0], latt_params[1])
         elif lattice_type == "tetragonal":
             assert (
                 len(latt_params) == 2
-            ), "2 lattice parametere are expected for tetragonal: a, c, but given {len(latt_params)}".format(
-                len(latt_params)
-            )
+            ), f"2 lattice parameters are expected for tetragonal: a, c, but given {len(latt_params)}"
             lattice = mg.core.Lattice.tetragonal(latt_params[0], latt_params[1])
         elif lattice_type == "orthorhombic":
"orthorhombic": assert ( len(latt_params) == 3 - ), "3 lattice parametere are expected for orthorhombic: a, b, c, but given {len(latt_params)}".format( - len(latt_params) - ) + ), f"3 lattice parametere are expected for orthorhombic: a, b, c, but given {len(latt_params)}" lattice = mg.core.Lattice.orthorhombic( latt_params[0], latt_params[1], latt_params[2] ) elif lattice_type == "monoclinic": assert ( len(latt_params) == 4 - ), "4 lattice parametere are expected for monoclinic: a, b, c, beta, but given {len(latt_params)}".format( - len(latt_params) - ) + ), f"4 lattice parametere are expected for monoclinic: a, b, c, beta, but given {len(latt_params)}" lattice = mg.core.Lattice.monoclinic( latt_params[0], latt_params[1], latt_params[2], latt_params[3] ) else: assert ( len(latt_params) == 6 - ), "all 6 lattice parametere are expected: a, b, c, alpha, beta, gamma, but given {len(latt_params)}".format( - len(latt_params) - ) + ), f"all 6 lattice parametere are expected: a, b, c, alpha, beta, gamma, but given {len(latt_params)}" lattice = mg.core.Lattice.from_parameters( latt_params[0], latt_params[1], @@ -750,9 +739,6 @@ def generate_diffraction_pattern( print("Accelerating voltage not set. Assuming 300 keV!") self.setup_diffraction(300e3) - # Tolerance for angular tests - tol = 1e-6 - # Parse orientation inputs if orientation is not None: if ind_orientation is None: @@ -811,9 +797,9 @@ def generate_diffraction_pattern( gy_proj = g_diff[1, keep_int] # Diffracted peak labels - h = hkl[0, keep_int] - k = hkl[1, keep_int] - l = hkl[2, keep_int] + h = hkl[0, keep_int] # noqa: E741 + k = hkl[1, keep_int] # noqa: E741 + l = hkl[2, keep_int] # noqa: E741 # Output as PointList if keep_qz: @@ -906,9 +892,7 @@ def generate_ring_pattern( ) # check accelerating voltage - if hasattr(self, "accel_voltage"): - accelerating_voltage = self.accel_voltage - else: + if not hasattr(self, "accel_voltage"): self.accel_voltage = 300e3 print("Accelerating voltage not set. 
diff --git a/py4DSTEM/process/diffraction/crystal_bloch.py b/py4DSTEM/process/diffraction/crystal_bloch.py
index ce8bb8622..343511b88 100644
--- a/py4DSTEM/process/diffraction/crystal_bloch.py
+++ b/py4DSTEM/process/diffraction/crystal_bloch.py
@@ -1,4 +1,3 @@
-import warnings
 import numpy as np
 import numpy.lib.recfunctions as rfn
 from scipy import linalg
diff --git a/py4DSTEM/process/diffraction/crystal_calibrate.py b/py4DSTEM/process/diffraction/crystal_calibrate.py
index c068bf79e..e07005468 100644
--- a/py4DSTEM/process/diffraction/crystal_calibrate.py
+++ b/py4DSTEM/process/diffraction/crystal_calibrate.py
@@ -1,13 +1,13 @@
 import numpy as np

-from typing import Union, Optional
+from typing import Union
 from scipy.optimize import curve_fit
-from py4DSTEM.process.diffraction.utils import Orientation, calc_1D_profile
+from py4DSTEM.process.diffraction.utils import calc_1D_profile

 try:
     from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
     from pymatgen.core.structure import Structure
-except ImportError:
+except (ImportError, ModuleNotFoundError):
     pass
diff --git a/py4DSTEM/process/diffraction/crystal_phase.py b/py4DSTEM/process/diffraction/crystal_phase.py
index d28616aa9..a36ad26d1 100644
--- a/py4DSTEM/process/diffraction/crystal_phase.py
+++ b/py4DSTEM/process/diffraction/crystal_phase.py
@@ -5,8 +5,7 @@
 import matplotlib.pyplot as plt

 from emdfile import tqdmnd, PointListArray
-from py4DSTEM.visualize import show, show_image_grid
-from py4DSTEM.process.diffraction.crystal_viz import plot_diffraction_pattern
+from py4DSTEM.visualize import show_image_grid


 class Crystal_Phase:
diff --git a/py4DSTEM/process/diffraction/crystal_viz.py b/py4DSTEM/process/diffraction/crystal_viz.py
index 47df2e6ca..e4097a407 100644
--- a/py4DSTEM/process/diffraction/crystal_viz.py
+++ b/py4DSTEM/process/diffraction/crystal_viz.py
@@ -1,6 +1,4 @@
 import matplotlib.pyplot as plt
-from matplotlib.figure import Figure
-from matplotlib.axes import Axes
 import matplotlib.tri as mtri
 from mpl_toolkits.mplot3d import Axes3D, art3d
 from mpl_toolkits.mplot3d.art3d import Poly3DCollection
@@ -14,7 +12,7 @@
 import numpy as np
 from typing import Union, Optional

-from emdfile import tqdmnd, PointList, PointListArray
+from emdfile import tqdmnd, PointList
 from py4DSTEM.process.diffraction.utils import calc_1D_profile
diff --git a/py4DSTEM/process/diffraction/flowlines.py b/py4DSTEM/process/diffraction/flowlines.py
index 5814a972b..8887d1735 100644
--- a/py4DSTEM/process/diffraction/flowlines.py
+++ b/py4DSTEM/process/diffraction/flowlines.py
@@ -3,8 +3,6 @@

 import numpy as np
 import matplotlib.pyplot as plt
-from matplotlib.figure import Figure
-from matplotlib.axes import Axes
 from scipy.ndimage import gaussian_filter1d
 from scipy.optimize import curve_fit
@@ -12,7 +10,7 @@
 from matplotlib.colors import rgb_to_hsv
 from matplotlib.colors import ListedColormap

-from emdfile import tqdmnd, PointList, PointListArray
+from emdfile import tqdmnd, PointListArray


 def make_orientation_histogram(
diff --git a/py4DSTEM/process/diffraction/tdesign.py b/py4DSTEM/process/diffraction/tdesign.py
index 1a0a81fb6..79edae315 100644
--- a/py4DSTEM/process/diffraction/tdesign.py
+++ b/py4DSTEM/process/diffraction/tdesign.py
@@ -35,7 +35,9 @@ def tdesign(degree):

     assert degree <= 21, "Degree must be 21 or less."
     assert degree >= 1, "Degree should be at least 1."
-    assert type(degree) is int, "Degree should be an integer."
+    assert isinstance(
+        degree, int
+    ), f"Degree should be an integer, {type(degree)} passed."
     vecs = _tdesigns[degree - 1]
diff --git a/py4DSTEM/process/phase/utils.py b/py4DSTEM/process/phase/utils.py
index 98d8fdd49..9191db745 100644
--- a/py4DSTEM/process/phase/utils.py
+++ b/py4DSTEM/process/phase/utils.py
@@ -298,12 +298,12 @@ def evaluate_chi(
         alpha = xp.array(alpha)

     array = xp.zeros(alpha.shape, dtype=np.float32)
-    if any([p[symbol] != 0.0 for symbol in ("C10", "C12", "phi12")]):
+    if any(p[symbol] != 0.0 for symbol in ("C10", "C12", "phi12")):
         array += (
             1 / 2 * alpha2 * (p["C10"] + p["C12"] * xp.cos(2 * (phi - p["phi12"])))
         )

-    if any([p[symbol] != 0.0 for symbol in ("C21", "phi21", "C23", "phi23")]):
+    if any(p[symbol] != 0.0 for symbol in ("C21", "phi21", "C23", "phi23")):
         array += (
             1
             / 3
@@ -315,9 +315,7 @@ def evaluate_chi(
             )
         )

-    if any(
-        [p[symbol] != 0.0 for symbol in ("C30", "C32", "phi32", "C34", "phi34")]
-    ):
+    if any(p[symbol] != 0.0 for symbol in ("C30", "C32", "phi32", "C34", "phi34")):
         array += (
             1
             / 4
@@ -330,10 +328,8 @@ def evaluate_chi(
         )

     if any(
-        [
-            p[symbol] != 0.0
-            for symbol in ("C41", "phi41", "C43", "phi43", "C45", "phi41")
-        ]
+        p[symbol] != 0.0
+        for symbol in ("C41", "phi41", "C43", "phi43", "C45", "phi41")
     ):
         array += (
             1
@@ -348,10 +344,8 @@ def evaluate_chi(
         )

     if any(
-        [
-            p[symbol] != 0.0
-            for symbol in ("C50", "C52", "phi52", "C54", "phi54", "C56", "phi56")
-        ]
+        p[symbol] != 0.0
+        for symbol in ("C50", "C52", "phi52", "C54", "phi54", "C56", "phi56")
     ):
         array += (
             1
@@ -1048,7 +1042,7 @@ def fourier_rotate_real_volume(array, angle, axes=(0, 1), xp=np):
     if len(axes) != 2:
         raise ValueError("axes should contain exactly two values")

-    if not all([float(ax).is_integer() for ax in axes]):
+    if not all(float(ax).is_integer() for ax in axes):
         raise ValueError("axes should contain only integer values")

     if axes[0] < 0:
diff --git a/py4DSTEM/process/polar/polar_datacube.py b/py4DSTEM/process/polar/polar_datacube.py
index 0ba284ada..bc124761d 100644
--- a/py4DSTEM/process/polar/polar_datacube.py
+++ b/py4DSTEM/process/polar/polar_datacube.py
@@ -1,6 +1,6 @@
 import numpy as np
 from py4DSTEM.datacube import DataCube
-from scipy.ndimage import binary_opening, binary_closing, gaussian_filter1d
+from scipy.ndimage import gaussian_filter1d


 class PolarDatacube:
diff --git a/py4DSTEM/process/polar/polar_peaks.py b/py4DSTEM/process/polar/polar_peaks.py
index 4064fccaf..12dd30de1 100644
--- a/py4DSTEM/process/polar/polar_peaks.py
+++ b/py4DSTEM/process/polar/polar_peaks.py
@@ -4,7 +4,7 @@
 from scipy.ndimage import gaussian_filter, gaussian_filter1d
 from scipy.signal import peak_prominences
 from skimage.feature import peak_local_max
-from scipy.optimize import curve_fit, leastsq
+from scipy.optimize import curve_fit
 import warnings

 # from emdfile import tqdmnd, PointList, PointListArray
@@ -602,8 +602,8 @@ def refine_peaks_local(
                     self.peaks[rx, ry]["qr"][a0] = p0[2] / q_step
                     self.peaks[rx, ry]["sigma_annular"][a0] = p0[3] / t_step
                     self.peaks[rx, ry]["sigma_radial"][a0] = p0[4] / q_step
-
-                except:
+                # TODO work out what error is raised
+                except Exception:
                     pass

             else:
@@ -643,8 +643,8 @@ def refine_peaks_local(
                     self.peaks[rx, ry]["qr"][a0] = p0[2] / q_step
                     self.peaks[rx, ry]["sigma_annular"][a0] = p0[3] / t_step
                     self.peaks[rx, ry]["sigma_radial"][a0] = p0[4] / q_step
-
-                except:
+                # TODO work out what exception is raised
+                except Exception:
                     pass


@@ -1044,7 +1044,8 @@ def fit_image(basis, *coefs):
                 ),
                 name="peaks_polar",
             )
-        except:
+        # TODO work out what exception is raised
+        except Exception:
            # if fitting has failed, we will still output the last iteration
            # TODO - add a flag for unconverged fits
            coefs_peaks = np.reshape(coefs_all[(3 * num_rings + 3) :], (5, num_peaks)).T
diff --git a/py4DSTEM/process/rdf/rdf.py b/py4DSTEM/process/rdf/rdf.py
index cee7eeee9..d09c8a35f 100644
--- a/py4DSTEM/process/rdf/rdf.py
+++ b/py4DSTEM/process/rdf/rdf.py
@@ -4,7 +4,7 @@

 import numpy as np
 from scipy.special import erf
-from scipy.fftpack import dst, idst
+from scipy.fftpack import dst

 from py4DSTEM.process.utils import single_atom_scatter
diff --git a/py4DSTEM/process/utils/multicorr.py b/py4DSTEM/process/utils/multicorr.py
index 58c5fc051..6972ed373 100644
--- a/py4DSTEM/process/utils/multicorr.py
+++ b/py4DSTEM/process/utils/multicorr.py
@@ -99,7 +99,8 @@ def upsampled_correlation(imageCorr, upsampleFactor, xyShift, device="cpu"):
             )
             dx = (icc[2, 1] - icc[0, 1]) / (4 * icc[1, 1] - 2 * icc[2, 1] - 2 * icc[0, 1])
             dy = (icc[1, 2] - icc[1, 0]) / (4 * icc[1, 1] - 2 * icc[1, 2] - 2 * icc[1, 0])
-        except:
+        # TODO work out what error is raised IndexError
+        except Exception:
             dx, dy = (
                 0,
                 0,
diff --git a/py4DSTEM/process/wholepatternfit/wpf.py b/py4DSTEM/process/wholepatternfit/wpf.py
index 5d4a4e91f..f14a177fe 100644
--- a/py4DSTEM/process/wholepatternfit/wpf.py
+++ b/py4DSTEM/process/wholepatternfit/wpf.py
@@ -639,7 +639,7 @@ def _finalize_model(self):
         self.upper_bound = np.array([param.upper_bound for param in unique_params])
         self.lower_bound = np.array([param.lower_bound for param in unique_params])

-        self.hasJacobian = all([m.hasJacobian for m in self.model])
+        self.hasJacobian = all(m.hasJacobian for m in self.model)

         self.nParams = self.x0.shape[0]
diff --git a/py4DSTEM/process/wholepatternfit/wpf_viz.py b/py4DSTEM/process/wholepatternfit/wpf_viz.py
index 436ae40a2..6ad8fbc20 100644
--- a/py4DSTEM/process/wholepatternfit/wpf_viz.py
+++ b/py4DSTEM/process/wholepatternfit/wpf_viz.py
@@ -1,9 +1,7 @@
-from typing import Optional
 import numpy as np

 import matplotlib.pyplot as plt
 import matplotlib.colors as mpl_c
-from matplotlib.gridspec import GridSpec

 from py4DSTEM.process.wholepatternfit.wp_models import WPFModelType
@@ -17,7 +15,7 @@ def show_model_grid(self, x=None, **plot_kwargs):
     cols = int(np.ceil(np.sqrt(N)))
     rows = (N + 1) // cols

-    kwargs = dict(constrained_layout=True)
+    kwargs = {"constrained_layout": True}
     kwargs.update(plot_kwargs)
     fig, ax = plt.subplots(rows, cols, **kwargs)
@@ -213,7 +211,7 @@ def show_lattice_points(
 def show_fit_metrics(self, returnfig=False, **subplots_kwargs):
     assert hasattr(self, "fit_metrics"), "Please run fitting first!"

-    kwargs = dict(figsize=(14, 12), constrained_layout=True)
+    kwargs = {"figsize": (14, 12), "constrained_layout": True}
     kwargs.update(subplots_kwargs)
     fig, ax = plt.subplots(2, 2, **kwargs)
     im = ax[0, 0].matshow(self.fit_metrics["cost"].data, norm=mpl_c.LogNorm())
diff --git a/py4DSTEM/visualize/overlay.py b/py4DSTEM/visualize/overlay.py
index 8c6c06d9f..016252bc5 100644
--- a/py4DSTEM/visualize/overlay.py
+++ b/py4DSTEM/visualize/overlay.py
@@ -23,13 +23,13 @@ def add_rectangles(ax, d):
         lims = [lims]
     assert isinstance(lims, list)
     N = len(lims)
-    assert all([isinstance(t, tuple) for t in lims])
-    assert all([len(t) == 4 for t in lims])
+    assert all(isinstance(t, tuple) for t in lims)
+    assert all(len(t) == 4 for t in lims)
     # color
     color = d["color"] if "color" in d.keys() else "r"
     if isinstance(color, list):
         assert len(color) == N
-        assert all([is_color_like(c) for c in color])
+        assert all(is_color_like(c) for c in color)
     else:
         assert is_color_like(color)
         color = [color for i in range(N)]
@@ -40,7 +40,7 @@ def add_rectangles(ax, d):
     else:
         assert isinstance(fill, list)
         assert len(fill) == N
-        assert all([isinstance(f, bool) for f in fill])
+        assert all(isinstance(f, bool) for f in fill)
     # alpha
     alpha = d["alpha"] if "alpha" in d.keys() else 1
     if isinstance(alpha, (float, int, np.float64)):
@@ -48,7 +48,7 @@ def add_rectangles(ax, d):
     else:
         assert isinstance(alpha, list)
         assert len(alpha) == N
-        assert all([isinstance(a, (float, int, np.float64)) for a in alpha])
+        assert all(isinstance(a, (float, int, np.float64)) for a in alpha)
     # linewidth
     linewidth = d["linewidth"] if "linewidth" in d.keys() else 2
     if isinstance(linewidth, (float, int, np.float64)):
@@ -56,12 +56,12 @@ def add_rectangles(ax, d):
     else:
         assert isinstance(linewidth, list)
         assert len(linewidth) == N
-        assert all([isinstance(lw, (float, int, np.float64)) for lw in linewidth])
+        assert all(isinstance(lw, (float, int, np.float64)) for lw in linewidth)
     # additional parameters
     kws = [
         k for k in d.keys() if k not in ("lims", "color", "fill", "alpha", "linewidth")
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -97,8 +97,8 @@ def add_circles(ax, d):
         center = [center]
     assert isinstance(center, list)
     N = len(center)
-    assert all([isinstance(x, tuple) for x in center])
-    assert all([len(x) == 2 for x in center])
+    assert all(isinstance(x, tuple) for x in center)
+    assert all(len(x) == 2 for x in center)
     # radius
     assert "R" in d.keys()
     R = d["R"]
@@ -106,12 +106,12 @@ def add_circles(ax, d):
         R = [R for i in range(N)]
     assert isinstance(R, list)
     assert len(R) == N
-    assert all([isinstance(i, Number) for i in R])
+    assert all(isinstance(i, Number) for i in R)
     # color
     color = d["color"] if "color" in d.keys() else "r"
     if isinstance(color, list):
         assert len(color) == N
-        assert all([is_color_like(c) for c in color])
+        assert all(is_color_like(c) for c in color)
     else:
         assert is_color_like(color)
         color = [color for i in range(N)]
@@ -122,7 +122,7 @@ def add_circles(ax, d):
     else:
         assert isinstance(fill, list)
         assert len(fill) == N
-        assert all([isinstance(f, bool) for f in fill])
+        assert all(isinstance(f, bool) for f in fill)
     # alpha
     alpha = d["alpha"] if "alpha" in d.keys() else 1
     if isinstance(alpha, (float, int, np.float64)):
@@ -130,7 +130,7 @@ def add_circles(ax, d):
     else:
         assert isinstance(alpha, list)
         assert len(alpha) == N
-        assert all([isinstance(a, (float, int, np.float64)) for a in alpha])
+        assert all(isinstance(a, (float, int, np.float64)) for a in alpha)
     # linewidth
     linewidth = d["linewidth"] if "linewidth" in d.keys() else 2
@@ -138,14 +138,14 @@ def add_circles(ax, d):
     else:
         assert isinstance(linewidth, list)
         assert len(linewidth) == N
-        assert all([isinstance(lw, (float, int, np.float64)) for lw in linewidth])
+        assert all(isinstance(lw, (float, int, np.float64)) for lw in linewidth)
     # additional parameters
     kws = [
         k
         for k in d.keys()
         if k not in ("center", "R", "color", "fill", "alpha", "linewidth")
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -194,8 +194,8 @@ def add_annuli(ax, d):
         assert len(center) == 2
         center = [center] * N
     # assert(isinstance(center,list))
-    assert all([isinstance(x, tuple) for x in center])
-    assert all([len(x) == 2 for x in center])
+    assert all(isinstance(x, tuple) for x in center)
+    assert all(len(x) == 2 for x in center)
     # radii
     if isinstance(radii, tuple):
         assert len(radii) == 2
@@ -203,17 +203,17 @@
         ro = [radii[1] for i in range(N)]
     else:
         assert isinstance(radii, list)
-        assert all([isinstance(x, tuple) for x in radii])
+        assert all(isinstance(x, tuple) for x in radii)
         assert len(radii) == N
         ri = [radii[i][0] for i in range(N)]
         ro = [radii[i][1] for i in range(N)]
-    assert all([isinstance(i, Number) for i in ri])
-    assert all([isinstance(i, Number) for i in ro])
+    assert all(isinstance(i, Number) for i in ri)
+    assert all(isinstance(i, Number) for i in ro)
     # color
     color = d["color"] if "color" in d.keys() else "r"
     if isinstance(color, list):
         assert len(color) == N
-        assert all([is_color_like(c) for c in color])
+        assert all(is_color_like(c) for c in color)
     else:
         assert is_color_like(color)
         color = [color for i in range(N)]
@@ -224,7 +224,7 @@
     else:
         assert isinstance(fill, list)
         assert len(fill) == N
-        assert all([isinstance(f, bool) for f in fill])
+        assert all(isinstance(f, bool) for f in fill)
     # alpha
     alpha = d["alpha"] if "alpha" in d.keys() else 1
     if isinstance(alpha, (float, int, np.float64)):
@@ -232,7 +232,7 @@
     else:
         assert isinstance(alpha, list)
         assert len(alpha) == N
-        assert all([isinstance(a, (float, int, np.float64)) for a in alpha])
+        assert all(isinstance(a, (float, int, np.float64)) for a in alpha)
     # linewidth
     linewidth = d["linewidth"] if "linewidth" in d.keys() else 2
     if isinstance(linewidth, (float, int, np.float64)):
@@ -240,14 +240,14 @@
     else:
         assert isinstance(linewidth, list)
         assert len(linewidth) == N
-        assert all([isinstance(lw, (float, int, np.float64)) for lw in linewidth])
+        assert all(isinstance(lw, (float, int, np.float64)) for lw in linewidth)
     # additional parameters
     kws = [
         k
         for k in d.keys()
         if k not in ("center", "radii", "color", "fill", "alpha", "linewidth")
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -303,7 +303,7 @@ def add_ellipses(ax, d):
         a = [a]
     assert isinstance(a, list)
     N = len(a)
-    assert all([isinstance(i, Number) for i in a])
+    assert all(isinstance(i, Number) for i in a)
     # semiminor axis length
     assert "b" in d.keys()
     b = d["b"]
@@ -311,7 +311,7 @@ def add_ellipses(ax, d):
         b = [b]
     assert isinstance(b, list)
     assert len(b) == N
-    assert all([isinstance(i, Number) for i in b])
+    assert all(isinstance(i, Number) for i in b)
     # center
     assert "center" in d.keys()
     center = d["center"]
@@ -320,8 +320,8 @@ def add_ellipses(ax, d):
         center = [center for i in range(N)]
     assert isinstance(center, list)
     assert len(center) == N
-    assert all([isinstance(x, tuple) for x in center])
-    assert all([len(x) == 2 for x in center])
+    assert all(isinstance(x, tuple) for x in center)
+    assert all(len(x) == 2 for x in center)
     # theta
     assert "theta" in d.keys()
     theta = d["theta"]
@@ -329,12 +329,12 @@ def add_ellipses(ax, d):
         theta = [theta for i in range(N)]
     assert isinstance(theta, list)
     assert len(theta) == N
-    assert all([isinstance(i, Number) for i in theta])
+    assert all(isinstance(i, Number) for i in theta)
     # color
     color = d["color"] if "color" in d.keys() else "r"
     if isinstance(color, list):
         assert len(color) == N
-        assert all([is_color_like(c) for c in color])
+        assert all(is_color_like(c) for c in color)
     else:
         assert is_color_like(color)
         color = [color for i in range(N)]
@@ -345,7 +345,7 @@ def add_ellipses(ax, d):
     else:
         assert isinstance(fill, list)
         assert len(fill) == N
-        assert all([isinstance(f, bool) for f in fill])
+        assert all(isinstance(f, bool) for f in fill)
     # alpha
     alpha = d["alpha"] if "alpha" in d.keys() else 1
     if isinstance(alpha, (float, int, np.float64)):
@@ -353,7 +353,7 @@ def add_ellipses(ax, d):
     else:
         assert isinstance(alpha, list)
         assert len(alpha) == N
-        assert all([isinstance(alp, (float, int, np.float64)) for alp in alpha])
+        assert all(isinstance(alp, (float, int, np.float64)) for alp in alpha)
     # linewidth
     linewidth = d["linewidth"] if "linewidth" in d.keys() else 2
     if isinstance(linewidth, (float, int, np.float64)):
@@ -361,7 +361,7 @@ def add_ellipses(ax, d):
     else:
         assert isinstance(linewidth, list)
         assert len(linewidth) == N
-        assert all([isinstance(lw, (float, int, np.float64)) for lw in linewidth])
+        assert all(isinstance(lw, (float, int, np.float64)) for lw in linewidth)
     # linestyle
     linestyle = d["linestyle"] if "linestyle" in d.keys() else "-"
     if isinstance(linestyle, (str)):
@@ -369,7 +369,7 @@ def add_ellipses(ax, d):
     else:
         assert isinstance(linestyle, list)
         assert len(linestyle) == N
-        assert all([isinstance(lw, (str)) for lw in linestyle])
+        assert all(isinstance(lw, (str)) for lw in linestyle)
     # additional parameters
     kws = [
         k
@@ -387,7 +387,7 @@ def add_ellipses(ax, d):
             "linestyle",
         )
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -454,7 +454,7 @@ def add_points(ax, d):
     color = d["pointcolor"] if "pointcolor" in d.keys() else "r"
     if isinstance(color, (list, np.ndarray)):
         assert len(color) == N
-        assert all([is_color_like(c) for c in color])
+        assert all(is_color_like(c) for c in color)
     else:
         assert is_color_like(color)
         color = [color for i in range(N)]
@@ -470,7 +470,7 @@ def add_points(ax, d):
         for k in d.keys()
         if k not in ("x", "y", "s", "scale", "pointcolor", "alpha", "open_circles")
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -521,7 +521,7 @@ def add_pointlabels(ax, d):
     kws = [
         k for k in d.keys() if k not in ("x", "y", "size", "color", "alpha", "labels")
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -668,7 +668,7 @@ def add_vector(ax, d):
             "labelcolor",
         )
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -720,7 +720,7 @@ def add_grid_overlay(ax, d):
         for k in d.keys()
         if k not in ("x0", "y0", "xL", "yL", "color", "alpha", "linewidth")
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -817,7 +817,7 @@ def add_scalebar(ax, d):
             "ticks",
         )
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -956,7 +956,7 @@ def add_cartesian_grid(ax, d):
             "alpha",
         )
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

@@ -1122,7 +1122,7 @@ def add_polarelliptical_grid(ax, d):
             "alpha",
         )
     ]
-    kwargs = dict()
+    kwargs = {}
     for k in kws:
         kwargs[k] = d[k]

diff --git a/py4DSTEM/visualize/show_extention.py b/py4DSTEM/visualize/show_extention.py
index 8fdf522a2..027292ddc 100644
--- a/py4DSTEM/visualize/show_extention.py
+++ b/py4DSTEM/visualize/show_extention.py
@@ -9,7 +9,7 @@ def _show_grid(**kwargs):

     # parse grid of images
     if isinstance(ar[0], list):
-        assert all([isinstance(ar[i], list) for i in range(len(ar))])
+        assert all(isinstance(ar[i], list) for i in range(len(ar)))
         W = len(ar[0])
         H = len(ar)

diff --git a/py4DSTEM/visualize/vis_RQ.py b/py4DSTEM/visualize/vis_RQ.py
index 85c0eb042..8d111b2b6 100644
--- a/py4DSTEM/visualize/vis_RQ.py
+++ b/py4DSTEM/visualize/vis_RQ.py
@@ -1,6 +1,5 @@
 import numpy as np
 import matplotlib.pyplot as plt
-from matplotlib.axes import Axes
 from py4DSTEM.visualize.show import show, show_points


diff --git a/py4DSTEM/visualize/vis_grid.py b/py4DSTEM/visualize/vis_grid.py
index cb2581e01..46548d3c4 100644
--- a/py4DSTEM/visualize/vis_grid.py
+++ b/py4DSTEM/visualize/vis_grid.py
@@ -289,7 +289,7 @@ def show_image_grid(
                 )
             except IndexError:
                 ax.axis("off")
-    if type(title) == str:
+    if isinstance(title, str):
         fig.suptitle(title)
     if suptitle:
         fig.suptitle(suptitle)
diff --git a/py4DSTEM/visualize/vis_special.py b/py4DSTEM/visualize/vis_special.py
index 2db48e371..1ea72abec 100644
--- a/py4DSTEM/visualize/vis_special.py
+++ b/py4DSTEM/visualize/vis_special.py
@@ -4,18 +4,14 @@
 from mpl_toolkits.axes_grid1 import make_axes_locatable
 from scipy.spatial import Voronoi

-from emdfile import PointList
 from py4DSTEM.visualize import show
 from py4DSTEM.visualize.overlay import (
     add_pointlabels,
-    add_vector,
-    add_bragg_index_labels,
     add_ellipses,
     add_points,
     add_scalebar,
 )
 from py4DSTEM.visualize.vis_grid import show_image_grid
-from py4DSTEM.visualize.vis_RQ import ax_addaxes, ax_addaxes_QtoR
 from colorspacious import cspace_convert


@@ -616,7 +612,7 @@ def show_selected_dps(
     assert isinstance(datacube, DataCube)
     N = len(positions)
     assert all(
-        [len(x) == 2 for x in positions]
+        len(x) == 2 for x in positions
     ), "Improperly formated argument `positions`"
     if bragg_pos is not None:
         show_disk_pos = True
@@ -639,7 +635,7 @@
         H = int(np.ceil(N / W))
     else:
         H, W = HW
-    assert all([isinstance(x, (int, np.integer)) for x in (H, W)])
+    assert all(isinstance(x, (int, np.integer)) for x in (H, W))
     x = [i[0] for i in positions]
     y = [i[1] for i in positions]
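Note (not part of the patch): the hunks above repeat the same few flake8-driven cleanups. The snippet below is a standalone sketch of those patterns using hypothetical variable names, not code taken from py4DSTEM.

    # Pass a generator expression to all() rather than a list comprehension:
    # no throwaway list is built, and evaluation short-circuits on the first failure.
    centers = [(0, 1), (2, 3)]  # hypothetical list of (x, y) tuples
    assert all(isinstance(c, tuple) for c in centers)  # was: all([isinstance(c, tuple) for c in centers])

    # Prefer isinstance() to comparing types with == (pycodestyle E721);
    # isinstance() also accepts subclasses and tuples of types.
    title = "fit metrics"
    if isinstance(title, str):  # was: if type(title) == str:
        print(title)

    # Prefer a dict literal over dict() when collecting leftover keyword arguments.
    d = {"color": "r", "alpha": 0.5, "zorder": 3}  # hypothetical overlay spec
    kwargs = {}  # was: kwargs = dict()
    for k in d.keys():
        if k not in ("color", "alpha"):
            kwargs[k] = d[k]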