diff --git a/doc/bibliography.bib b/doc/bibliography.bib index 2367613a..2e1e44ef 100644 --- a/doc/bibliography.bib +++ b/doc/bibliography.bib @@ -72,3 +72,21 @@ @phdthesis{lassen1994automated title = {{Automated Determanation of Crystal Orientations from Electron Backscattering Patterns}}, year = {1994} } +@article{chen2015dictionary, + author = {Chen, Yu H. and Park, Se Un and Wei, Dennis and Newstadt, Greg and Jackson, Michael A. and Simmons, Jeff P. and {De Graef}, Marc and Hero, Alfred O.}, + doi = {10.1017/S1431927615000756}, + issn = {14358115}, + journal = {Microscopy and Microanalysis}, + keywords = {EBSD, Von Mises-Fisher mixture distribution, dictionary matching, dynamical electron scattering, electron backscatter diffraction pattern, maximum likelihood orientation estimates}, + number = {3}, + pages = {739–752}, + title = {{A Dictionary Approach to Electron Backscatter Diffraction Indexing}}, + volume = {21}, + year = {2015} +} +@book{goshtasby2012image, + author = {Goshtasby, A Ardeshir}, + publisher = {Springer Science \& Business Media}, + title = {{Image registration: Principles, tools and methods}}, + year = {2012} +} diff --git a/doc/changelog.rst b/doc/changelog.rst index 6c6e5402..b93615d4 100644 --- a/doc/changelog.rst +++ b/doc/changelog.rst @@ -29,10 +29,14 @@ Added (`#236 `_, `#237 `_, `#243 `_) -- Indexing of EBSD patterns through matching of patterns with a static - dictionary of simulated patterns with known orientations. +- Pattern matching of EBSD patterns with a dictionary of pre-computed simulated + patterns with known crystal orientations, and related useful tools (`#231 `_, - `#233 `_) + `#233 `_, + `#234 `_): (1) A framework for + creation of similarity metrics used in pattern matching, (2) computation of an + orientation similarity map from indexing results, and (3) creation of a multi + phase crystal map from single phase maps from pattern matching. - EBSD.xmap property storing an orix CrystalMap object. So far only read from a EMsoft simulated EBSD pattern file. Relevant documentation updated. (`#226 `_) @@ -55,8 +59,8 @@ Added Changed ------- -- The EBSDMasterPattern gets unsettable phase, hemisphere and projection - properties. (`#246 `_) +- The EBSDMasterPattern gets phase, hemisphere and projection properties. + (`#246 `_) - EMsoft EBSD master pattern plugin can read a single energy pattern. Parameter `energy_range` changed to `energy`. (`240 `_) diff --git a/doc/reference.rst b/doc/reference.rst index e1130329..ebfafcfe 100644 --- a/doc/reference.rst +++ b/doc/reference.rst @@ -224,22 +224,22 @@ indexing .. currentmodule:: kikuchipy.indexing .. autosummary:: - pattern_matching + StaticPatternMatching + orientation_similarity_map + merge_crystal_maps similarity_metrics -pattern_matching ----------------- - -.. currentmodule:: kikuchipy.indexing.pattern_matching - -.. autosummary:: - pattern_match - -.. automodule:: kikuchipy.indexing.pattern_matching +.. autoclass:: StaticPatternMatching :members: :undoc-members: :show-inheritance: + .. automethod:: __init__ + .. automethod:: __call__ + +.. autofunction:: orientation_similarity_map +.. autofunction:: merge_crystal_maps + similarity_metrics ------------------ @@ -248,16 +248,21 @@ similarity_metrics .. autosummary:: make_similarity_metric MetricScope + ncc + ndp .. automodule:: kikuchipy.indexing.similarity_metrics :members: :undoc-members: :show-inheritance: +.. autofunction:: ncc +.. autofunction:: ndp + .... io -== +=== .. 
automodule:: kikuchipy.io @@ -457,6 +462,7 @@ All methods listed here are also available to .. autosummary:: adaptive_histogram_equalization average_neighbour_patterns + match_patterns fft_filter get_decomposition_model get_dynamic_background diff --git a/kikuchipy/conftest.py b/kikuchipy/conftest.py index 6e9a6ca2..fcb677ea 100644 --- a/kikuchipy/conftest.py +++ b/kikuchipy/conftest.py @@ -19,11 +19,12 @@ import gc import os import tempfile +from typing import Tuple from diffpy.structure import Atom, Lattice, Structure from diffsims.crystallography import ReciprocalLatticePoint import numpy as np -from orix.crystal_map import Phase +from orix.crystal_map import CrystalMap, Phase, PhaseList from orix.quaternion.rotation import Rotation from orix.vector import Vector3d, neo_euler import pytest @@ -40,8 +41,8 @@ @pytest.fixture def dummy_signal(): - """Dummy signal of shape <3, 3|3, 3>. If this is changed, all tests - using this signal will fail since they compare the output from + """Dummy signal of shape <(3, 3)|(3, 3)>. If this is changed, all + tests using this signal will fail since they compare the output from methods using this signal (as input) to hard-coded outputs. """ # fmt: off @@ -292,3 +293,59 @@ def nickel_zone_axes(nickel_kikuchi_band, nickel_rotations, pc1): in_pattern=uvw_in_pattern, gnomonic_radius=detector.r_max, ) + + +@pytest.fixture +def rotations(): + return Rotation([(2, 4, 6, 8), (-1, -3, -5, -7)]) + + +@pytest.fixture +def get_single_phase_xmap(rotations): + def _get_single_phase_xmap( + nav_shape, + rotations_per_point=5, + prop_names=["scores", "simulation_indices"], + name="a", + phase_id=0, + ): + d, map_size = _get_spatial_array_dicts(nav_shape) + rot_idx = np.random.choice( + np.arange(rotations.size), map_size * rotations_per_point + ) + data_shape = (map_size,) + if rotations_per_point > 1: + data_shape += (rotations_per_point,) + d["rotations"] = rotations[rot_idx].reshape(*data_shape) + d["phase_id"] = np.ones(map_size) * phase_id + d["phase_list"] = PhaseList(Phase(name=name)) + # Scores and simulation indices + d["prop"] = { + prop_names[0]: np.ones(data_shape, dtype=np.float32), + prop_names[1]: np.arange(np.prod(data_shape)).reshape(data_shape), + } + return CrystalMap(**d) + + return _get_single_phase_xmap + + +def _get_spatial_array_dicts( + nav_shape: Tuple[int, int], step_sizes: Tuple[int, int] = (1.5, 1) +) -> Tuple[dict, int]: + ny, nx = nav_shape + dy, dx = step_sizes + d = {"x": None, "y": None, "z": None} + map_size = 1 + if nx > 1: + if ny > 1: + d["x"] = np.tile(np.arange(nx) * dx, ny) + else: + d["x"] = np.arange(nx) * dx + map_size *= nx + if ny > 1: + if nx > 1: + d["y"] = np.sort(np.tile(np.arange(ny) * dy, nx)) + else: + d["y"] = np.arange(ny) * dy + map_size *= ny + return d, map_size diff --git a/kikuchipy/generators/virtual_bse_generator.py b/kikuchipy/generators/virtual_bse_generator.py index b4274823..6351ee3b 100644 --- a/kikuchipy/generators/virtual_bse_generator.py +++ b/kikuchipy/generators/virtual_bse_generator.py @@ -129,8 +129,8 @@ def get_rgb_image( See Also -------- - kikuchipy.signals.EBSD.plot_virtual_bse_intensity, - kikuchipy.signals.EBSD.get_virtual_bse_intensity, + ~kikuchipy.signals.EBSD.plot_virtual_bse_intensity + ~kikuchipy.signals.EBSD.get_virtual_bse_intensity Notes ----- diff --git a/kikuchipy/indexing/__init__.py b/kikuchipy/indexing/__init__.py index b670fd88..f319217c 100644 --- a/kikuchipy/indexing/__init__.py +++ b/kikuchipy/indexing/__init__.py @@ -16,16 +16,23 @@ # You should have received a copy of 
the GNU General Public License # along with kikuchipy. If not, see . -"""Indexing of EBSD patterns.""" +"""Tools for indexing of EBSD patterns by comparison to simulated +patterns. -from kikuchipy.indexing.similarity_metrics import ( - make_similarity_metric, - MetricScope, +The EBSD method :meth:`~kikuchipy.signals.EBSD.match_patterns` uses +these tools for pattern matching. +""" + +from kikuchipy.indexing._merge_crystal_maps import merge_crystal_maps +from kikuchipy.indexing.orientation_similarity_map import ( + orientation_similarity_map, ) -from kikuchipy.indexing.pattern_matching import pattern_match +from kikuchipy.indexing import similarity_metrics +from kikuchipy.indexing._static_pattern_matching import StaticPatternMatching __all__ = [ - "make_similarity_metric", - "MetricScope", - "pattern_match", + "merge_crystal_maps", + "orientation_similarity_map", + "similarity_metrics", + "StaticPatternMatching", ] diff --git a/kikuchipy/indexing/_merge_crystal_maps.py b/kikuchipy/indexing/_merge_crystal_maps.py new file mode 100644 index 00000000..e9bc21ba --- /dev/null +++ b/kikuchipy/indexing/_merge_crystal_maps.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- +# Copyright 2019-2020 The kikuchipy developers +# +# This file is part of kikuchipy. +# +# kikuchipy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# kikuchipy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with kikuchipy. If not, see . + +from math import copysign +from typing import List, Union, Tuple +import warnings + +import numpy as np +from orix.crystal_map import CrystalMap, PhaseList +from orix.quaternion.rotation import Rotation + +from kikuchipy.indexing.similarity_metrics import ( + SimilarityMetric, + _SIMILARITY_METRICS, +) + + +def merge_crystal_maps( + crystal_maps: List[CrystalMap], + mean_n_best: int = 1, + metric: Union[str, SimilarityMetric] = None, + simulation_indices_prop: str = "simulation_indices", + scores_prop: str = "scores", +): + """Merge a list of at least two single phase + :class:`~orix.crystal_map.crystal_map.CrystalMap` with a 1D or 2D + navigation shape into one multi phase map. + + It is required that there are at least as many simulation indices as + scores per point, and that all maps have the same number of + rotations, scores and simulation indices per point. + + Parameters + ---------- + crystal_maps : list of\ + :class:`~orix.crystal_map.crystal_map.CrystalMap` + A list of crystal maps with simulated indices and scores among + their properties. + mean_n_best : int, optional + Number of best metric results to take the mean of before + comparing. Default is 1. + metric : str or SimilarityMetric, optional + Similarity metric, default is None. + simulation_indices_prop : str, optional + Name of simulated indices array in the crystal maps' properties. + Default is "simulation_indices". + scores_prop : str, optional + Name of scores array in the crystal maps' properties. Default + is "scores". 
+ + Returns + ------- + merged_xmap : ~orix.crystal_map.crystal_map.CrystalMap + A crystal map where the rotation of the phase with the best + matching score(s) is assigned to each point. The best matching + simulation indices and scores, merge sorted, are added to its + properties with names equal to whatever passed to `scores_prop` + and `simulation_indices_prop` with "merged" as a suffix, + respectively. + + Notes + ----- + `mean_n_best` can be given with a negative sign if `metric` is not + given, in order to choose the lowest valued metric results. + """ + map_shapes = [xmap.shape for xmap in crystal_maps] + if not np.sum(abs(np.diff(map_shapes, axis=0))) == 0: + raise ValueError("All crystal maps must have the same navigation shape") + + rot_per_point_per_map = [xmap.rotations_per_point for xmap in crystal_maps] + if not all(np.diff(rot_per_point_per_map) == 0): + raise ValueError( + "All crystal maps must have the same number of rotations, scores " + "and simulation indices per point." + ) + + if metric is None: + sign = copysign(1, mean_n_best) + mean_n_best = abs(mean_n_best) + else: + sign = _SIMILARITY_METRICS.get(metric, metric).sign + + # Notation used in the comments below: + # - M: number of map points + # - N: number of scores per point + # - I: number of simulation indices per point + # - K: number of maps to merge + + # Shape of the combined (unsorted) scores array, and the total + # number of scores per point. Shape: (M, N, K) or (M, K) if only one + # score is available (e.g. refined dot products from EMsoft) + (comb_shape, n_scores_per_point) = _get_combined_scores_shape( + crystal_maps=crystal_maps, scores_prop=scores_prop + ) + + # Combined (unsorted) scores array of shape (M, N, K) or (M, K) + combined_scores = np.dstack( + [xmap.prop[scores_prop] for xmap in crystal_maps] + ) + combined_scores = combined_scores.reshape(comb_shape) + + # Best score in each map point + if n_scores_per_point > 1: # (M, N, K) + best_scores = np.mean(combined_scores[:, :mean_n_best], axis=1) + else: # (M, K) + best_scores = combined_scores + + # Phase of best score in each map point + phase_id = np.argmax(sign * best_scores, axis=1) + + # Get the new CrystalMap's rotations, scores and indices, restricted + # to one phase per point (uncombined) + new_rotations = Rotation(np.zeros_like(crystal_maps[0].rotations.data)) + new_scores = np.zeros_like(crystal_maps[0].prop[scores_prop]) + new_indices = np.zeros_like(crystal_maps[0].prop[simulation_indices_prop]) + phase_list = PhaseList() + for i, xmap in enumerate(crystal_maps): + mask = phase_id == i + new_rotations[mask] = xmap.rotations[mask] + new_scores[mask] = xmap.prop[scores_prop][mask] + new_indices[mask] = xmap.prop[simulation_indices_prop][mask] + if np.sum(mask) != 0: + current_id = xmap.phases_in_data.ids[0] + phase = xmap.phases_in_data[current_id].deepcopy() + try: + phase_list.add(phase) + except ValueError: + name = phase.name + warnings.warn( + f"There are duplicates of phase {name}, will therefore " + f"rename this phase's name to {name + str(i)} in the merged" + " PhaseList", + ) + phase.name = name + str(i) + phase_list.add(phase) + + # To get the combined, best, sorted scores and simulation indices + # from all maps (phases), we collapse the second and (potentially) + # third axis to get (M, N * K) or (M, K) + mergesort_shape = (comb_shape[0], np.prod(comb_shape[1:])) + comb_scores_reshaped = combined_scores.reshape(mergesort_shape) + best_sorted_idx = np.argsort( + sign * -comb_scores_reshaped, kind="mergesort", axis=1 + 
) + + # Best, sorted scores in all maps (for all phases) per point + merged_best_scores = np.take_along_axis( + comb_scores_reshaped, best_sorted_idx, axis=-1 + ) + + # Combined (unsorted) simulation indices array of shape (M, N, K) or + # (M, K), accounting for the case where there are more simulation + # indices per point than scores (e.g. refined dot products from + # EMsoft) + comb_sim_idx = np.dstack( + [xmap.prop[simulation_indices_prop] for xmap in crystal_maps] + ) + + # To enable calculation of an orientation similarity map from the + # combined, sorted simulation indices array, we must make the + # indices unique across all maps + for i in range(1, comb_sim_idx.shape[-1]): + increment = ( + abs(comb_sim_idx[..., i - 1].max() - comb_sim_idx[..., i].min()) + 1 + ) + comb_sim_idx[..., i] += increment + + # Collapse axes as for the combined scores array above + comb_sim_idx = comb_sim_idx.reshape(mergesort_shape) + + # Best, sorted simulation indices in all maps (for all phases) per + # point + merged_simulated_indices = np.take_along_axis( + comb_sim_idx, best_sorted_idx, axis=-1 + ) + + return CrystalMap( + rotations=new_rotations, + phase_id=phase_id, + phase_list=phase_list, + x=crystal_maps[0].x, + y=crystal_maps[0].y, + z=crystal_maps[0].z, + prop={ + scores_prop: new_scores, + simulation_indices_prop: new_indices, + f"merged_{scores_prop}": merged_best_scores, + f"merged_{simulation_indices_prop}": merged_simulated_indices, + }, + scan_unit=crystal_maps[0].scan_unit, + ) + + +def _get_combined_scores_shape( + crystal_maps: List[CrystalMap], scores_prop: str = "scores" +) -> Tuple[tuple, int]: + xmap = crystal_maps[0] + all_scores_shape = (xmap.size,) + single_scores_shape = xmap.prop[scores_prop].shape + if len(single_scores_shape) == 1: + n_scores_per_point = 1 + else: + n_scores_per_point = single_scores_shape[1] + all_scores_shape += (single_scores_shape[-1],) + all_scores_shape += (len(crystal_maps),) + return all_scores_shape, n_scores_per_point diff --git a/kikuchipy/indexing/pattern_matching.py b/kikuchipy/indexing/_pattern_matching.py similarity index 62% rename from kikuchipy/indexing/pattern_matching.py rename to kikuchipy/indexing/_pattern_matching.py index b896341c..b4644dd9 100644 --- a/kikuchipy/indexing/pattern_matching.py +++ b/kikuchipy/indexing/_pattern_matching.py @@ -18,38 +18,40 @@ """Matching of experimental to simulated gray-tone patterns.""" -import sys -from typing import Union, Tuple +from typing import Optional, Tuple, Union import dask.array as da from dask.diagnostics import ProgressBar import numpy as np +import psutil +from tqdm import tqdm from kikuchipy.indexing.similarity_metrics import ( - SIMILARITY_METRICS, + _SIMILARITY_METRICS, SimilarityMetric, _get_nav_shape, _get_number_of_simulated, ) + # TODO: Support masking signal space # TODO: Support masking navigation space -def pattern_match( +def _pattern_match( experimental: Union[da.Array, np.ndarray], simulated: Union[da.Array, np.ndarray], - keep_n: int = 1, - metric: Union[str, SimilarityMetric] = "zncc", + keep_n: int = 50, + metric: Union[str, SimilarityMetric] = "ncc", compute: bool = True, n_slices: int = 1, + phase_name: Optional[str] = None, ) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[da.Array, da.Array]]: """Find the best matching simulations to experimental data based on given `metric`. Function is primarily for use in - :class:`~kikuchipy.indexing.StaticDictionaryIndexing` and - :class:`~kikuchipy.indexing.DynamicDictionaryIndexing`. 
+ :class:`~kikuchipy.indexing.StaticPatternMatching`. Parameters ---------- @@ -58,15 +60,18 @@ def pattern_match( simulated : numpy.ndarray or dask.array.Array Simulated patterns. keep_n : int, optional - Number of match results to keep for each pattern, by default 1. + Number of match results to keep for each pattern, by default 50. metric : str or SimilarityMetric - Similarity metric, by default "zncc". + Similarity metric, by default "ncc". compute : bool, optional Whether to compute dask arrays before returning, by default True. n_slices : int, optional - Number of simulated slices to process sequentially, by default - 1. + Number of simulated slices to process sequentially. Default is + 1, i.e. the simulated pattern array is not sliced. + phase_name : str, optional + Simulated patterns phase name, shown in the progressbar if + `n_slices` > 1. Returns ------- @@ -76,11 +81,12 @@ def pattern_match( Metric results with data shapes (ny*nx, keep_n). Sorted along `keep_n` axis according to the metric used. """ - metric = SIMILARITY_METRICS.get(metric, metric) + metric = _SIMILARITY_METRICS.get(metric, metric) if not isinstance(metric, SimilarityMetric): raise ValueError( - f"{metric} must be either of {list(SIMILARITY_METRICS.keys())} " - "or an instance of SimilarityMetric. See make_similarity_metric" + f"{metric} must be either of {list(_SIMILARITY_METRICS.keys())} " + "or an instance of SimilarityMetric. See " + "kikuchipy.indexing.similarity_metrics.make_similarity_metric()" ) # Expects signal data to be located on the two last axis for all scopes @@ -99,6 +105,8 @@ def pattern_match( f"{metric.scope} of {type(metric).__name__}" ) + keep_n = min(keep_n, _get_number_of_simulated(simulated)) + if n_slices == 1: return _pattern_match_single_slice( experimental, @@ -119,17 +127,19 @@ def pattern_match( keep_n=keep_n, metric=metric, n_slices=n_slices, + phase_name=phase_name, ) -def _pattern_match_single_slice( +def _pattern_match_slice_simulated( experimental: Union[np.ndarray, da.Array], simulated: Union[np.ndarray, da.Array], keep_n: int, metric: SimilarityMetric, - compute: bool, -) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[da.Array, da.Array]]: - """See :func:`pattern_match`. + n_slices: int = 1, + phase_name: Optional[str] = None, +) -> Tuple[np.ndarray, np.ndarray]: + """See :func:`_pattern_match`. Parameters ---------- @@ -141,55 +151,96 @@ def _pattern_match_single_slice( Number of results to keep. metric : SimilarityMetric Similarity metric. - compute : bool - Whether to compute dask arrays before returning, by default - True. + n_slices : int, optional + Number of simulation slices to process sequentially. Default is + 1 (no slicing). + phase_name : str, optional + Simulated patterns phase name, shown in the progressbar. Returns ------- simulation_indices : numpy.ndarray or dask.array.Array - Simulation indices corresponding with metric results. + Ranked simulation indices corresponding to metric results. scores : numpy.ndarray or dask.array.Array - Metric results with data shapes (ny*nx, keep_n). Sorted along - `keep_n` axis according to the metric used. + Ranked metric results with data shapes (ny*nx, keep_n). 
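To make the aggregation step below concrete: each dictionary slice is matched separately, its `keep_n` best scores and offset-corrected simulation indices are stored side by side, and the aggregate is then re-ranked per map point. A minimal numpy sketch with toy numbers (not taken from this diff):

import numpy as np

# Hypothetical results for one map point from two dictionary slices,
# each slice already reduced to its keep_n = 3 best matches
scores_slice_1 = np.array([0.90, 0.80, 0.70])
indices_slice_1 = np.array([12, 3, 7])
scores_slice_2 = np.array([0.95, 0.60, 0.50])
indices_slice_2 = np.array([40, 55, 41])  # already offset by the slice start

scores_all = np.concatenate([scores_slice_1, scores_slice_2])
indices_all = np.concatenate([indices_slice_1, indices_slice_2])

# Re-rank the aggregated results to the global keep_n best
order = np.argsort(-scores_all, kind="mergesort")[:3]
print(indices_all[order])  # [40 12  3]
print(scores_all[order])   # [0.95 0.9  0.8]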
""" - similarities = metric(experimental, simulated) - similarities = da.asarray(similarities) + # TODO: Try to respect/utilize chunks when slicing - # ONE_TO_ONE - if similarities.shape == (): - similarity = ( - np.array([similarities.compute()]) if compute else similarities - ) - return np.array([0]), similarity + nav_shape = _get_nav_shape(experimental) + nav_size = int(np.prod(nav_shape)) + num_simulated = _get_number_of_simulated(simulated) + slice_size = num_simulated // n_slices - # If N is < keep_n => keep_n = N - keep_n = min(keep_n, len(simulated)) + n = min(keep_n, num_simulated) + aggregate_shape = (nav_size, n_slices * n) + result_shape = (nav_size, n) + simulated_indices_aggregate = np.zeros(aggregate_shape, np.int32) + scores_aggregate = np.zeros(aggregate_shape, metric._dtype_out) - simulated_indices = similarities.argtopk(metric.sign * keep_n, axis=-1) - scores = similarities.topk(metric.sign * keep_n, axis=-1) + if phase_name is None or phase_name == "": + desc = "Matching patterns" + else: + desc = f"Matching {phase_name[:10]} patterns" + + with tqdm( + iterable=range(n_slices), desc=desc, unit="slice", total=n_slices + ) as t: + start = 0 + for i in range(n_slices): + t.set_postfix_str(f"mem={psutil.virtual_memory().percent}%") + + if i != n_slices - 1: + end = start + slice_size + else: # Last iteration + end = num_simulated + + simulated_indices, scores = _pattern_match_single_slice( + experimental, + simulated[start:end], + keep_n=keep_n, + metric=metric, + compute=False, + ) - if compute: - with ProgressBar(): - simulated_indices, scores = da.compute(simulated_indices, scores) + # Adjust simulation indices matches to correspond with + # original simulated + simulated_indices += start - # Flattens the signal axis if not already flat. - # This is foremost a design choice for returning standard outputs - if not metric.flat: - simulated_indices = simulated_indices.reshape(-1, keep_n) - scores = scores.reshape(-1, keep_n) + da.store( + sources=[simulated_indices, scores], + targets=[simulated_indices_aggregate, scores_aggregate], + regions=[np.s_[:, i * n : (i + 1) * n]] * 2, + ) + + start += slice_size + + # Update progressbar + t.update() + + # TODO: Perform a test to see if memory use in this loop + # would benefit from garbage collection + # gc.collect() + + simulated_indices = np.zeros(result_shape, np.int32) + scores = np.zeros(result_shape, metric._dtype_out) + for i in range(nav_size): + indices = (metric.sign * -scores_aggregate[i]).argsort( + kind="mergesort" + )[:keep_n] + simulated_indices[i] = simulated_indices_aggregate[i][indices] + scores[i] = scores_aggregate[i][indices] return simulated_indices, scores -def _pattern_match_slice_simulated( +def _pattern_match_single_slice( experimental: Union[np.ndarray, da.Array], simulated: Union[np.ndarray, da.Array], keep_n: int, metric: SimilarityMetric, - n_slices: int, -) -> Tuple[np.ndarray, np.ndarray]: - """See :func:`pattern_match`. + compute: bool, +) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[da.Array, da.Array]]: + """See :func:`_pattern_match`. Parameters ---------- @@ -201,70 +252,40 @@ def _pattern_match_slice_simulated( Number of results to keep. metric : SimilarityMetric Similarity metric. - n_slices : int - Number of simulation slices to process sequentially. + compute : bool + Whether to compute dask arrays before returning the results. Returns ------- - simulation_indices : numpy.ndarray - Simulation indices corresponding with metric results. - scores : numpy.ndarray - Sorted metric results. 
+ simulation_indices : numpy.ndarray or dask.array.Array + Ranked simulation indices corresponding to metric results. + scores : numpy.ndarray or dask.array.Array + Ranked metric results with data shapes (ny*nx, keep_n). """ - # This is a naive implementation, hopefully not stupid, of slicing - # the simulated in batches without thinking about aligning with - # dask chunks or rechunking dask seem to handle the sequential - # slicing decently - - nav_shape = _get_nav_shape(experimental) - nav_size = int(np.prod(nav_shape)) - num_simulated = _get_number_of_simulated(simulated) - slice_size = num_simulated // n_slices - - n = min(keep_n, slice_size) - simulated_indices_aggregate = np.zeros((nav_size, n_slices * n), np.int) - scores_aggregate = np.zeros((nav_size, n_slices * n), metric._dtype_out) - - start = 0 - for i in range(n_slices): - end = start + slice_size if i != n_slices - 1 else num_simulated + similarities = metric(experimental, simulated) + similarities = da.asarray(similarities) - simulated_indices, scores = _pattern_match_single_slice( - experimental, - simulated[start:end], - keep_n=keep_n, - metric=metric, - compute=False, + # If MetricScope.ONE_TO_ONE + if similarities.shape == (): + similarity = ( + np.array([similarities.compute()]) if compute else similarities ) + return np.array([0]), similarity - # Adjust simulation indicies matches to correspond with - # original simulated - simulated_indices += start + # keep_n_aggregate: If N is < keep_n => keep_n = N + keep_n = min(keep_n, len(simulated)) - result_slice = np.s_[:, i * n : (i + 1) * n] - with ProgressBar(): - print( - f"Matching patterns, batch {i + 1}/{n_slices}:", file=sys.stdout - ) - da.store( - [simulated_indices, scores], - [ - simulated_indices_aggregate[result_slice], - scores_aggregate[result_slice], - ], - # This should be possible, but do we gain anything? - # regions=(slice(......)) - ) + simulated_indices = similarities.argtopk(metric.sign * keep_n, axis=-1) + scores = similarities.topk(metric.sign * keep_n, axis=-1) - start += slice_size + if compute: + with ProgressBar(): + simulated_indices, scores = da.compute(simulated_indices, scores) - simulated_indices = np.zeros((nav_size, n), np.int32) - scores = np.zeros((nav_size, n), np.float32) - for i in range(nav_size): - indices = (metric.sign * -scores_aggregate[i]).argsort( - kind="mergesort" - )[:keep_n] - simulated_indices[i] = simulated_indices_aggregate[i][indices] - scores[i] = scores_aggregate[i][indices] + # Flattens the signal axis if not already flat. This is foremost a + # design choice for returning standard outputs. + if not metric.flat: + simulated_indices = simulated_indices.reshape(-1, keep_n) + scores = scores.reshape(-1, keep_n) return simulated_indices, scores diff --git a/kikuchipy/indexing/_static_pattern_matching.py b/kikuchipy/indexing/_static_pattern_matching.py new file mode 100644 index 00000000..12c4511d --- /dev/null +++ b/kikuchipy/indexing/_static_pattern_matching.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2019-2020 The kikuchipy developers +# +# This file is part of kikuchipy. +# +# kikuchipy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# kikuchipy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with kikuchipy. If not, see . + +from typing import Union, List + +import numpy as np +from orix.crystal_map import CrystalMap + +from kikuchipy.indexing._merge_crystal_maps import merge_crystal_maps +from kikuchipy.indexing.orientation_similarity_map import ( + orientation_similarity_map, +) +from kikuchipy.indexing._pattern_matching import _pattern_match +from kikuchipy.indexing.similarity_metrics import ( + SimilarityMetric, + _SIMILARITY_METRICS, +) + + +class StaticPatternMatching: + """Pattern matching of experimental patterns to simulated patterns, + of known crystal orientations in pre-computed dictionaries + :cite:`chen2015dictionary,jackson2019dictionary`, for phase and + orientation determination. + """ + + def __init__(self, dictionaries): + """Set up pattern matching with one or more dictionaries of + pre-computed simulated patterns of known crystal orientations. + + Parameters + ---------- + dictionaries : EBSD or list of EBSD + Dictionaries as EBSD signals with a 1D navigation axis and + the `xmap` property with known crystal orientations set. + """ + if not isinstance(dictionaries, list): + dictionaries = [dictionaries] + self.dictionaries = dictionaries + + def __call__( + self, + signal, + metric: Union[str, SimilarityMetric] = "ncc", + keep_n: int = 50, + n_slices: int = 1, + return_merged_crystal_map: bool = False, + get_orientation_similarity_map: bool = False, + ) -> Union[CrystalMap, List[CrystalMap]]: + """Match each experimental pattern to all simulated patterns, of + known crystal orientations in pre-computed dictionaries + :cite:`chen2015dictionary,jackson2019dictionary`, to determine + their phase and orientation. + + A suitable similarity metric, the normalized cross-correlation + (:func:`~kikuchipy.indexing.similarity_metrics.ncc`), is used by + default, but a valid user-defined similarity metric may be used + instead. + + :class:`~orix.crystal_map.crystal_map.CrystalMap`'s for each + dictionary with "scores" and "simulation_indices" as properties + are returned. + + Parameters + ---------- + signal : EBSD + EBSD signal with experimental patterns. + metric : str or SimilarityMetric, optional + Similarity metric, by default "ncc" (normalized + cross-correlation). + keep_n : int, optional + Number of best matches to keep, by default 50 or the number + of simulated patterns if fewer than 50 are available. + n_slices : int, optional + Number of simulation slices to process sequentially, by + default 1 (no slicing). + return_merged_crystal_map : bool, optional + Whether to return a merged crystal map, the best matches + determined from the similarity scores, in addition to the + single phase maps. By default False. + get_orientation_similarity_map : bool, optional + Add orientation similarity maps to the returned crystal + maps' properties named "osm". By default False. + + Returns + ------- + xmaps : :class:`~orix.crystal_map.crystal_map.CrystalMap` or \ + list of \ + :class:`~orix.crystal_map.crystal_map.CrystalMap` + A crystal map for each dictionary loaded and one merged map + if `return_merged_crystal_map = True`. 
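A minimal usage sketch, assuming `s` is an experimental EBSD signal and `sim` is a dictionary of simulated patterns as an EBSD signal with its `xmap` property set (both names are illustrative, not from this diff):

from kikuchipy.indexing import StaticPatternMatching

sd = StaticPatternMatching(sim)

# Keep the 20 best matches per experimental pattern and process the
# dictionary in 10 slices to limit memory use
xmap = sd(s, metric="ncc", keep_n=20, n_slices=10)

With a list of dictionaries, `return_merged_crystal_map=True` additionally returns a merged multi phase map, and `get_orientation_similarity_map=True` adds an "osm" property to the returned maps, as the parameters above describe.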
+ + Notes + ----- + Merging of crystal maps and calculations of orientation + similarity maps can be done afterwards with + :func:`~kikuchipy.indexing.merge_crystal_maps` and + :func:`~kikuchipy.indexing.orientation_similarity_map`, + respectively. + + See Also + -------- + ~kikuchipy.indexing.similarity_metrics.make_similarity_metric + ~kikuchipy.indexing.similarity_metrics.ndp + """ + # This needs a rework before sent to cluster and possibly more + # automatic slicing with dask + n_simulations = max( + [d.axes_manager.navigation_size for d in self.dictionaries] + ) + good_number = 13500 + if (n_simulations // n_slices) > good_number: + answer = input( + "You should probably increase n_slices depending on your " + f"available memory, try above {n_simulations // good_number}." + " Do you want to proceed? [y/n]" + ) + if answer != "y": + return + + # Get metric from optimized metrics if it is available, or + # return the metric if it is not + metric = _SIMILARITY_METRICS.get(metric, metric) + + axes_manager = signal.axes_manager + spatial_arrays = _get_spatial_arrays( + shape=axes_manager.navigation_shape, + extent=axes_manager.navigation_extent, + step_sizes=[i.scale for i in axes_manager.navigation_axes], + ) + n_nav_dims = axes_manager.navigation_dimension + if n_nav_dims == 0: + xmap_kwargs = dict() + elif n_nav_dims == 1: + scan_unit = axes_manager.navigation_axes[0].units + xmap_kwargs = dict(x=spatial_arrays, scan_unit=scan_unit) + else: # 2d + scan_unit = axes_manager.navigation_axes[0].units + xmap_kwargs = dict( + x=spatial_arrays[0], y=spatial_arrays[1], scan_unit=scan_unit, + ) + + keep_n = min([keep_n] + [d.xmap.size for d in self.dictionaries]) + + # Naively let dask compute them seperately, should try in the + # future combined compute for better performance + xmaps = [] + patterns = signal.data + for dictionary in self.dictionaries: + simulation_indices, scores = _pattern_match( + patterns, + dictionary.data, + metric=metric, + keep_n=keep_n, + n_slices=n_slices, + phase_name=dictionary.xmap.phases_in_data.names[0], + ) + new_xmap = CrystalMap( + rotations=dictionary.xmap.rotations[simulation_indices], + phase_list=dictionary.xmap.phases_in_data, + prop={ + "scores": scores, + "simulation_indices": simulation_indices, + }, + **xmap_kwargs, + ) + xmaps.append(new_xmap) + + # Create a merged CrystalMap using best metric result across all + # dictionaries + if return_merged_crystal_map and len(self.dictionaries) > 1: + xmap_merged = merge_crystal_maps(xmaps, metric=metric) + xmaps.append(xmap_merged) + + # Compute orientation similarity map + if get_orientation_similarity_map: + for xmap in xmaps: + osm = orientation_similarity_map(xmap, n_best=keep_n) + xmap.prop["osm"] = osm.flatten() + + if len(xmaps) == 1: + xmaps = xmaps[0] + + return xmaps + + +def _get_spatial_arrays( + shape: tuple, extent: tuple, step_sizes: tuple +) -> Union[tuple, np.ndarray]: + n_nav_dims = len(shape) + if n_nav_dims == 0: + return () + if n_nav_dims == 1: + x0, x1 = extent + dx = step_sizes[0] + return np.arange(x0, x1 + dx, dx) + else: + x0, x1, y0, y1 = extent + dx, dy = step_sizes + x = np.tile(np.arange(x0, x1 + dx, dx), shape[1]) + y = np.tile(np.arange(y0, y1 + dy, dy), shape[0]) + return x, y diff --git a/kikuchipy/indexing/orientation_similarity_map.py b/kikuchipy/indexing/orientation_similarity_map.py new file mode 100644 index 00000000..615859e0 --- /dev/null +++ b/kikuchipy/indexing/orientation_similarity_map.py @@ -0,0 +1,155 @@ +# -*- coding: utf-8 -*- +# Copyright 2019-2020 The 
kikuchipy developers +# +# This file is part of kikuchipy. +# +# kikuchipy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# kikuchipy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with kikuchipy. If not, see . + +"""Compute an orientation similarity map, where the ranked list of the +array indices of the best matching simulated patterns in one map point +is compared to the corresponding lists in the nearest neighbour points. +""" + +# TODO: Consider moving to orix. + +import numpy as np +from scipy.ndimage import generic_filter + + +def orientation_similarity_map( + xmap, + n_best: int = None, + simulation_indices_prop: str = "simulation_indices", + normalize: bool = True, + from_n_best: int = None, + footprint: np.ndarray = None, + center_index: int = 2, +) -> np.ndarray: + r"""Compute an orientation similarity map following + :cite:`marquardt2017quantitative`, where the ranked list of the + array indices of the best matching simulated patterns in one point + is compared to the corresponding lists in the nearest neighbour + points. + + Parameters + ---------- + xmap : ~orix.crystal_map.crystal_map.CrystalMap + A crystal map with a ranked list of the array indices of the + best matching simulated patterns among its properties. + n_best : int, optional + Number of ranked indices to compare. If None (default), all + indices are compared. + simulation_indices_prop : str, optional + Name of simulated indices array in the crystal maps' properties. + Default is "simulation_indices". + normalize : bool, optional + Whether to normalize the number of equal indices to the range + [0, 1], by default True. + from_n_best : int, optional + Return an OSM for each n in the range [`from_n_best`, `n_best`]. + If None (default), only the OSM for `n_best` indices is + returned. + footprint : numpy.ndarray, optional + Boolean 2D array specifying which neighbouring points to compare + lists with, by default the four nearest neighbours. + center_index : int, optional + Flat index of central navigation point in the truthy values of + footprint, by default 2. + + Returns + ------- + osm : numpy.ndarray + Orientation similarity map(s). If `from_n_best` is not None, + the returned array has three dimensions, where `n_best` is at + array[:, :, 0] and `from_n_best` at array[:, :, -1]. + + Notes + ----- + If the set :math:`S_{r,c}` is the ranked list of best matching + indices for a given point :math:`(r,c)`, then the orientation + similarity index :math:`\eta_{r,c}` is the average value of the + cardinalities (\#) of the intersections with the neighbouring sets + + .. math:: + + \eta_{r,c} = \frac{1}{4} + \left( + \#(S_{r,c} \cap S_{r-1,c}) + + \#(S_{r,c} \cap S_{r+1,c}) + + \#(S_{r,c} \cap S_{r,c-1}) + + \#(S_{r,c} \cap S_{r,c+1}) + \right). 
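As a concrete illustration of the cardinality terms in the sum above, the overlap between one point's ranked index list and a neighbour's list is counted with a set intersection and normalized by the list length; a toy sketch with made-up index values:

import numpy as np

# Ranked lists of the n_best = 4 best matching simulation indices in a
# point and in one of its nearest neighbours
center = np.array([41, 7, 13, 2])
neighbour = np.array([41, 13, 99, 5])

# Cardinality of the intersection, #(S_center intersected with S_neighbour)
overlap = len(np.intersect1d(center, neighbour))  # 2

# Normalized contribution from this neighbour, in [0, 1]
print(overlap / center.size)  # 0.5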
+ """ + simulation_indices = xmap.prop[simulation_indices_prop] + nav_size, keep_n = simulation_indices.shape + + if n_best is None: + n_best = keep_n + elif n_best > keep_n: + raise ValueError( + f"n_best {n_best} cannot be greater than keep_n {keep_n}" + ) + + data_shape = xmap.shape + flat_index_map = np.arange(nav_size).reshape(data_shape) + + if from_n_best is None: + from_n_best = n_best + + osm = np.zeros(data_shape + (n_best - from_n_best + 1,), dtype=np.float32) + + if footprint is None: + footprint = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + + for i, n in enumerate(range(n_best, from_n_best - 1, -1)): + match_indicies = simulation_indices[:, :n] + osm[:, :, i] = generic_filter( + flat_index_map, + lambda v: _orientation_similarity_per_pixel( + v, center_index, match_indicies, n, normalize, + ), + footprint=footprint, + mode="constant", + cval=-1, + output=np.float32, + ) + + return osm.squeeze() + + +def _orientation_similarity_per_pixel( + v: np.ndarray, + center_index: int, + match_indices: np.ndarray, + n: int, + normalize: bool, +) -> np.ndarray: + # v are indices picked out with the footprint from flat_index_map + v = v.astype(np.int) + center_value = v[center_index] + # Filter only true neighbours, -1 out of image and not include itself + neighbours = v[np.where((v != -1) & (v != center_value))] + + # Cardinality of the intersection between a and b + number_of_equal_matches_to_its_neighbours = [ + len(np.intersect1d(match_indices[center_value], mi)) + for mi in match_indices[neighbours] + ] + + os = np.mean(number_of_equal_matches_to_its_neighbours) + + if normalize: + os /= n + + return os diff --git a/kikuchipy/indexing/similarity_metrics.py b/kikuchipy/indexing/similarity_metrics.py index 8f525582..fef3f34d 100644 --- a/kikuchipy/indexing/similarity_metrics.py +++ b/kikuchipy/indexing/similarity_metrics.py @@ -31,9 +31,11 @@ class MetricScope(Enum): """ MANY_TO_MANY = "many_to_many" + SOME_TO_MANY = "some_to_many" ONE_TO_MANY = "one_to_many" ONE_TO_ONE = "one_to_one" MANY_TO_ONE = "many_to_one" + SOME_TO_ONE = "some_to_one" def make_similarity_metric( @@ -48,10 +50,8 @@ def make_similarity_metric( equal size. This factory function wraps metric functions for use in - :func:`~kikuchipy.indexing.pattern_matching.pattern_match`, which - again is used by - :class:`~kikuchipy.indexing.StaticDictionaryIndexing` and - :class:`~kikuchipy.indexing.DynamicDictionaryIndexing`. + :meth:`~kikuchipy.signals.EBSD.match_patterns` (which uses + :class:`~kikuchipy.indexing.StaticPatternMatching`). 
Parameters ---------- @@ -96,16 +96,27 @@ def make_similarity_metric( ============ ============= ========= ========= ============= ========= ========= MetricScope flat = False flat = True ------------ --------------------------------- --------------------------------- - \- experimental simulated returns experimental simulated returns + - experimental simulated returns experimental simulated returns ============ ============= ========= ========= ============= ========= ========= MANY_TO_MANY (ny,nx,sy,sx) (N,sy,sx) (ny,nx,N) (ny*nx,sy*sx) (N,sy*sx) (ny*nx,N) + SOME_TO_MANY (nx,sy,sx) (N,sy,sx) (nx,N) - - - ONE_TO_MANY (sy,sx) (N,sy,sx) (N,) (sy*sx,) (N,sy*sx) (N,) MANY_TO_ONE (ny,nx,sy,sx) (sy,sx) (ny,nx) (ny*nx,sy*sx) (sy*sx,) (ny*nx) + SOME_TO_ONE (nx,sy,sx) (sy,sx) (nx,) - - - ONE_TO_ONE (sy,sx) (sy,sx) (1,) (sy*sx,) (sy*sx,) (1,) ============ ============= ========= ========= ============= ========= ========= + + If a scope of `SOME_TO_MANY` or `SOME_TO_ONE` and `flat=True` is + desired, the returned similarity metric has the scope `MANY_TO_MANY` + or `MANY_TO_ONE`, respectively. """ sign = 1 if greater_is_better else -1 if flat: + if "some" in scope.value: + if "many" in scope.value: + scope = MetricScope.MANY_TO_MANY + else: # "one" in scope.value + scope = MetricScope.MANY_TO_ONE return FlatSimilarityMetric( metric_func, sign, @@ -129,36 +140,43 @@ class SimilarityMetric: """Similarity metric between 2D gray-tone patterns.""" # See table in docstring of `make_similarity_metric` - # TODO: Support for 1D navigation shape _EXPT_SIM_NDIM_TO_SCOPE = { (4, 3): MetricScope.MANY_TO_MANY, + (3, 3): MetricScope.SOME_TO_MANY, (2, 3): MetricScope.ONE_TO_MANY, (4, 2): MetricScope.MANY_TO_ONE, + (3, 2): MetricScope.SOME_TO_ONE, (2, 2): MetricScope.ONE_TO_ONE, } _SCOPE_TO_EXPT_SIM_NDIM = { MetricScope.MANY_TO_MANY: (4, 3), + MetricScope.SOME_TO_MANY: (3, 3), MetricScope.ONE_TO_MANY: (2, 3), MetricScope.MANY_TO_ONE: (4, 2), + MetricScope.SOME_TO_ONE: (3, 2), MetricScope.ONE_TO_ONE: (2, 2), } _SCOPE_TO_LOWER_SCOPES = { MetricScope.MANY_TO_MANY: ( - MetricScope.MANY_TO_ONE, + MetricScope.SOME_TO_MANY, MetricScope.ONE_TO_MANY, + MetricScope.MANY_TO_ONE, + MetricScope.SOME_TO_ONE, MetricScope.ONE_TO_ONE, ), - MetricScope.ONE_TO_MANY: ( + MetricScope.SOME_TO_MANY: ( MetricScope.ONE_TO_MANY, + MetricScope.SOME_TO_ONE, MetricScope.ONE_TO_ONE, ), - MetricScope.ONE_TO_ONE: (), + MetricScope.ONE_TO_MANY: (MetricScope.ONE_TO_ONE,), MetricScope.MANY_TO_ONE: ( - MetricScope.MANY_TO_ONE, + MetricScope.SOME_TO_ONE, MetricScope.ONE_TO_ONE, ), + MetricScope.ONE_TO_ONE: (), } def __init__( @@ -246,8 +264,9 @@ def _is_compatible(self, expt_ndim: int, sim_ndim: int) -> bool: class FlatSimilarityMetric(SimilarityMetric): - """Similarity metric between 2D gray-tone images where the images - are flattened before sent to `metric_func`. + """Similarity metric between 2D gray-tone images where the + navigation and signal axes are flattened before sent to + `metric_func`. 
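A sketch of how the factory above can wrap a custom metric, modelled on the sum-of-absolute-differences metric used in the tests further down; the explicit broadcasting for the MANY_TO_MANY scope is an assumption for illustration, not code from this diff:

import numpy as np

from kikuchipy.indexing.similarity_metrics import (
    make_similarity_metric,
    MetricScope,
)


def sum_absolute_difference(expt, sim):
    # expt: (ny, nx, sy, sx), sim: (N, sy, sx); broadcast to
    # (ny, nx, N, sy, sx) and reduce over the two signal axes
    return np.abs(expt[..., None, :, :] - sim[None, None]).sum(axis=(-2, -1))


# A lower SAD means a better match, hence greater_is_better=False
sad_metric = make_similarity_metric(
    sum_absolute_difference,
    greater_is_better=False,
    scope=MetricScope.MANY_TO_MANY,
)

The returned metric could then presumably be passed as `metric` to pattern matching in place of the built-in "ncc" and "ndp" metrics.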
""" # See table in docstring of `make_similarity_metric` @@ -361,8 +380,9 @@ def _zero_mean( """ squeeze = 1 not in expt.shape + sim.shape expt, sim = _expand_dims_to_many_to_many(expt, sim, flat) - expt_mean_axis = 1 if flat else (2, 3) - sim_mean_axis = 1 if flat else (1, 2) + # Always take the mean along the last two axes (signal axes) + expt_mean_axis = 1 if flat else (-2, -1) + sim_mean_axis = 1 if flat else (-2, -1) expt -= expt.mean(axis=expt_mean_axis, keepdims=True) sim -= sim.mean(axis=sim_mean_axis, keepdims=True) @@ -381,24 +401,25 @@ def _normalize( Parameters ---------- - expt : np.ndarray or da.Array + expt : numpy.ndarray or dask.array.Array Experimental patterns. - sim : np.ndarray or da.Array + sim : numpy.ndarray or dask.array.Array Simulated patterns. flat : bool, optional Whether `expt` and `sim` are flattened, by default False. Returns ------- - expt + expt : numpy.ndarray or dask.array.Array Experimental patterns divided by their L2 norms. - sim + sim : numpy.ndarray or dask.array.Array Simulated patterns divided by their L2 norms. """ squeeze = 1 not in expt.shape + sim.shape expt, sim = _expand_dims_to_many_to_many(expt, sim, flat) - expt_sum_axis = 1 if flat else (2, 3) - sim_sum_axis = 1 if flat else (1, 2) + # Always take the sum along the last two axes (signal axes) + expt_sum_axis = 1 if flat else (-2, -1) + sim_sum_axis = 1 if flat else (-2, -1) expt /= (expt ** 2).sum(axis=expt_sum_axis, keepdims=True) ** 0.5 sim /= (sim ** 2).sum(axis=sim_sum_axis, keepdims=True) ** 0.5 @@ -412,79 +433,121 @@ def _zncc_einsum( experimental: Union[da.Array, np.ndarray], simulated: Union[da.Array, np.ndarray], ) -> Union[np.ndarray, da.Array]: - """Compute (lazily) the zero-mean normalized cross-correlation - coefficient between experimental and simulated patterns. - - Parameters - ---------- - experimental - Experimental patterns. - simulated - Simulated patterns. - - Returns - ------- - zncc - Correlation coefficients in range [-1, 1] for all comparisons, - as :class:`np.ndarray` if both `experimental` and `simulated` - are :class:`np.ndarray`, else :class:`da.Array`. - - Notes - ----- - Equivalent results are obtained with :func:`dask.Array.tensordot` - with the `axes` argument `axes=([2, 3], [1, 2]))`. - """ experimental, simulated = _zero_mean(experimental, simulated) experimental, simulated = _normalize(experimental, simulated) - zncc = da.einsum("ijkl,mkl->ijm", experimental, simulated, optimize=True) + r = da.einsum("ijkl,mkl->ijm", experimental, simulated, optimize=True) if isinstance(experimental, np.ndarray) and isinstance( simulated, np.ndarray ): - return zncc.compute() + return r.compute() else: - return zncc + return r def _ndp_einsum( experimental: Union[da.Array, np.ndarray], simulated: Union[da.Array, np.ndarray], ) -> Union[np.ndarray, da.Array]: - """Compute the normalized dot product between experimental and - simulated patterns. + experimental, simulated = _normalize(experimental, simulated) + rho = da.einsum("ijkl,mkl->ijm", experimental, simulated, optimize=True) + if isinstance(experimental, np.ndarray) and isinstance( + simulated, np.ndarray + ): + return rho.compute() + else: + return rho + + +ncc = make_similarity_metric( + metric_func=_zncc_einsum, + scope=MetricScope.MANY_TO_MANY, + make_compatible_to_lower_scopes=True, +) +ncc.__doc__ = r""" + A similarity metric for calculation of the normalized + cross-correlation coefficient (NCC) `r` :cite:`goshtasby2012image` + between experimental and simulated patterns. 
+ + Parameters + ---------- + experimental : numpy.ndarray or dask.array.Array + Experimental patterns. + simulated : numpy.ndarray or dask.array.Array + Simulated patterns. + + Returns + ------- + r : numpy.ndarray or dask.array.Array + Correlation coefficients in range [-1, 1] for all comparisons, + as :class:`numpy.ndarray` if both `experimental` and `simulated` + are :class:`numpy.ndarray`, else :class:`dask.array.Array`. + + Notes + ----- + The NCC, or Pearson Correlation Coefficient, is defined as + + .. math:: + + r = \frac + {\sum^n_{i=1}(x_i - \bar{x})(y_i - \bar{y})} + { + \sqrt{\sum ^n _{i=1}(x_i - \bar{x})^2} + \sqrt{\sum ^n _{i=1}(y_i - \bar{y})^2} + }, + + where experimental patterns :math:`x` and simulated patterns + :math:`y` are centered by subtracting out the mean of each pattern, + and the sum of cross-products of the centered patterns is + accumulated. The denominator adjusts the scales of the patterns to + have equal units. + + Equivalent results are obtained with :func:`dask.array.tensordot` + with ``axes=([2, 3], [1, 2]))`` for 4D and 3D experimental and + simulated data sets, respectively. +""" + + +ndp = make_similarity_metric( + metric_func=_ndp_einsum, + scope=MetricScope.MANY_TO_MANY, + make_compatible_to_lower_scopes=True, +) +ndp.__doc__ = r""" + A similarity metric for calculation of the normalized dot product + (NDP) :math:`\rho` :cite:`chen2015dictionary` between experimental + and simulated patterns. Parameters ---------- - experimental + experimental : numpy.ndarray or dask.array.Array Experimental patterns. - simulated + simulated : numpy.ndarray or dask.array.Array Simulated patterns. Returns ------- - ndp + rho : numpy.ndarray or dask.array.Array Normalized dot products in range [0, 1] for all comparisons, - as :class:`np.ndarray` if both `experimental` and `simulated` - are :class:`np.ndarray`, else :class:`da.Array`. - """ - experimental, simulated = _normalize(experimental, simulated) - ndp = da.einsum("ijkl,mkl->ijm", experimental, simulated, optimize=True) - if isinstance(experimental, np.ndarray) and isinstance( - simulated, np.ndarray - ): - return ndp.compute() - else: - return ndp - - -SIMILARITY_METRICS = { - "zncc": make_similarity_metric( - metric_func=_zncc_einsum, - scope=MetricScope.MANY_TO_MANY, - make_compatible_to_lower_scopes=True, - ), - "ndp": make_similarity_metric( - metric_func=_ndp_einsum, - scope=MetricScope.MANY_TO_MANY, - make_compatible_to_lower_scopes=True, - ), + as :class:`numpy.ndarray` if both `experimental` and `simulated` + are :class:`numpy.ndarray`, else :class:`dask.array.Array`. + + Notes + ----- + The NDP is defined as + + .. math:: + + \rho = \frac + {\langle \mathbf{X}, \mathbf{Y} \rangle} + {||\mathbf{X}|| \cdot ||\mathbf{Y}||}, + + where :math:`{\langle \mathbf{X}, \mathbf{Y} \rangle}` is the dot + (inner) product of the pattern vectors :math:`\mathbf{X}` and + :math:`\mathbf{Y}`. +""" + + +_SIMILARITY_METRICS = { + "ncc": ncc, + "ndp": ndp, } diff --git a/kikuchipy/indexing/tests/test_merge_crystal_maps.py b/kikuchipy/indexing/tests/test_merge_crystal_maps.py new file mode 100644 index 00000000..ca1765ea --- /dev/null +++ b/kikuchipy/indexing/tests/test_merge_crystal_maps.py @@ -0,0 +1,417 @@ +# -*- coding: utf-8 -*- +# Copyright 2019-2020 The kikuchipy developers +# +# This file is part of kikuchipy. 
+# +# kikuchipy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# kikuchipy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with kikuchipy. If not, see . + +import numpy as np +import pytest + +from kikuchipy.indexing._merge_crystal_maps import merge_crystal_maps +from kikuchipy.indexing.similarity_metrics import make_similarity_metric + + +class TestMergeCrystalMaps: + @pytest.mark.parametrize( + "map_shape, rot_per_point, phase_names", + [ + ((0, 3), 10, ["a", "b"]), + ((0, 4), 1, ["a", "b", "c"]), + ((3, 0), 5, ["austenite", "ferrite"]), + ((4, 0), 1, ["al", "cu", "si"]), + ], + ) + def test_merge_crystal_maps_1d( + self, get_single_phase_xmap, map_shape, rot_per_point, phase_names + ): + """Crystal maps with a 1D navigation shape can be merged + successfully and yields an expected output. + """ + n_phases = len(phase_names) + scores_prop, sim_idx_prop = "scores", "sim_idx" + + map_size = np.sum(map_shape) + data_shape = (map_size,) + if rot_per_point > 1: + data_shape += (rot_per_point,) + + desired_phase_ids = np.zeros(map_size) + desired_scores = np.ones(data_shape) + desired_idx = np.arange(np.prod(data_shape)).reshape(data_shape) + + xmaps = [] + xmap_args = (map_shape, rot_per_point, [scores_prop, sim_idx_prop]) + phase_ids = np.arange(n_phases) + for i in range(n_phases): + xmap = get_single_phase_xmap( + *xmap_args, phase_names[i], phase_ids[i] + ) + # All maps have at least one point with the best score + xmap[i].prop[scores_prop] += i + 1 + xmaps.append(xmap) + + desired_phase_ids[i] = i + desired_scores[i] = xmap[i].prop[scores_prop] + desired_idx[i] = xmap[i].prop[sim_idx_prop] + + if i == 0: + desired_rot = xmap.rotations.data + else: + desired_rot[i] = xmap[i].rotations.data + + merged_xmap = merge_crystal_maps( + crystal_maps=xmaps, + scores_prop=scores_prop, + simulation_indices_prop=sim_idx_prop, + ) + + assert merged_xmap.shape == xmaps[0].shape + assert merged_xmap.size == xmaps[0].size + for v1, v2 in zip( + merged_xmap._coordinates.values(), xmaps[0]._coordinates.values() + ): + if v1 is None: + assert v1 is v2 + else: + np.allclose(v1, v2) + + assert np.allclose(merged_xmap.phase_id, desired_phase_ids) + assert np.allclose(merged_xmap.prop[scores_prop], desired_scores) + assert np.allclose(merged_xmap.prop[sim_idx_prop], desired_idx) + assert np.allclose(merged_xmap.rotations.data, desired_rot) + + desired_merged_shapes = (map_size, rot_per_point * n_phases) + assert ( + merged_xmap.prop[f"merged_{scores_prop}"].shape + == desired_merged_shapes + ) + assert ( + merged_xmap.prop[f"merged_{sim_idx_prop}"].shape + == desired_merged_shapes + ) + + @pytest.mark.parametrize( + "map_shape, rot_per_point, phase_names, mean_n_best", + [ + ((4, 3), 10, ["a", "b"], 5), + ((5, 4), 1, ["a", "b", "c"], 1), + ((3, 4), 5, ["austenite", "ferrite"], 4), + ((4, 5), 1, ["al", "cu", "si"], 1), + ], + ) + def test_merge_crystal_maps_2d( + self, + get_single_phase_xmap, + map_shape, + rot_per_point, + phase_names, + mean_n_best, + ): + """Crystal maps with a 2D navigation shape can be merged + successfully and yields an expected output. 
+ """ + n_phases = len(phase_names) + scores_prop, sim_idx_prop = "scores", "sim_idx" + + map_size = np.prod(map_shape) + data_shape = (map_size,) + if rot_per_point > 1: + data_shape += (rot_per_point,) + + desired_phase_ids = np.zeros(map_size) + desired_scores = np.ones(data_shape) + desired_idx = np.arange(np.prod(data_shape)).reshape(data_shape) + + xmaps = [] + xmap_args = (map_shape, rot_per_point, [scores_prop, sim_idx_prop]) + phase_ids = np.arange(n_phases) + ny, nx = map_shape + for i in range(n_phases): + xmap = get_single_phase_xmap( + *xmap_args, phase_names[i], phase_ids[i] + ) + # All maps have at least one point with the best score along + # the map diagonal + idx = (i, i) + xmap[idx].prop[scores_prop] += i + 1 + xmaps.append(xmap) + + j = i * (1 + nx) + desired_phase_ids[j] = i + desired_scores[j] = xmap[idx].prop[scores_prop] + desired_idx[j] = xmap[idx].prop[sim_idx_prop] + + if i == 0: + desired_rot = xmap.rotations.data + else: + desired_rot[j] = xmap[idx].rotations.data + + merged_xmap = merge_crystal_maps( + crystal_maps=xmaps, + mean_n_best=mean_n_best, + scores_prop=scores_prop, + simulation_indices_prop=sim_idx_prop, + ) + + assert merged_xmap.shape == xmaps[0].shape + assert merged_xmap.size == xmaps[0].size + for v1, v2 in zip( + merged_xmap._coordinates.values(), xmaps[0]._coordinates.values() + ): + if v1 is None: + assert v1 is v2 + else: + np.allclose(v1, v2) + + assert np.allclose(merged_xmap.phase_id, desired_phase_ids) + assert np.allclose(merged_xmap.prop[scores_prop], desired_scores) + assert np.allclose(merged_xmap.prop[sim_idx_prop], desired_idx) + assert np.allclose(merged_xmap.rotations.data, desired_rot) + + desired_merged_shapes = (map_size, rot_per_point * n_phases) + assert ( + merged_xmap.prop[f"merged_{scores_prop}"].shape + == desired_merged_shapes + ) + assert ( + merged_xmap.prop[f"merged_{sim_idx_prop}"].shape + == desired_merged_shapes + ) + + @pytest.mark.parametrize( + "scores_prop, sim_idx_prop", + [("scores", "sim_idx"), ("similar", "simulated")], + ) + def test_property_names( + self, get_single_phase_xmap, scores_prop, sim_idx_prop + ): + """Passing scores and simulation indices property names returns + expected properties in merged map. 
+ """ + map_shape = (5, 6) + rot_per_point = 50 + + xmap1 = get_single_phase_xmap( + map_shape, rot_per_point, [scores_prop, sim_idx_prop], "a", 0 + ) + xmap2 = get_single_phase_xmap( + map_shape, rot_per_point, [scores_prop, sim_idx_prop], "b", 1 + ) + + xmap2[3, 3].prop[scores_prop] = 2 + merged_xmap = merge_crystal_maps( + crystal_maps=[xmap1, xmap2], + scores_prop=scores_prop, + simulation_indices_prop=sim_idx_prop, + ) + + assert scores_prop in merged_xmap.prop.keys() + assert sim_idx_prop in merged_xmap.prop.keys() + + desired_merged_shapes = (np.prod(map_shape), rot_per_point * 2) + assert ( + merged_xmap.prop[f"merged_{scores_prop}"].shape + == desired_merged_shapes + ) + assert ( + merged_xmap.prop[f"merged_{sim_idx_prop}"].shape + == desired_merged_shapes + ) + + def test_negative_metric(self, get_single_phase_xmap): + def negative_sad(p, t): # pragma: no cover + return -np.sum(np.abs(p - t), axis=(2, 3)) + + metric = make_similarity_metric(negative_sad, greater_is_better=False) + + map_shape = (5, 6) + rot_per_point = 5 + scores_prop = "scores" + sim_idx_prop = "simulation_indices" + + xmap1 = get_single_phase_xmap( + map_shape, rot_per_point, [scores_prop, sim_idx_prop], "a", 0 + ) + xmap2 = get_single_phase_xmap( + map_shape, rot_per_point, [scores_prop, sim_idx_prop], "b", 1 + ) + + xmap2[0, 3].prop[scores_prop] = 0 + desired_phase_id = np.zeros(np.prod(map_shape)) + desired_phase_id[3] = 1 + + merged_xmap = merge_crystal_maps( + crystal_maps=[xmap1, xmap2], metric=metric, + ) + + assert np.allclose(merged_xmap.phase_id, desired_phase_id) + + @pytest.mark.parametrize( + "phase_names, desired_phase_names", + [ + (["a"] * 3, ["a", "a1", "a2"]), + (["hello_there1"] * 2, ["hello_there1", "hello_there11"]), + (["1"] * 5, ["1", "11", "12", "13", "14"]), + ], + ) + def test_warning_merge_maps_with_same_phase( + self, get_single_phase_xmap, phase_names, desired_phase_names, + ): + n_phases = len(phase_names) + scores_prop = "scores" + sim_idx_prop = "simulated_indices" + map_shape = (5, 6) + rot_per_point = 5 + + xmaps = [] + xmap_args = (map_shape, rot_per_point, [scores_prop, sim_idx_prop]) + phase_ids = np.arange(n_phases) + for i in range(n_phases): + xmap = get_single_phase_xmap( + *xmap_args, phase_names[i], phase_ids[i] + ) + # All maps have at least one point with the best score + xmap[i, i].scores += i + 1 + xmaps.append(xmap) + + with pytest.warns( + UserWarning, match=f"There are duplicates of phase {phase_names[0]}" + ): + merged_xmap = merge_crystal_maps( + crystal_maps=xmaps, + scores_prop=scores_prop, + simulation_indices_prop=sim_idx_prop, + ) + + assert all( + [name in merged_xmap.phases.names for name in desired_phase_names] + ) + + @pytest.mark.parametrize( + ( + "nav_shape, rot_per_point, mean_n_best, desired_merged_scores, " + "desired_merged_sim_idx" + ), + [ + ((2, 0), 1, 1, [[1, 1], [2, 1]], [[0, 2], [3, 1]]), + ((1, 2), 1, 1, [[1, 1], [2, 1]], [[0, 2], [3, 1]]), + ( + (1, 3), + 1, + 1, + [[1, 1, 1], [2, 1, 1], [3, 1, 1]], + [[0, 3, 6], [4, 1, 7], [8, 2, 5]], + ), + ( + (2, 1), + 2, + 2, + [[1, 1, 1, 1], [2, 2, 1, 1]], + [[0, 4, 1, 5], [6, 7, 2, 3]], + ), + ( + (3, 2), + 1, + 1, + [ + [1, 1, 1], + [1, 1, 1], + [2, 1, 1], + [2, 1, 1], + [3, 1, 1], + [3, 1, 1], + ], + [ + [0, 6, 12], + [1, 7, 13], + [8, 2, 14], + [9, 3, 15], + [16, 4, 10], + [17, 5, 11], + ], + ), + ], + ) + def test_mean_n_best( + self, + get_single_phase_xmap, + nav_shape, + rot_per_point, + mean_n_best, + desired_merged_scores, + desired_merged_sim_idx, + ): + """Ensure that the mergesorted 
scores and simulation index + properties in the merged map have the correct values and shape. + """ + n_phases = np.shape(desired_merged_scores)[-1] // rot_per_point + xmaps = [] + for i in range(n_phases): + xmap = get_single_phase_xmap(nav_shape, rot_per_point, name=str(i)) + xmap[i].scores += i + xmaps.append(xmap) + + # The simulation indices should be the same in all maps + all_sim_idx = np.dstack([xmap.simulation_indices for xmap in xmaps]) + assert np.sum(np.diff(all_sim_idx)) == 0 + + merged_xmap = merge_crystal_maps( + crystal_maps=xmaps, mean_n_best=mean_n_best, + ) + + assert merged_xmap.phases.size == n_phases + assert np.allclose(merged_xmap.merged_scores, desired_merged_scores) + assert np.allclose( + merged_xmap.merged_simulation_indices, desired_merged_sim_idx + ) + + def test_mean_n_best_varying_scores(self, get_single_phase_xmap): + """Ensure that various combinations of the number of scores per + point and the number of scores evaluated to find the best match + return the expected results. + """ + nav_shape = (2, 3) + rot_per_point = 3 + xmap1 = get_single_phase_xmap(nav_shape, rot_per_point, name="a") + xmap2 = get_single_phase_xmap(nav_shape, rot_per_point, name="b") + idx = (0, 0) + xmap1[idx].scores = [1, 2, 2.1] + xmap2[idx].scores = [1, 1.9, 3] + xmap2[0, 1].scores = 2.0 # Both maps in both merged maps + + crystal_maps = [xmap1, xmap2] + merged_xmap1 = merge_crystal_maps(crystal_maps, mean_n_best=2) + merged_xmap2 = merge_crystal_maps(crystal_maps, mean_n_best=3) + + assert np.allclose(merged_xmap1.phase_id, [0, 1, 0, 0, 0, 0]) + assert np.allclose(merged_xmap2.phase_id, [1, 1, 0, 0, 0, 0]) + + def test_merging_maps_different_shapes_raises(self, get_single_phase_xmap): + xmap1 = get_single_phase_xmap((4, 3)) + xmap2 = get_single_phase_xmap((3, 4)) + with pytest.raises(ValueError, match="All crystal maps must have the"): + _ = merge_crystal_maps([xmap1, xmap2]) + + def test_merging_maps_different_number_of_scores_raises( + self, get_single_phase_xmap + ): + nav_shape = (2, 3) + xmap1 = get_single_phase_xmap(nav_shape, 3, name="a") + xmap2 = get_single_phase_xmap(nav_shape, 4, name="b") + xmap2[0, 1].scores = 2.0 # Both maps in both merged maps + + crystal_maps = [xmap1, xmap2] + with pytest.raises(ValueError, match="All crystal maps must have the"): + _ = merge_crystal_maps(crystal_maps) diff --git a/kikuchipy/indexing/tests/test_orientation_similarity_map.py b/kikuchipy/indexing/tests/test_orientation_similarity_map.py new file mode 100644 index 00000000..b194235c --- /dev/null +++ b/kikuchipy/indexing/tests/test_orientation_similarity_map.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2019-2020 The kikuchipy developers +# +# This file is part of kikuchipy. +# +# kikuchipy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# kikuchipy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with kikuchipy. If not, see . 
+ +import numpy as np +from orix.crystal_map import CrystalMap +from orix.quaternion import Rotation +import pytest + +from kikuchipy.indexing.orientation_similarity_map import ( + orientation_similarity_map, +) + + +class TestOrientationSimilarityMap: + def test_orientation_similarity_map(self): + xmap = CrystalMap( + rotations=Rotation(np.zeros((100, 4))), + prop={"simulation_indices": np.tile(np.arange(5), (100, 1))}, + x=np.tile(np.arange(10), 10), + y=np.tile(np.arange(10), 10), + ) + assert np.allclose(orientation_similarity_map(xmap), np.ones((10, 10))) + + def test_n_best_too_great(self): + xmap = CrystalMap( + rotations=Rotation(np.zeros((100, 4))), + prop={"simulation_indices": np.ones((100, 5))}, + x=np.tile(np.arange(10), 10), + y=np.tile(np.arange(10), 10), + ) + with pytest.raises(ValueError, match="n_best 6 cannot be greater than"): + orientation_similarity_map(xmap, n_best=6) + + def test_from_n_best(self): + sim_idx_prop = "simulated_indices" + xmap = CrystalMap( + rotations=Rotation(np.zeros((100, 4))), + prop={sim_idx_prop: np.ones((100, 5))}, + x=np.tile(np.arange(10), 10), + y=np.tile(np.arange(10), 10), + ) + osm = orientation_similarity_map( + xmap, simulation_indices_prop=sim_idx_prop, from_n_best=2 + ) + assert osm.shape == (10, 10, 4) diff --git a/kikuchipy/indexing/tests/test_pattern_matching.py b/kikuchipy/indexing/tests/test_pattern_matching.py index cc6ab780..9e937ac4 100644 --- a/kikuchipy/indexing/tests/test_pattern_matching.py +++ b/kikuchipy/indexing/tests/test_pattern_matching.py @@ -21,11 +21,11 @@ import pytest from scipy.spatial.distance import cdist - -from kikuchipy.indexing import ( +from kikuchipy.data import nickel_ebsd_small +from kikuchipy.indexing._pattern_matching import _pattern_match +from kikuchipy.indexing.similarity_metrics import ( make_similarity_metric, MetricScope, - pattern_match, ) @@ -35,34 +35,31 @@ class TestPatternMatching: greater_is_better=False, flat=True, ) - dummy_metric = make_similarity_metric(lambda p, t: 1.0) def test_not_recognized_metric(self): with pytest.raises(ValueError): - pattern_match( + _pattern_match( np.zeros((2, 2)), np.zeros((2, 2)), metric="not_recognized" ) def test_mismatching_signal_shapes(self): self.dummy_metric.scope = MetricScope.MANY_TO_MANY with pytest.raises(OSError): - pattern_match( + _pattern_match( np.zeros((2, 2)), np.zeros((3, 3)), metric=self.dummy_metric ) def test_metric_not_compatible_with_data(self): self.dummy_metric.scope = MetricScope.ONE_TO_MANY with pytest.raises(OSError): - pattern_match( + _pattern_match( np.zeros((2, 2, 2, 2)), np.zeros((2, 2)), metric=self.dummy_metric, ) - @pytest.mark.parametrize( - "n_slices", [1, 2], - ) + @pytest.mark.parametrize("n_slices", [1, 2]) def test_pattern_match_compute_true(self, n_slices): # Four patterns p = np.array( @@ -84,14 +81,14 @@ def test_pattern_match_compute_true(self, n_slices): np.int8, ) t_da = da.from_array(t) - mr = pattern_match(p, t_da, n_slices=n_slices) + mr = _pattern_match(p, t_da, n_slices=n_slices, keep_n=1) assert mr[0][2] == 1 # Template index in t of perfect match - assert pytest.approx(mr[1][2]) == 1.0 # ZNCC of perfect match + assert np.allclose(mr[1][2], 1.0) # ZNCC of perfect match def test_pattern_match_compute_false(self): p = np.arange(16).reshape((2, 2, 2, 2)) t = np.arange(8).reshape((2, 2, 2)) - mr = pattern_match(p, t, compute=False) + mr = _pattern_match(p, t, compute=False) assert len(mr) == 2 assert isinstance(mr[0], da.Array) and isinstance(mr[1], da.Array) @@ -99,9 +96,22 @@ def 
test_pattern_match_slices_compute_false(self): p = np.arange(16).reshape((2, 2, 2, 2)) t = np.arange(8).reshape((2, 2, 2)) with pytest.raises(NotImplementedError): - pattern_match(p, t, n_slices=2, compute=False) + _pattern_match(p, t, n_slices=2, compute=False) def test_pattern_match_one_to_one(self): p = np.random.random(3 * 3).reshape((3, 3)) - mr = pattern_match(p, p) + mr = _pattern_match(p, p) assert mr[0][0] == 0 + + def test_pattern_match_phase_name(self): + """Ensure that the `phase_name` accepts different types.""" + exp = nickel_ebsd_small().data + sim = exp.reshape((-1,) + exp.shape[-2:]) + + sim_idx1, scores1 = _pattern_match(exp, sim, n_slices=2) + sim_idx2, scores2 = _pattern_match(exp, sim, phase_name="a", n_slices=2) + sim_idx3, scores3 = _pattern_match(exp, sim, phase_name="", n_slices=2) + + assert np.allclose(sim_idx1[0], [0, 3, 6, 4, 7, 1, 8, 5, 2]) + assert np.allclose(sim_idx2[0], [0, 3, 6, 4, 7, 1, 8, 5, 2]) + assert np.allclose(sim_idx3[0], [0, 3, 6, 4, 7, 1, 8, 5, 2]) diff --git a/kikuchipy/indexing/tests/test_similarity_metrics.py b/kikuchipy/indexing/tests/test_similarity_metrics.py index 6e10bd1e..12eb6a62 100644 --- a/kikuchipy/indexing/tests/test_similarity_metrics.py +++ b/kikuchipy/indexing/tests/test_similarity_metrics.py @@ -26,15 +26,16 @@ SimilarityMetric, MetricScope, FlatSimilarityMetric, - SIMILARITY_METRICS, + _SIMILARITY_METRICS, _get_number_of_simulated, + _zncc_einsum, ) -class TestSimilarityMetrics: +class TestSimilarityMetric: @pytest.mark.parametrize( - "flat,returned_class", - [(False, SimilarityMetric), (True, FlatSimilarityMetric),], + "flat, returned_class", + [(False, SimilarityMetric), (True, FlatSimilarityMetric)], ) def test_make_similarity_metric(self, flat, returned_class): assert ( @@ -50,56 +51,9 @@ def test_make_similarity_metric(self, flat, returned_class): is returned_class ) - def test_zncc(self): - zncc_metric = SIMILARITY_METRICS["zncc"] - # Four experimental data - expt = np.array( - [ - [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], - [[[9, 8], [1, 7]], [[5, 2], [2, 7]]], - ], - np.int8, - ) - expt_da = da.from_array(expt) - - # One perfect match, at [1,0,1] in results, and one close match - # Two simulated - sim = np.array([[[5, 3], [2, 7]], [[9, 8], [1, 7]]], np.int8) - sim_da = da.from_array(sim) - - # many to many - assert ( - pytest.approx(zncc_metric(expt_da, sim_da).compute()[1, 0, 1]) == 1 - ) - - # Working with lower scopes, here one to many: - assert ( - pytest.approx(zncc_metric(expt_da[1, 0], sim_da).compute()[1]) == 1 - ) - - def test_ndp(self): - ndp_metric = SIMILARITY_METRICS["ndp"] - expt = np.array( - [ - [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], - [[[9, 8], [1, 7]], [[5, 2], [2, 7]]], - ], - np.int8, - ) - expt_da = da.from_array(expt) - - # One perfect match and one close match - sim = np.array([[[5, 3], [2, 7]], [[9, 8], [1, 7]]], np.int8) - sim_da = da.from_array(sim) - - # many to many - assert ( - pytest.approx(ndp_metric(expt_da, sim_da).compute()[1, 0, 1]) == 1 - ) - - @pytest.mark.parametrize("metric", ["zncc", "ndp"]) - def test_zncc_ndp_returns_desired_array_type(self, metric): - metric = SIMILARITY_METRICS[metric] + @pytest.mark.parametrize("metric", ["ncc", "ndp"]) + def test_ncc_ndp_returns_desired_array_type(self, metric): + metric = _SIMILARITY_METRICS[metric] expt = np.array( [ [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], @@ -132,14 +86,13 @@ def test_flat_metric(self): scope=MetricScope.MANY_TO_MANY, make_compatible_to_lower_scopes=True, ) - assert ( - euclidean_metric._is_compatible(expt.ndim, sim.ndim) is 
True - and pytest.approx(euclidean_metric(expt, sim)[2, 1]) == 0 - ) + assert euclidean_metric._is_compatible( + expt.ndim, sim.ndim + ) is True and np.allclose(euclidean_metric(expt, sim)[2, 1], 0) def test_make_compatible_to_lower_scopes(self): - zncc_metric = SIMILARITY_METRICS["zncc"] - assert zncc_metric._is_compatible( + ncc_metric = _SIMILARITY_METRICS["ncc"] + assert ncc_metric._is_compatible( np.zeros((2, 2)).ndim, np.zeros((2, 2)).ndim ) @@ -195,15 +148,142 @@ def test_similarity_metric_representation(self): scope=MetricScope.ONE_TO_MANY, flat=True, ), - SIMILARITY_METRICS["zncc"], - SIMILARITY_METRICS["ndp"], + _SIMILARITY_METRICS["ncc"], + _SIMILARITY_METRICS["ndp"], ] desired_repr = [ - f"SimilarityMetric , scope: many_to_many", - f"FlatSimilarityMetric , scope: one_to_many", - f"SimilarityMetric _zncc_einsum, scope: many_to_many", - f"SimilarityMetric _ndp_einsum, scope: many_to_many", + "SimilarityMetric , scope: many_to_many", + "FlatSimilarityMetric , scope: one_to_many", + "SimilarityMetric _zncc_einsum, scope: many_to_many", + "SimilarityMetric _ndp_einsum, scope: many_to_many", ] for i in range(len(desired_repr)): assert repr(metrics[i]) == desired_repr[i] + + def test_some_to_many(self, dummy_signal): + scope = MetricScope.SOME_TO_MANY + assert scope.name == "SOME_TO_MANY" + assert scope.value == "some_to_many" + + sig_shape = dummy_signal.axes_manager.signal_shape + expt = dummy_signal.data.reshape((-1,) + sig_shape) + sim = expt[:3] + dims = (expt.ndim, sim.ndim) + assert dims == (3, 3) + + # Expansion of dimensions works + ncc_metric = _SIMILARITY_METRICS["ncc"] + ncc = ncc_metric(expt, sim) + assert ncc.shape == (9, 3) + assert np.allclose(np.diagonal(ncc), 1) + + def dot_product(a, b): + norm_a = np.linalg.norm(a, axis=(1, 2))[:, np.newaxis, np.newaxis] + norm_b = np.linalg.norm(b, axis=(1, 2))[:, np.newaxis, np.newaxis] + return np.tensordot(a / norm_a, b / norm_b, axes=([1, 2], [2, 1])) + + metric = make_similarity_metric(metric_func=dot_product, scope=scope) + assert metric._EXPT_SIM_NDIM_TO_SCOPE[dims] == scope + assert metric._SCOPE_TO_EXPT_SIM_NDIM[scope] == dims + + ndp = metric(expt, sim) + assert ndp.shape == (9, 3) + assert np.allclose(np.sum(ndp), 19.92476) + + def test_some_to_many_flat(self, dummy_signal): + scope_in = MetricScope.SOME_TO_MANY + metric = make_similarity_metric( + metric_func=_zncc_einsum, scope=scope_in, flat=True + ) + scope_out = metric.scope + + assert metric.flat + assert scope_out.name == "MANY_TO_MANY" + + def test_some_to_one(self, dummy_signal): + scope = MetricScope.SOME_TO_ONE + assert scope.name == "SOME_TO_ONE" + assert scope.value == "some_to_one" + + sig_shape = dummy_signal.axes_manager.signal_shape + expt = dummy_signal.data.reshape((-1,) + sig_shape) + sim = expt[0] + dims = (expt.ndim, sim.ndim) + assert dims == (3, 2) + + # Expansion of dimensions works + ndp_metric = _SIMILARITY_METRICS["ndp"] + ndp = ndp_metric(expt, sim) + assert ndp.shape == (9,) + assert np.allclose(ndp[0], 1) + + def dot_product(a, b): + norm_a = np.linalg.norm(a, axis=(1, 2))[:, np.newaxis, np.newaxis] + norm_b = np.linalg.norm(b) + return np.tensordot(a / norm_a, b / norm_b, axes=([1, 2], [1, 0])) + + metric = make_similarity_metric(metric_func=dot_product, scope=scope) + assert metric._EXPT_SIM_NDIM_TO_SCOPE[dims] == scope + assert metric._SCOPE_TO_EXPT_SIM_NDIM[scope] == dims + + ndp = metric(expt, sim) + assert ndp.shape == (9,) + assert np.allclose(np.sum(ndp), 6.9578266) + + def test_some_to_one_flat(self, dummy_signal): + scope_in = 
MetricScope.SOME_TO_ONE + metric = make_similarity_metric( + metric_func=_zncc_einsum, scope=scope_in, flat=True + ) + scope_out = metric.scope + + assert metric.flat + assert scope_out.name == "MANY_TO_ONE" + + +class TestNCC: + def test_zncc(self): + ncc_metric = _SIMILARITY_METRICS["ncc"] + # Four experimental data + expt = np.array( + [ + [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], + [[[9, 8], [1, 7]], [[5, 2], [2, 7]]], + ], + np.int8, + ) + expt_da = da.from_array(expt) + + # One perfect match, at [1,0,1] in results, and one close match + # Two simulated + sim = np.array([[[5, 3], [2, 7]], [[9, 8], [1, 7]]], np.int8) + sim_da = da.from_array(sim) + + # many to many + assert np.allclose(ncc_metric(expt_da, sim_da).compute()[1, 0, 1], 1) + + # Working with lower scopes, here one to many: + assert np.allclose(ncc_metric(expt_da[1, 0], sim_da).compute()[1], 1) + + +class TestNDP: + def test_ndp(self): + ndp_metric = _SIMILARITY_METRICS["ndp"] + expt = np.array( + [ + [[[1, 2], [3, 4]], [[5, 6], [7, 8]]], + [[[9, 8], [1, 7]], [[5, 2], [2, 7]]], + ], + np.int8, + ) + expt_da = da.from_array(expt) + + # One perfect match and one close match + sim = np.array([[[5, 3], [2, 7]], [[9, 8], [1, 7]]], np.int8) + sim_da = da.from_array(sim) + + # many to many + assert ( + pytest.approx(ndp_metric(expt_da, sim_da).compute()[1, 0, 1]) == 1 + ) diff --git a/kikuchipy/indexing/tests/test_static_pattern_matching.py b/kikuchipy/indexing/tests/test_static_pattern_matching.py new file mode 100644 index 00000000..7fdbb876 --- /dev/null +++ b/kikuchipy/indexing/tests/test_static_pattern_matching.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +# Copyright 2019-2020 The kikuchipy developers +# +# This file is part of kikuchipy. +# +# kikuchipy is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# kikuchipy is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with kikuchipy. If not, see . 
+ +import io + +import numpy as np +from orix.crystal_map import CrystalMap +from orix.quaternion import Rotation +import pytest + +from kikuchipy.data import nickel_ebsd_small +from kikuchipy.indexing._static_pattern_matching import ( + StaticPatternMatching, + _get_spatial_arrays, +) +from kikuchipy.io.tests.test_util import replace_stdin +from kikuchipy.signals import EBSD + + +class TestStaticPatternMatching: + def test_init_static_pattern_matching(self): + s = nickel_ebsd_small() + sdi = StaticPatternMatching(s) + + assert isinstance(sdi.dictionaries, list) + assert sdi.dictionaries[0] == s + assert isinstance(sdi.dictionaries[0], EBSD) + assert np.may_share_memory(sdi.dictionaries[0].data, s.data) + + def test_get_orientation_similarity_map(self): + s = nickel_ebsd_small() + + s_dict1 = EBSD(s.data.reshape(-1, 60, 60)) + s_dict2 = EBSD(s.data.reshape(-1, 60, 60)) + n_patterns = s_dict1.axes_manager.navigation_size + s_dict1._xmap = CrystalMap(Rotation(np.zeros((n_patterns, 4)))) + s_dict2._xmap = CrystalMap(Rotation(np.zeros((n_patterns, 4)))) + s_dict1.xmap.phases[0].name = "a" + s_dict2.xmap.phases[0].name = "b" + + sd = StaticPatternMatching([s_dict1, s_dict2]) + res = sd(s, keep_n=1, get_orientation_similarity_map=True) + xmap1, _ = res + + assert np.allclose(xmap1.scores, 1) + assert np.all(["osm" in xmap.prop for xmap in res]) + + @pytest.mark.parametrize( + "n_rot_in, n_rot_out, keep_n", [(60, 50, 10), (40, 40, 5)] + ) + def test_keep_n(self, n_rot_in, n_rot_out, keep_n): + s = nickel_ebsd_small() + s_dict = EBSD(np.random.random((n_rot_in, 60, 60)).astype(np.float32)) + s_dict._xmap = CrystalMap(Rotation(np.zeros((n_rot_in, 4)))) + sd = StaticPatternMatching(s_dict) + xmap = sd(s) + + assert xmap.rotations_per_point == n_rot_out + + xmap2 = sd(s, keep_n=keep_n) + + assert xmap2.rotations_per_point == keep_n + + @pytest.mark.parametrize( + "return_merged_xmap, desired_n_xmaps_out", [(True, 3), (False, 2)] + ) + def test_return_merged_crystal_map( + self, return_merged_xmap, desired_n_xmaps_out + ): + s = nickel_ebsd_small() + s_dict1 = EBSD(s.data.reshape(-1, 60, 60)) + s_dict2 = s_dict1.deepcopy() + n_patterns = s_dict1.axes_manager.navigation_size + s_dict1._xmap = CrystalMap(Rotation(np.zeros((n_patterns, 4)))) + s_dict2._xmap = s_dict1.xmap.deepcopy() + s_dict1.xmap.phases[0].name = "a" + s_dict2.xmap.phases[0].name = "b" + + sd = StaticPatternMatching([s_dict1, s_dict2]) + res1 = sd(s, return_merged_crystal_map=return_merged_xmap) + + assert len(res1) == desired_n_xmaps_out + + sd.dictionaries.pop(-1) + res2 = sd(s, return_merged_crystal_map=True) + + assert isinstance(res2, CrystalMap) + + res3 = sd(s) + + assert isinstance(res3, CrystalMap) + + def test_n_slices_input(self, dummy_signal): + sig_shape = dummy_signal.axes_manager.signal_shape + n_px = np.prod(sig_shape) + n_sim = 13500 + 1 + rand_data = ( + np.random.randint(0, 255, n_sim * n_px) + .reshape((n_sim,) + sig_shape) + .astype(np.uint8) + ) + s_dict1 = EBSD(rand_data) + s_dict1._xmap = CrystalMap(Rotation(np.zeros((n_sim, 4)))) + sd = StaticPatternMatching(s_dict1) + + with replace_stdin(io.StringIO("y")): + res = sd(dummy_signal, n_slices=1) + assert isinstance(res, CrystalMap) + + with replace_stdin(io.StringIO("n")): + res = sd(dummy_signal, n_slices=1) + assert res is None + + @pytest.mark.parametrize( + "slices, desired_xmap_shape", [((0, 0), ()), ((0, slice(0, 2)), (2,))], + ) + def test_signal_varying_dimensions( + self, dummy_signal, slices, desired_xmap_shape + ): + s = dummy_signal.inav[slices] + 
sig_shape = dummy_signal.axes_manager.signal_shape + s_dict1 = EBSD(dummy_signal.data.reshape((-1,) + sig_shape)) + n_sim = s_dict1.axes_manager.navigation_size + s_dict1._xmap = CrystalMap(Rotation(np.zeros((n_sim, 4)))) + sd = StaticPatternMatching(s_dict1) + res = sd(s) + + assert res.shape == desired_xmap_shape + + @pytest.mark.parametrize( + "nav_slice, step_sizes, desired_arrays", + [ + # 0d + ((0, 0), (1, 1), ()), + ((slice(0, 0), slice(0, 0)), (1, 1), (np.array([]),) * 2), + # 1d + ((0, slice(None)), (1, 1.5), np.tile(np.arange(0, 4.5, 1.5), 3)), + # 2d + ( + (slice(None), slice(0, 2)), + (2, 1.5), + ( + np.tile(np.arange(0, 6, 2), 2), + np.tile(np.arange(0, 3, 1.5), 3), + ), + ), + ( + (slice(None), slice(0, 2)), + (0.5, 1), + ( + np.tile(np.arange(0, 1.5, 0.5), 2), + np.tile(np.arange(0, 2, 1), 3), + ), + ), + ], + ) + def test_get_spatial_arrays(self, nav_slice, step_sizes, desired_arrays): + """Ensure spatial arrays for 0d, 1d and 2d EBSD signals are + returned correctly. + """ + s = nickel_ebsd_small() + s.axes_manager["x"].scale = step_sizes[0] + s.axes_manager["y"].scale = step_sizes[1] + axes_manager = s.inav[nav_slice].axes_manager + spatial_arrays = _get_spatial_arrays( + shape=axes_manager.navigation_shape, + extent=axes_manager.navigation_extent, + step_sizes=[i.scale for i in axes_manager.navigation_axes], + ) + + if len(spatial_arrays) == 0: + assert spatial_arrays == desired_arrays + else: + assert [ + np.allclose(spatial_arrays[i], desired_arrays[i]) + for i in range(len(spatial_arrays)) + ] diff --git a/kikuchipy/pattern/_pattern.py b/kikuchipy/pattern/_pattern.py index 6d737564..2b599639 100644 --- a/kikuchipy/pattern/_pattern.py +++ b/kikuchipy/pattern/_pattern.py @@ -152,7 +152,7 @@ def remove_dynamic_background( See Also -------- - kikuchipy.signals.EBSD.remove_dynamic_background, + kikuchipy.signals.EBSD.remove_dynamic_background kikuchipy.pattern.remove_dynamic_background """ if std is None: diff --git a/kikuchipy/signals/ebsd.py b/kikuchipy/signals/ebsd.py index b4d59483..b450dfc5 100644 --- a/kikuchipy/signals/ebsd.py +++ b/kikuchipy/signals/ebsd.py @@ -51,6 +51,8 @@ fft_filter, _dynamic_background_frequency_space_setup, ) +from kikuchipy.indexing import StaticPatternMatching +from kikuchipy.indexing.similarity_metrics import SimilarityMetric from kikuchipy.signals.util._metadata import ( ebsd_metadata, metadata_nodes, @@ -193,7 +195,7 @@ def set_experimental_parameters( See Also -------- - kikuchipy.signals.EBSD.set_phase_parameters + ~kikuchipy.signals.EBSD.set_phase_parameters Examples -------- @@ -295,7 +297,7 @@ def set_phase_parameters( See Also -------- - kikuchipy.signals.EBSD.set_experimental_parameters + ~kikuchipy.signals.EBSD.set_experimental_parameters Examples -------- @@ -354,7 +356,7 @@ def set_scan_calibration( See Also -------- - kikuchipy.signals.EBSD.set_detector_calibration + ~kikuchipy.signals.EBSD.set_detector_calibration Examples -------- @@ -380,7 +382,7 @@ def set_detector_calibration(self, delta: Union[int, float]): See Also -------- - kikuchipy.signals.EBSD.set_scan_calibration + ~kikuchipy.signals.EBSD.set_scan_calibration Examples -------- @@ -428,7 +430,7 @@ def remove_static_background( See Also -------- - kikuchipy.signals.EBSD.remove_dynamic_background, + ~kikuchipy.signals.EBSD.remove_dynamic_background Examples -------- @@ -564,9 +566,9 @@ def remove_dynamic_background( See Also -------- - kikuchipy.signals.EBSD.remove_static_background, - kikuchipy.signals.EBSD.get_dynamic_background, - 
kikuchipy.pattern.remove_dynamic_background, + kikuchipy.signals.EBSD.remove_static_background + kikuchipy.signals.EBSD.get_dynamic_background + kikuchipy.pattern.remove_dynamic_background kikuchipy.pattern.get_dynamic_background Examples @@ -755,8 +757,8 @@ def adaptive_histogram_equalization( See also -------- - kikuchipy.signals.EBSD.rescale_intensity, - kikuchipy.signals.EBSD.normalize_intensity + ~kikuchipy.signals.EBSD.rescale_intensity + ~kikuchipy.signals.EBSD.normalize_intensity Examples -------- @@ -878,6 +880,84 @@ def get_image_quality(self, normalize: bool = True) -> np.ndarray: return image_quality_map + def match_patterns( + self, + simulations, + metric: Union[str, SimilarityMetric] = "ncc", + keep_n: int = 50, + n_slices: int = 1, + return_merged_crystal_map: bool = False, + get_orientation_similarity_map: bool = False, + ) -> Union[CrystalMap, List[CrystalMap]]: + """Match each experimental pattern to all simulated patterns, of + known crystal orientations in pre-computed dictionaries + :cite:`chen2015dictionary,jackson2019dictionary`, to determine + their phase and orientation. + + A suitable similarity metric, the normalized cross-correlation + (:func:`~kikuchipy.indexing.similarity_metrics.ncc`), is used by + default, but a valid user-defined similarity metric may be used + instead (see + :func:`~kikuchipy.indexing.similarity_metrics.make_similarity_metric`). + + :class:`~orix.crystal_map.crystal_map.CrystalMap`'s for each + dictionary with "scores" and "simulation_indices" as properties + are returned. + + Parameters + ---------- + simulations : EBSD or list of EBSD + An EBSD signal or a list of EBSD signals with simulated + patterns (dictionaries). The signals must have a 1D + navigation axis and the `xmap` property with crystal + orientations set. + metric : str or SimilarityMetric, optional + Similarity metric, by default "ncc" (normalized + cross-correlation). + keep_n : int, optional + Number of best matches to keep, by default 50 or the number + of simulated patterns if fewer than 50 are available. + n_slices : int, optional + Number of simulation slices to process sequentially, by + default 1 (no slicing). + return_merged_crystal_map : bool, optional + Whether to return a merged crystal map, the best matches + determined from the similarity scores, in addition to the + single phase maps. By default False. + get_orientation_similarity_map : bool, optional + Add orientation similarity maps to the returned crystal + maps' properties named "osm". By default False. + + Returns + ------- + xmaps : ~orix.crystal_map.crystal_map.CrystalMap or list of \ + ~orix.crystal_map.crystal_map.CrystalMap + A crystal map for each dictionary loaded and one merged map + if `return_merged_crystal_map = True`. + + Notes + ----- + Merging of crystal maps and calculations of orientation + similarity maps can be done afterwards with + :func:`~kikuchipy.indexing.merge_crystal_maps` and + :func:`~kikuchipy.indexing.orientation_similarity_map`, + respectively. 
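+
+        Examples
+        --------
+        A minimal sketch based on the test suite, where the
+        experimental patterns double as the dictionary and a crystal
+        map of placeholder rotations is attached to the private
+        `_xmap` attribute (a dictionary read from an EMsoft simulated
+        pattern file has `xmap` set on reading)
+
+        >>> import numpy as np
+        >>> from orix.crystal_map import CrystalMap
+        >>> from orix.quaternion import Rotation
+        >>> from kikuchipy.data import nickel_ebsd_small
+        >>> from kikuchipy.signals import EBSD
+        >>> s = nickel_ebsd_small()  # (3, 3) map of (60, 60) patterns
+        >>> s_dict = EBSD(s.data.reshape(-1, 60, 60))  # 1D navigation axis
+        >>> s_dict._xmap = CrystalMap(Rotation(np.zeros((9, 4))))
+        >>> xmap = s.match_patterns(s_dict, keep_n=1)
+        >>> np.allclose(xmap.scores, 1)  # Each pattern is its own best match
+        True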
+ + See Also + -------- + ~kikuchipy.indexing.similarity_metrics.make_similarity_metric + ~kikuchipy.indexing.similarity_metrics.ndp + """ + sdi = StaticPatternMatching(simulations) + return sdi( + signal=self, + metric=metric, + keep_n=keep_n, + n_slices=n_slices, + return_merged_crystal_map=return_merged_crystal_map, + get_orientation_similarity_map=get_orientation_similarity_map, + ) + def fft_filter( self, transfer_function: Union[np.ndarray, Window], @@ -928,7 +1008,7 @@ def fft_filter( See Also -------- - :class:`~kikuchipy.filters.window.Window` + ~kikuchipy.filters.window.Window """ dtype_out = self.data.dtype @@ -1017,8 +1097,8 @@ def average_neighbour_patterns( See Also -------- - :class:`~kikuchipy.filters.window.Window`, - :func:`scipy.signal.windows.get_window`, + ~kikuchipy.filters.window.Window + :func:`scipy.signal.windows.get_window` :func:`scipy.ndimage.correlate` Examples @@ -1192,7 +1272,7 @@ def plot_virtual_bse_intensity( See Also -------- - kikuchipy.signals.EBSD.get_virtual_bse_intensity + ~kikuchipy.signals.EBSD.get_virtual_bse_intensity """ # Plot signal if necessary if self._plot is None or not self._plot.is_active: @@ -1270,7 +1350,7 @@ def get_virtual_bse_intensity( See Also -------- - kikuchipy.signals.EBSD.plot_virtual_bse_intensity + ~kikuchipy.signals.EBSD.plot_virtual_bse_intensity """ vbse = roi(self, axes=self.axes_manager.signal_axes) vbse_sum = self._get_sum_signal(vbse, out_signal_axes) @@ -1322,7 +1402,7 @@ def save( See Also -------- - kikuchipy.io.plugins.h5ebsd.file_writer,\ + kikuchipy.io.plugins.h5ebsd.file_writer kikuchipy.io.plugins.nordif.file_writer """ if filename is None: diff --git a/kikuchipy/signals/tests/test_ebsd.py b/kikuchipy/signals/tests/test_ebsd.py index 99a2726c..6e18aad2 100644 --- a/kikuchipy/signals/tests/test_ebsd.py +++ b/kikuchipy/signals/tests/test_ebsd.py @@ -26,6 +26,7 @@ from matplotlib.pyplot import close import numpy as np from orix.crystal_map import CrystalMap +from orix.quaternion import Rotation import pytest from scipy.ndimage import correlate from skimage.exposure import rescale_intensity @@ -1212,3 +1213,15 @@ class TestEBSDdetectorProperty: def test_init_detector(self): """The attribute is set correctly.""" pass + + +class TestPatternMatching: + def test_match_patterns(self, dummy_signal): + """Scores are all 1.0 for a dictionary containing all patterns + from dummy_signal(). + """ + s_dict = EBSD(dummy_signal.data.reshape(-1, 3, 3)) + s_dict._xmap = CrystalMap(Rotation(np.zeros((9, 4)))) + xmap = dummy_signal.match_patterns(s_dict) + + assert np.allclose(xmap.scores[:, 0], 1) diff --git a/setup.py b/setup.py index 695fcae1..fc4fc237 100644 --- a/setup.py +++ b/setup.py @@ -19,6 +19,7 @@ from itertools import chain from setuptools import setup, find_packages + # Get release information without importing anything from the project with open("kikuchipy/release.py") as fid: for line in fid: @@ -124,6 +125,7 @@ "numba >= 0.48", "orix >= 0.5", "pooch", + "psutil", "tqdm >= 0.5.2", "scikit-image >= 0.16", "scikit-learn",