diff --git a/docs/references.rst b/docs/references.rst
index 7274106442..fa7123c6a9 100644
--- a/docs/references.rst
+++ b/docs/references.rst
@@ -164,6 +164,10 @@ References
    *Computational assignment of cell-cycle stage from single-cell transcriptome data*
    `Methods `__.
 
+.. [Setty18] Setty *et al.* (2018),
+   *Palantir characterizes cell fate continuities in human hematopoiesis*,
+   `bioRxiv `__.
+
 .. [Traag17] Traag (2017),
    *Louvain*,
    `GitHub `__.
diff --git a/docs/release_notes.rst b/docs/release_notes.rst
index 8af3f0f5ab..c942c3dda1 100644
--- a/docs/release_notes.rst
+++ b/docs/release_notes.rst
@@ -6,6 +6,13 @@
 .. role:: smaller
 .. role:: noteversion
 
+On master :small:`March 21, 2019`
+-------------------------------------
+
+- :func:`~scanpy.pp.downsample_counts` has been sped up and the default value of its `replace` parameter has been changed to `False`, see `here `__ :smaller:`thanks to I Virshup`
+- :func:`~scanpy.pl.density_embedding` allows plotting cell densities on embeddings, see `here `__ :smaller:`thanks to M Luecken`
+- :func:`~scanpy.external.tl.palantir` interfaces Palantir [Setty18]_, see `here `__ :smaller:`thanks to A Mousa`
+
 Version 1.4 :small:`February 5, 2019`
 -------------------------------------
diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py
index 0937afc285..10f2c40cfa 100644
--- a/scanpy/external/__init__.py
+++ b/scanpy/external/__init__.py
@@ -54,6 +54,7 @@
    :toctree: .
 
    tl.phate
+   tl.palantir
 
 Clustering and trajectory inference
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -80,6 +81,7 @@
    :toctree: .
 
    pl.phate
+   tl.palantir
 
 Exporting
diff --git a/scanpy/external/_tools/_palantir.py b/scanpy/external/_tools/_palantir.py
new file mode 100644
index 0000000000..fe7e06b213
--- /dev/null
+++ b/scanpy/external/_tools/_palantir.py
@@ -0,0 +1,294 @@
+"""Run Diffusion maps using the adaptive anisotropic kernel
+"""
+
+from scanpy import logging as logg
+
+
+def palantir(adata):
+    """
+    Run Diffusion maps using the adaptive anisotropic kernel [Setty18]_.
+
+    Palantir is an algorithm to align cells along differentiation trajectories.
+    Palantir models differentiation as a stochastic process in which stem cells
+    differentiate to terminally differentiated cells by a series of steps through
+    a low-dimensional phenotypic manifold. Palantir effectively captures the
+    continuity in cell states and the stochasticity in cell fate determination.
+    Palantir has been designed to work with multidimensional single-cell data
+    from diverse technologies such as mass cytometry and single-cell RNA-seq.
+
+    .. note::
+       More information and bug reports `here `__.
+
+    Parameters
+    ----------
+    adata : :class:`~anndata.AnnData`
+        An AnnData object, or a DataFrame of cells x genes.
+
+    Returns
+    -------
+
+    `.uns['palantir_norm_data']` which is a `data_df` copy of adata if normalized
+
+    `pca_results` PCA projections and explained variance ratio of adata:
+        - `.uns['palantir_pca_results']['pca_projections']`
+        - `.uns['palantir_pca_results']['variance_ratio']`
+
+    `dm_res` Diffusion components, corresponding eigenvalues and the diffusion operator:
+        - `.uns['palantir_diff_maps']['EigenVectors']`
+        - `.uns['palantir_diff_maps']['EigenValues']`
+        - `.uns['palantir_diff_maps']['T']`
+
+    `.uns['palantir_ms_data']` which is the `ms_data` - multi-scale data matrix
+
+    `.uns['palantir_tsne']` which is `tsne` - tSNE on diffusion maps
+
+    `.uns['palantir_imp_df']` which is `imp_df` - imputed data matrix (MAGIC imputation)
+
+    Example
+    -------
+
+    >>> import scanpy.external as sce
+    >>> import scanpy as sc
+
+    A sample dataset is available `here `_.
+
+    To view the plots, it is recommended to run this example from a Jupyter notebook.
+
+    *Load sample data*
+
+    >>> adata = sc.read_csv(filename="Palantir/data/marrow_sample_scseq_counts.csv.gz")
+
+    **Pre-processing**
+
+    The provided adata will be used as input to the embedded `palantir` methods:
+
+    >>> d = sce.tl.palantir(adata=adata)
+
+    At this point, a new class object, `d`, will be instantiated. If the data
+    needs pre-processing - filtering of cells or genes with low counts,
+    normalization, or log transformation - set `filter_low`, `normalize`, or
+    `log_transform` to `True`:
+
+    >>> d.filter_low = True
+    >>> d.normalize = True
+    >>> d.log_transform = True
+
+    The created object `d.palantir` can be used to override the default
+    parameters used for pre-processing.
+
+    Follow the next step to pass the data to the palantir methods and generate
+    the objects listed under Returns.
+
+    **Run Palantir**
+
+    >>> d.process()
+
+    Calling this method runs `palantir` and generates the various outputs.
+    The generated objects are pushed to `adata` and stored for further use.
+    *Principal component analysis*, *Diffusion maps*, *tSNE on Diffusion maps*,
+    and *MAGIC imputation* data objects are created using the `palantir`
+    default parameters.
+
+    If running `palantir` with the default parameters is not satisfactory,
+    the `d.palantir` methods can be used to override and substitute the
+    individual outputs already embedded into `adata`.
+
+    **Plotting**
+
+    *tSNE visualization*
+
+    >>> fig, ax = d.palantir.plot.plot_tsne(d.tsne)
+    >>> fig, ax = d.palantir.plot.plot_tsne_by_cell_sizes(d.data_df, d.tsne)
+
+    *Gene expression can be visualized on tSNE maps*
+
+    >>> d.palantir.plot.plot_gene_expression(d.imp_df, d.tsne, ['CD34', 'MPO', 'GATA1', 'IRF8'])
+
+    *Diffusion maps*
+
+    >>> d.palantir.plot.plot_diffusion_components(d.tsne, d.dm_res)
+
+    **Visualizing Palantir results**
+
+    Palantir can be run by specifying an approximate early cell. While Palantir
+    automatically determines the terminal states, they can also be specified using the
+    `terminal_states` parameter.
+
+    >>> start_cell = 'Run5_164698952452459'
+    >>> pr_res = d.palantir.core.run_palantir(d.ms_data, start_cell, num_waypoints=500)
+    >>> d.palantir.plot.plot_palantir_results(pr_res, d.tsne)
+
+    Note that a `start_cell` must be defined for every dataset. The start cell
+    for this dataset was chosen based on its high expression of CD34.
+
+    For further demonstration of palantir visualizations please follow this notebook
+    `Palantir_sample_notebook.ipynb `_.
+    It provides a comprehensive guide to drawing *gene expression trends*, amongst other things.
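+
+    *Accessing the stored results*
+
+    Once `d.process()` has finished, the objects listed under Returns are
+    available from `adata.uns`, for example:
+
+    >>> ms_data = adata.uns['palantir_ms_data']
+    >>> tsne = adata.uns['palantir_tsne']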
+ + """ + + logg.info('Palantir diffusion maps', r=True) + + class _wrapper_cls(object): + """ + A wrapper class to instantiate a new object that wraps `palantir` as an + attribute reference attached to the class, together with other attribute + references. The class uses instance variables, to preprocess and generate + data using the embedded palantir package. + Pre-processing of data is important step before start using the palantir + methods. + palantir accepts as input a Counts matrix: Cells x Genes. + + Methods used are: + - instantiation initiation + - instance function to embed palantir + - pre-processing of input data + """ + + def __init__(self , + adata, + func=None , + normalize = False, + log_transform = False, + filter_low = False + ): + """ + Parameters + ---------- + adata : AnnData, or Dataframe of cells X genes + func : function wrapper to import palantir (not to be used) + normalize : `bool` (default: `False`) + property setter passed to palantir to normalize using palantir method + `palantir.preprocess.normalize_counts`. + log_transform : `bool` (default: `False`) + property setter passed to palantir. Some datasets show better signal in the log + scale. Applied using `palantir.preprocess.log_transform` + filter_low : `bool` (default: `False`) + property setter passed to palantir to remove low molecule count cells and low detection genes + """ + + # instantiate variables + self.func = func + self.adata = adata + self._normalize = normalize + self._log_transform = log_transform + self._filter_low = filter_low + + try: + # for AnnData + self.data_df = self.adata.to_df() + except AttributeError: + # assume the data is a cell X genes Dataframe + logg.info('Assuming the data is a cell X genes Dataframe', + r=True) + + # load palantir + self.__call__() + logg.info('palantir loaded ...', r=True) + + def __call__(self): + """ + Call for function to import palantir and instantiate it as a class + attribute + """ + self.palantir = self.func() + + def process(self): + + """ + A method to run `palantir` on input Data Frame + """ + + # Principal component analysis + logg.info('PCA in progress ...', r=True) + + self.pca_projections, self.var_r = self.palantir.utils.run_pca(self.data_df) + + adata.uns['palantir_pca_results'] = {} + adata.uns['palantir_pca_results']['pca_projections'] = self.pca_projections + adata.uns['palantir_pca_results']['variance_ratio'] = self.var_r + + # Diffusion maps + logg.info('Diffusion maps in progress ...', r=True) + + self.dm_res = self.palantir.utils.run_diffusion_maps(self.pca_projections) + self.ms_data = self.palantir.utils.determine_multiscale_space(self.dm_res) + + adata.uns['palantir_diff_maps'] = self.dm_res + adata.uns['palantir_ms_data'] = self.ms_data + + # tSNE visualization + logg.info('tSNE in progress ...', r=True) + + self.tsne = self.palantir.utils.run_tsne(self.ms_data) + + adata.uns['palantir_tsne'] = self.tsne + + # MAGIC imputation + logg.info('imputation in progress ...', r=True) + + self.imp_df = self.palantir.utils.run_magic_imputation(self.data_df, self.dm_res) + + adata.uns['palantir_imp_df'] = self.imp_df + + logg.info('End of processing, start plotting.', r=True) + + @property + def normalize(self): + return self._normalize + @normalize.setter + def normalize(self , value): + if value is True: + self.data_df = self.palantir.preprocess.normalize_counts(self.data_df) + adata.uns['palantir_norm_data'] = self.data_df + logg.info('data normalized ...', r=True) + + @property + def log_transform(self): + return self._log_transform + 
+        @log_transform.setter
+        def log_transform(self, value):
+            if value is True:
+                self.data_df = self.palantir.preprocess.log_transform(self.data_df)
+                adata.uns['palantir_norm_data'] = self.data_df
+                logg.info('data log transformed ...', r=True)
+
+        @property
+        def filter_low(self):
+            return self._filter_low
+
+        @filter_low.setter
+        def filter_low(self, value):
+            if value is True:
+                self.data_df = self.palantir.preprocess.filter_counts_data(self.data_df)
+                adata.uns['palantir_norm_data'] = self.data_df
+                logg.info('data filtered for low counts:\n\t' +
+                          'cell_min_molecules=1000\n\tgenes_min_cells=10',
+                          r=True)
+
+    def wrapper_cls(adata, func=None):
+        """
+        Class wrapper to pass a function to the class alongside its positional argument
+        """
+        if func:
+            return _wrapper_cls(adata, func)
+        else:
+            def wrapper(func):
+                return _wrapper_cls(adata, func)
+            return wrapper
+
+    # import palantir and wrap it in a function passed to the wrapper class;
+    # this allows passing the positional argument adata to `_wrapper_cls`
+    @wrapper_cls(adata)
+    def _run():
+        import importlib
+        try:
+            palantir = importlib.import_module('palantir')
+        except ImportError:
+            raise ImportError(
+                '\nplease install palantir: \n\n\t'
+                'git clone git://github.com/dpeerlab/Palantir.git\n\t'
+                'cd Palantir\n\t'
+                'sudo -H pip3 install .\n')
+        return palantir
+
+    return _run
diff --git a/scanpy/external/tl.py b/scanpy/external/tl.py
index 207acb40f0..04599fd8ad 100644
--- a/scanpy/external/tl.py
+++ b/scanpy/external/tl.py
@@ -1,3 +1,4 @@
 from ..tools._pypairs import cyclone, sandbag
 from ..tools._phate import phate
 from ..tools._phenograph import phenograph
+from ._tools._palantir import palantir
diff --git a/scanpy/plotting/_anndata.py b/scanpy/plotting/_anndata.py
index 4dd5d07070..12959b6b46 100755
--- a/scanpy/plotting/_anndata.py
+++ b/scanpy/plotting/_anndata.py
@@ -2083,7 +2083,9 @@ def correlation_matrix(adata, groupby, show_correlation_numbers=False, dendrogra
         width = corr_matrix_height + dendrogram_width
     else:
         width, height = figsize
+        corr_matrix_height = height - colorbar_height
+
     fig = pl.figure(figsize=(width, height))
     # layout with 2 rows and 2 columns:
     # row 1: dendrogram + correlation matrix
diff --git a/scanpy/plotting/_tools/scatterplots.py b/scanpy/plotting/_tools/scatterplots.py
index cddf67d4ed..e860d3209c 100644
--- a/scanpy/plotting/_tools/scatterplots.py
+++ b/scanpy/plotting/_tools/scatterplots.py
@@ -156,6 +156,13 @@ def plot_scatter(
             color_vector = color_vector[order]
             _data_points = data_points[component_idx][order, :]
+            # if 'size' is given (stored in kwargs['s']),
+            # reorder it to match the reordered points
+            import pandas.core.series
+            if 's' in kwargs and kwargs['s'] is not None \
+                    and isinstance(kwargs['s'], (list, pandas.core.series.Series, np.ndarray)) \
+                    and len(kwargs['s']) == len(color_vector):
+                kwargs['s'] = np.array(kwargs['s'])[order]
         else:
             _data_points = data_points[component_idx]
diff --git a/scanpy/preprocessing/_simple.py b/scanpy/preprocessing/_simple.py
index 591d52ee58..72c59cbcef 100644
--- a/scanpy/preprocessing/_simple.py
+++ b/scanpy/preprocessing/_simple.py
@@ -15,7 +15,7 @@
 from .. import settings as sett
 from .. import logging as logg
-from ..utils import sanitize_anndata
+from ..utils import sanitize_anndata, deprecated_arg_names
 from ._distributed import materialize_as_ndarray
 from ._utils import _get_mean_var
@@ -910,25 +910,37 @@ def subsample(data, fraction=None, n_obs=None, random_state=0, copy=False):
     return X[obs_indices], obs_indices
 
 
-def downsample_counts(adata, target_counts=20000, random_state=0,
-                      replace=True, copy=False):
-    """Downsample counts so that each cell has no more than `target_counts`.
+@deprecated_arg_names({"target_counts": "counts_per_cell"})
+def downsample_counts(
+    adata: AnnData,
+    counts_per_cell: Optional[int] = None,
+    total_counts: Optional[int] = None,
+    random_state: Optional[int] = 0,
+    replace: bool = False,
+    copy: bool = False,
+) -> Optional[AnnData]:
+    """
+    Downsample counts from a count matrix.
 
-    Cells with fewer counts than `target_counts` are unaffected by this. This
-    has been implemented by M. D. Luecken.
+    If `counts_per_cell` is specified, each cell will be downsampled. If
+    `total_counts` is specified, the expression matrix will be downsampled to
+    contain at most `total_counts` counts in total.
 
     Parameters
     ----------
-    adata : :class:`~anndata.AnnData`
+    adata
         Annotated data matrix.
-    target_counts : `int` (default: 20,000)
-        Target number of counts for downsampling. Cells with more counts than
-        'target_counts' will be downsampled to have 'target_counts' counts.
-    random_state : `int` or `None`, optional (default: 0)
-        Random seed to change subsampling.
-    replace : `bool`, optional (default: `True`)
+    counts_per_cell
+        Target total counts per cell. If a cell has more than `counts_per_cell`
+        counts, it will be downsampled to this number.
+    total_counts
+        Target total counts. If the count matrix has more than `total_counts`
+        counts, it will be downsampled to have this number.
+    random_state
+        Random seed for subsampling.
+    replace
         Whether to sample the counts with replacement.
-    copy : `bool`, optional (default: `False`)
+    copy
         If an :class:`~anndata.AnnData` is passed, determines whether a copy
         is returned.
 
@@ -937,34 +949,67 @@ def downsample_counts(adata, target_counts=20000, random_state=0,
     AnnData, None
         Depending on `copy` returns or updates an `adata` with downsampled `.X`.
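+
+    Examples
+    --------
+    A minimal sketch of the two modes, on synthetic counts (the values used
+    here are illustrative only):
+
+    >>> import numpy as np
+    >>> import scanpy as sc
+    >>> from anndata import AnnData
+    >>> adata = AnnData(np.random.randint(0, 100, (1000, 100)))
+    >>> adata_cell = sc.pp.downsample_counts(adata, counts_per_cell=1000, copy=True)
+    >>> adata_total = sc.pp.downsample_counts(adata, total_counts=20000, copy=True)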
""" + if type(total_counts) == type(counts_per_cell): + raise ValueError("Must specify exactly one of `total_counts` or `counts_per_cell`.") if copy: adata = adata.copy() adata.X = adata.X.astype(np.integer) # Numba doesn't want floats - if issparse(adata.X): - X = adata.X + if total_counts: + adata.X = _downsample_total_counts(adata.X, total_counts, random_state, replace) + elif counts_per_cell: + adata.X = _downsample_per_cell(adata.X, counts_per_cell, random_state, replace) + if copy: + return adata + + +def _downsample_per_cell(X, counts_per_cell, random_state, replace): + if issparse(X): + original_type = type(X) if not isspmatrix_csr(X): X = csr_matrix(X) totals = np.ravel(X.sum(axis=1)) - under_target = np.nonzero(totals > target_counts)[0] + under_target = np.nonzero(totals > counts_per_cell)[0] cols = np.split(X.data.view(), X.indptr[1:-1]) for colidx in under_target: col = cols[colidx] - downsample_cell(col, target_counts, random_state=random_state, - replace=replace, inplace=True) - if not isspmatrix_csr(adata.X): # Put it back - adata.X = type(adata.X)(X) + _downsample_array(col, counts_per_cell, random_state=random_state, + replace=replace, inplace=True) + X.eliminate_zeros() + if original_type is not csr_matrix: # Put it back + X = original_type(X) + else: + totals = np.ravel(X.sum(axis=1)) + under_target = np.nonzero(totals > counts_per_cell)[0] + X[under_target, :] = \ + np.apply_along_axis(_downsample_array, 1, X[under_target, :], + counts_per_cell, random_state=random_state, + replace=replace) + return X + + +def _downsample_total_counts(X, total_counts, random_state, replace): + total = X.sum() + if total < total_counts: + return X + if issparse(X): + original_type = type(X) + if not isspmatrix_csr(X): + X = csr_matrix(X) + _downsample_array(X.data, total_counts, random_state=random_state, + replace=replace, inplace=True) + X.eliminate_zeros() + if original_type is not csr_matrix: + X = original_type(X) else: - totals = np.ravel(adata.X.sum(axis=1)) - under_target = np.nonzero(totals > target_counts)[0] - adata.X[under_target, :] = \ - np.apply_along_axis(downsample_cell, 1, adata.X[under_target, :], - target_counts, random_state=random_state, replace=replace) - if copy: return adata + v = X.view().reshape(np.multiply(*X.shape)) + _downsample_array(v, total_counts, random_state, replace=replace, + inplace=True) + return X -@numba.njit -def downsample_cell(col: np.array, target: int, random_state: int=0, - replace: bool=True, inplace: bool=False): +@numba.njit(cache=True) +def _downsample_array(col: np.array, target: int, random_state: int=0, + replace: bool = True, inplace: bool=False): """ Evenly reduce counts in cell to target amount. 
diff --git a/scanpy/tests/test_preprocessing.py b/scanpy/tests/test_preprocessing.py
index 5e006ead72..116e2a7718 100644
--- a/scanpy/tests/test_preprocessing.py
+++ b/scanpy/tests/test_preprocessing.py
@@ -88,7 +88,7 @@ def test_regress_out_categorical():
     multi = sc.pp.regress_out(adata, keys='batch', n_jobs=8, copy=True)
     assert adata.X.shape == multi.X.shape
 
-def test_downsample_counts():
+def test_downsample_counts_per_cell():
     TARGET = 1000
     X = np.random.randint(0, 100, (1000, 100)) * \
         np.random.binomial(1, .3, (1000, 100))
@@ -97,7 +97,7 @@ def test_downsample_counts():
     adata_csc = AnnData(X=sp.csc_matrix(X))
     for adata, replace in product((adata_dense, adata_csr, adata_csc), (True, False)):
         initial_totals = np.ravel(adata.X.sum(axis=1))
-        adata = sc.pp.downsample_counts(adata, target_counts=TARGET, replace=replace, copy=True)
+        adata = sc.pp.downsample_counts(adata, counts_per_cell=TARGET, replace=replace, copy=True)
         new_totals = np.ravel(adata.X.sum(axis=1))
         if sp.issparse(adata.X):
             assert all(adata.X.toarray()[X == 0] == 0)
@@ -109,3 +109,27 @@
             == new_totals[initial_totals <= TARGET])
     if not replace:
         assert np.all(X >= adata.X)
+
+def test_downsample_total_counts():
+    X = np.random.randint(0, 100, (1000, 100)) * \
+        np.random.binomial(1, .3, (1000, 100))
+    total = X.sum()
+    target = np.floor_divide(total, 10)
+    adata_dense = AnnData(X=X.copy())
+    adata_csr = AnnData(X=sp.csr_matrix(X))
+    for adata, replace in product((adata_dense, adata_csr), (True, False)):
+        initial_totals = np.ravel(adata.X.sum(axis=1))
+        adata = sc.pp.downsample_counts(adata, total_counts=target, replace=replace, copy=True)
+        new_totals = np.ravel(adata.X.sum(axis=1))
+        if sp.issparse(adata.X):
+            assert all(adata.X.toarray()[X == 0] == 0)
+        else:
+            assert all(adata.X[X == 0] == 0)
+        assert adata.X.sum() == target
+        assert all(initial_totals >= new_totals)
+        if not replace:
+            assert np.all(X >= adata.X)
+    for adata in (adata_dense, adata_csr):  # when the specified total exceeds the current total
+        adata = sc.pp.downsample_counts(adata, total_counts=total + 10, replace=False, copy=True)
+        assert (adata.X == X).all()
+
diff --git a/scanpy/tools/_phenograph.py b/scanpy/tools/_phenograph.py
index 970cf1b6e9..a1b1fb520a 100644
--- a/scanpy/tools/_phenograph.py
+++ b/scanpy/tools/_phenograph.py
@@ -4,7 +4,7 @@
 from .. import logging as logg
 
 
-def phenograph( data,
+def phenograph( adata,
                 k=30,
                 directed=False,
                 prune=False,
@@ -19,7 +19,7 @@ def phenograph( data,
 
     PhenoGraph clustering [Levine15]_.
 
-    :param data: Numpy ndarray of data to cluster,
+    :param adata: Numpy ndarray of data to cluster,
         or sparse matrix of k-nearest neighbor graph
         If ndarray, n-by-d array of n cells in d dimensions
        If sparse matrix, n-by-n adjacency matrix
@@ -62,14 +62,43 @@ def phenograph( data,
     Example
     -------
 
+    >>> import scanpy.external as sce
     >>> import scanpy.api as sc
     >>> import numpy as np
+    >>> import pandas as pd
+
+    Assume `adata` is your annotated data object containing the normalized data.
 
-    >>> # Cluster and cluster centrolds
+    Then run PCA:
+
+    >>> sc.tl.pca(adata, n_comps=100)
+
+    Compute PhenoGraph clusters:
+
+    >>> result = sce.tl.phenograph(adata.obsm['X_pca'], k=30)
+
+    Embed the PhenoGraph result into adata as a *categorical* variable (this helps in plotting):
+
+    >>> adata.obs['pheno'] = pd.Categorical(result[0])
+
+    Check by typing `adata`: you should now see `'pheno'` listed under `obs`.
+
+    To show the PhenoGraph clusters on a tSNE embedding, for example:
+
+    Compute tSNE:
+
+    >>> sc.tl.tsne(adata, random_state=7)
+
+    Plot the PhenoGraph clusters on tSNE:
+
+    >>> sc.pl.tsne(adata, color=['pheno'], s=100, palette=sc.pl.palettes.vega_20_scanpy, legend_fontsize=10)
+
+    Cluster and cluster centroids for an input Numpy ndarray:
+
     >>> df = np.random.rand(1000,40)
     >>> df.shape
     (1000, 40)
-    >>> communities, graph, Q = sc.tl.phenograph(df, k=50)
+    >>> result = sce.tl.phenograph(df, k=50)
     Finding 50 nearest neighbors using minkowski metric and 'auto' algorithm
     Neighbors computed in 0.16141605377197266 seconds
     Jaccard graph constructed in 0.7866239547729492 seconds
     Wrote graph to binary file in 0.42542195320129395 seconds
     Running Louvain modularity optimization
     After 1 runs, maximum modularity is Q = 0.223536
@@ -79,6 +108,13 @@
     After 2 runs, maximum modularity is Q = 0.235874
     Louvain completed 22 runs in 1.5609488487243652 seconds
     PhenoGraph complete in 2.9466471672058105 seconds
+
+    New results can be pushed into the adata object:
+
+    >>> dframe = pd.DataFrame(data=df, columns=range(df.shape[1]), index=range(df.shape[0]))
+    >>> adata = sc.AnnData(X=dframe)
+    >>> adata.obs['pheno'] = pd.Categorical(result[0])
+
     """
 
     logg.info('PhenoGraph clustering', r=True)
@@ -91,7 +127,7 @@
             'pip3 install git+https://github.com/jacoblevine/phenograph.git')
 
     communities, graph, Q = phenograph.cluster(
-        data=data,
+        data=adata,
         k=k,
         directed=directed,
         prune=prune,
diff --git a/scanpy/utils.py b/scanpy/utils.py
index 050d920fea..01088966f3 100644
--- a/scanpy/utils.py
+++ b/scanpy/utils.py
@@ -5,7 +5,7 @@
 import inspect
 from weakref import WeakSet
 from collections import namedtuple
-from functools import partial
+from functools import partial, wraps
 from types import ModuleType
 from typing import Union, Callable, Optional
 
@@ -16,12 +16,12 @@
 from pandas.api.types import CategoricalDtype
 
 from . import settings, logging as logg
+import warnings
 
 EPS = 1e-15
 
 
 def check_versions():
-    import warnings
     from distutils.version import LooseVersion
 
     if sys.version_info < (3, 0):
@@ -61,6 +61,40 @@ def type_doc(name: str):
     )
 
 
+def deprecated_arg_names(arg_mapping):
+    """
+    Decorator which marks a function's keyword arguments as deprecated. It will
+    result in a warning being emitted when a deprecated keyword argument is
+    used, and in the function being called with the new argument name instead.
+
+    Parameters
+    ----------
+    arg_mapping : dict[str, str]
+        Mapping from deprecated argument name to current argument name.
+    """
+    def decorator(func):
+        @wraps(func)
+        def func_wrapper(*args, **kwargs):
+            warnings.simplefilter(
+                'always', DeprecationWarning)  # turn off filter
+            for old, new in arg_mapping.items():
+                if old in kwargs:
+                    warnings.warn(
+                        "Keyword argument '{0}' has been deprecated in favour "
+                        "of '{1}'. '{0}' will be removed in a future version."
+                        .format(old, new),
+                        category=DeprecationWarning,
+                        stacklevel=2,
+                    )
+                    val = kwargs.pop(old)
+                    kwargs[new] = val
+            warnings.simplefilter(
+                'default', DeprecationWarning)  # reset filter
+            return func(*args, **kwargs)
+        return func_wrapper
+    return decorator
+
+
 def descend_classes_and_funcs(mod: ModuleType, root: str, encountered=None):
     if encountered is None:
         encountered = WeakSet()
@@ -774,7 +808,6 @@ def warn_with_traceback(message, category, filename, lineno, file=None, line=Non
     --------
     http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
     """
-    import warnings
     import traceback
     traceback.print_stack()
     log = file if hasattr(file, 'write') else sys.stderr
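# Usage sketch for the `deprecated_arg_names` decorator defined above, assuming
# this branch of scanpy is installed. `toy`, `old_arg`, and `new_arg` are
# illustrative names only, not part of scanpy's API.
from scanpy.utils import deprecated_arg_names

@deprecated_arg_names({"old_arg": "new_arg"})
def toy(new_arg=None):
    # callers passing `old_arg=...` get a DeprecationWarning and the value is
    # forwarded to `new_arg`, mirroring the `downsample_counts` change above
    return new_arg

toy(old_arg=5)  # warns, then behaves like toy(new_arg=5)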