update test_basic and datasets
VolkerBergen committed Aug 22, 2018
1 parent 4d8cba9 commit 4937188
Showing 4 changed files with 49 additions and 33 deletions.
11 changes: 11 additions & 0 deletions docs/source/index.rst
@@ -1,6 +1,8 @@
 scvelo – stochastic single cell RNA velocity
 ==========================================================
 
+|PyPI| |Docs| |travis|
+
 **scvelo** is a scalable toolkit for estimating and analyzing stochastic RNA velocities in single cells.
 
 RNA velocity is the time derivative of mRNA abundance obtained by distinguishing unspliced (precursor) from spliced
@@ -12,6 +14,15 @@ incorporates intrinsic expression variability.
 
 It is compatible with scanpy_ (`Wolf et al., 2018 <https://doi.org/10.1186/s13059-017-1382-0>`_).
 
+.. |PyPI| image:: https://img.shields.io/pypi/v/scvelo.svg
+   :target: https://pypi.org/project/scvelo
+
+.. |Docs| image:: https://readthedocs.org/projects/scvelo/badge/?version=latest
+   :target: https://scvelo.readthedocs.io
+
+.. |travis| image:: https://travis-ci.org/theislab/scvelo.svg?branch=master
+   :target: https://travis-ci.org/theislab/scvelo
+
 .. _velocyto: http://velocyto.org/
 .. _scanpy: https://github.com/theislab/scanpy
 
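To make the definition above concrete, here is a minimal, self-contained sketch of the steady-state idea that RNA velocity builds on (the deterministic model behind velocyto_, La Manno et al., 2018). It is an illustration only, not scvelo's stochastic implementation, and the helper name steady_state_velocity is invented for this example.

import numpy as np

def steady_state_velocity(u, s):
    """Velocity of one gene across cells: v = u - gamma * s, where gamma is the
    steady-state ratio fitted by least squares through the origin (slope of u on s)."""
    gamma = u @ s / (s @ s)   # steady-state ratio of unspliced to spliced counts
    return u - gamma * s      # > 0: induction (up-regulation), < 0: repression

# toy check with Poisson counts, mirroring the toy_data generator added below
rng = np.random.RandomState(0)
s = rng.poisson(5, size=300).astype(float)
u = .5 * s + rng.poisson(1, size=300)
print(steady_state_velocity(u, s)[:5])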
13 changes: 11 additions & 2 deletions scvelo/datasets.py
@@ -1,7 +1,8 @@
 """Builtin Datasets.
 """
 
-from scanpy.api import read
+import scanpy.api as sc
+from scipy import sparse, stats
 
 
 def dentategyrus():
@@ -12,5 +13,13 @@ def dentategyrus():
     """
     filename = 'data/DentateGyrus/10X43_1.loom'
     url = 'http://pklab.med.harvard.edu/velocyto/DG1/10X43_1.loom'
-    adata = read(filename, backup_url=url, cleanup=True, sparse=True, cache=True)
+    adata = sc.read(filename, backup_url=url, cleanup=True, sparse=True, cache=True)
     return adata
+
+
+def toy_data(n_obs, n_vars):
+    """Random AnnData with Poisson-distributed spliced/unspliced layers, for tests."""
+    adata = sc.AnnData(sparse.random(n_obs, n_vars, data_rvs=stats.poisson(1).rvs, density=.6, format='csr'))
+    adata.layers['spliced'] = adata.X
+    # unspliced layer correlated with spliced: half the spliced counts plus Poisson noise
+    adata.layers['unspliced'] = \
+        .5 * adata.X + .3 * sparse.random(n_obs, n_vars, density=.6, data_rvs=stats.poisson(1).rvs, format='csr')
+    return adata
13 changes: 12 additions & 1 deletion scvelo/preprocessing/__init__.py
@@ -46,4 +46,15 @@ def read_loom_layers(file_name, backup_url=None):
 
     adata = AnnData(X, obs=obs, var=var, layers=layers)
 
-    return adata
+    return adata
+
+
+def recipe_velocity(adata):
+    """Minimal preprocessing recipe: filtering, normalization, PCA, kNN graph and moments."""
+    from scanpy.api.pp import \
+        filter_genes, filter_genes_dispersion, normalize_per_cell, pca, neighbors
+    filter_genes(adata, min_counts=10)
+    filter_genes_dispersion(adata)
+    normalize_per_cell(adata, layers='all')
+    pca(adata, n_comps=10)
+    neighbors(adata, use_rep='X_pca')
+    moments(adata)  # `moments` (exposed as scv.pp.moments) is provided elsewhere in scvelo's preprocessing package
+    return adata    # hand back the annotated object, as the updated test expects
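For orientation, a short usage sketch of this recipe as the updated test below exercises it; it assumes recipe_velocity hands the annotated object back and that the moments step fills the Ms/Mu layers, which is what test_einsum relies on.

import scvelo as scv

adata = scv.datasets.toy_data(n_obs=100, n_vars=100)   # toy AnnData with spliced/unspliced layers
adata = scv.pp.recipe_velocity(adata)                   # filter, normalize, PCA, neighbors, moments
Ms, Mu = adata.layers['Ms'], adata.layers['Mu']         # neighborhood-smoothed spliced/unspliced moments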
45 changes: 15 additions & 30 deletions tests/test_basic.py
@@ -1,40 +1,25 @@
+from scvelo.tools.utils import prod_sum_obs, prod_sum_var, norm
 import numpy as np
 import scvelo as scv
-import scanpy.api as sc
-from scvelo.tools.utils import *
-from scipy import sparse, stats
-
-
-def test_data(n_obs=1000, n_vars=100):
-    adata = sc.AnnData(sparse.random(n_obs, n_vars, data_rvs=stats.poisson(1).rvs, density=.6, format='csr'))
-    adata.layers['spliced'] = adata.X
-    adata.layers['unspliced'] = \
-        .5 * adata.X + .3 * sparse.random(n_obs, n_vars, density=.6, data_rvs=stats.poisson(1).rvs, format='csr')
-
-    sc.pp.filter_genes(adata, min_counts=10)
-    sc.pp.filter_genes_dispersion(adata)
-    sc.pp.normalize_per_cell(adata, layers='all')
-    sc.pp.pca(adata, n_comps=10)
-    sc.pp.neighbors(adata, use_rep='X_pca')
-    scv.pp.moments(adata)
-    return adata
 
 
 def test_einsum():
-    adata = test_data()
+    adata = scv.datasets.toy_data(n_obs=100, n_vars=100)
+    adata = scv.pp.recipe_velocity(adata)
     Ms, Mu = adata.layers['Ms'], adata.layers['Mu']
     assert np.allclose(prod_sum_obs(Ms, Mu), np.sum(Ms * Mu, 0))
     assert np.allclose(prod_sum_var(Ms, Mu), np.sum(Ms * Mu, 1))
     assert np.allclose(norm(Ms), np.linalg.norm(Ms, axis=1))
 
 
-# def test_velocity_graph():
-#     adata = test_data()
-#     scv.tl.velocity_graph(adata)
-#
-#     graph1 = adata.uns['velocity_graph'].copy()
-#
-#     scv.tl.velocity_graph(adata, n_jobs=2)
-#     graph2 = adata.uns['velocity_graph'].copy()
-#
-#     assert np.allclose((scv.tl.transition_matrix(adata) > 0).toarray(), (graph1 > 0).toarray())
-#     assert np.allclose(graph1.toarray(), graph2.toarray())
+def test_velocity_graph():
+    adata = scv.datasets.toy_data(n_obs=1000, n_vars=100)
+    scv.tl.velocity_graph(adata)
+
+    graph1 = adata.uns['velocity_graph'].copy()
+
+    scv.tl.velocity_graph(adata, n_jobs=2)
+    graph2 = adata.uns['velocity_graph'].copy()
+
+    assert np.allclose((scv.tl.transition_matrix(adata) > 0).toarray(), (graph1 > 0).toarray())
+    assert np.allclose(graph1.toarray(), graph2.toarray())
