
Commit

Merge e19e8af into 266f4c2
mdeff committed Dec 4, 2018
2 parents 266f4c2 + e19e8af commit b2e509a
Showing 11 changed files with 597 additions and 174 deletions.
2 changes: 1 addition & 1 deletion doc/tutorials/intro.rst
@@ -98,7 +98,7 @@ smoothness of a signal.

>>> G.compute_differential_operator()
>>> G.D.shape
- (60, 30)
+ (30, 60)
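
A quick way to see what the new shape convention means (a sketch, not part of the tutorial; ``graphs.Sensor`` is a hypothetical stand-in for the tutorial's random graph):

import numpy as np
from pygsp import graphs

G = graphs.Sensor(30, seed=42)   # hypothetical stand-in for the graph built above
G.compute_differential_operator()
assert G.D.shape == (G.N, G.Ne)  # one row per vertex, one column per edge
assert np.allclose(G.L.toarray(), G.D.dot(G.D.T).toarray())  # L = D D^T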

.. note::
Note that we called :meth:`pygsp.graphs.Graph.compute_fourier_basis` and
2 changes: 1 addition & 1 deletion doc/tutorials/optimization.rst
@@ -76,7 +76,7 @@ We start with the graph TV regularization. We will use the :class:`pyunlocbox.so
>>>
>>> # Define the solver
>>> G.compute_differential_operator()
- >>> L = G.D.toarray()
+ >>> L = G.D.T.toarray()
>>> step = 0.999 / (1 + np.linalg.norm(L))
>>> solver = pyunlocbox.solvers.mlfbf(L=L, step=step)
>>>
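
The operator passed to the solver has to map vertex signals to edge signals. With the new convention ``G.D`` has shape (n_vertices, n_edges), hence the transpose above; it acts like ``G.grad``. A minimal sanity check (a sketch, using a small stand-in graph rather than the tutorial's):

import numpy as np
from pygsp import graphs

G = graphs.Sensor(20, seed=1)    # hypothetical stand-in graph
G.compute_differential_operator()
x = np.random.default_rng(0).normal(size=G.N)
assert G.D.T.shape == (G.Ne, G.N)            # maps vertex signals to edge signals
assert np.allclose(G.D.T.dot(x), G.grad(x))  # same operation as the gradient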
1 change: 1 addition & 0 deletions pygsp/graphs/__init__.py
@@ -56,6 +56,7 @@
Graph.grad
Graph.div
Graph.dirichlet_energy
Transforms
----------
275 changes: 218 additions & 57 deletions pygsp/graphs/difference.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-

from __future__ import division

import numpy as np
from scipy import sparse

@@ -28,13 +30,57 @@ def D(self):
def compute_differential_operator(self):
r"""Compute the graph differential operator (cached).
- The differential operator is a matrix such that
+ The differential operator is the matrix :math:`D` such that
.. math:: L = D D^\top,
where :math:`L` is the graph Laplacian (combinatorial or normalized).
It is used to compute the gradient and the divergence of graph
signals (see :meth:`grad` and :meth:`div`).
The differential operator computes the gradient and divergence of
signals, and the Laplacian computes the divergence of the gradient, as
follows:
.. math:: z = L x = D y = D D^\top x,
where :math:`y = D^\top x = \nabla_\mathcal{G} x` is the gradient of
:math:`x` and :math:`z = D y = \operatorname{div}_\mathcal{G} y` is the
divergence of :math:`y`. See :meth:`grad` and :meth:`div` for details.
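
A small numpy sketch of these identities (illustrative only, not part of the commit; the 3-vertex graph and its incidence matrix are the ones used in the examples below):

import numpy as np

W = np.array([[0., 2., 0.],
              [2., 0., 1.],
              [0., 1., 0.]])
L = np.diag(W.sum(axis=1)) - W            # combinatorial Laplacian
D = np.array([[-np.sqrt(2.),  0.],        # incidence matrix: one column per edge;
              [ np.sqrt(2.), -1.],        # undirected graph, so each edge is kept once
              [ 0.,           1.]])       # and the 1/sqrt(2) factor is dropped
x = np.array([1., -2., 0.5])              # a vertex signal
y = D.T @ x                               # gradient, one value per edge
z = D @ y                                 # divergence, back on the vertices
assert np.allclose(L, D @ D.T)            # L = D D^T
assert np.allclose(z, L @ x)              # z = L x = D D^T x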
The difference operator is actually an incidence matrix of the graph,
defined as
.. math:: D[i, k] =
\begin{cases}
-\sqrt{W[i, j] / 2} &
\text{if } e_k = (v_i, v_j) \text{ for some } j, \\
+\sqrt{W[i, j] / 2} &
\text{if } e_k = (v_j, v_i) \text{ for some } j, \\
0 & \text{otherwise}
\end{cases}
- .. math:: L = D^T D,
for the combinatorial Laplacian, and
- where :math:`D` is the differential operator and :math:`L` is the graph
- Laplacian. It is used to compute the gradient and the divergence of a
- graph signal, see :meth:`grad` and :meth:`div`.
.. math:: D[i, k] =
\begin{cases}
-\sqrt{W[i, j] / 2 / d[i]} &
\text{if } e_k = (v_i, v_j) \text{ for some } j, \\
+\sqrt{W[i, j] / 2 / d[i]} &
\text{if } e_k = (v_j, v_i) \text{ for some } j, \\
0 & \text{otherwise}
\end{cases}
for the normalized Laplacian, where :math:`v_i \in \mathcal{V}` is a
vertex, :math:`e_k = (v_i, v_j) \in \mathcal{E}` is an edge from
:math:`v_i` to :math:`v_j`, :math:`W[i, j]` is the weight :attr:`W` of
the edge :math:`(v_i, v_j)`, :math:`d[i]` is the degree :attr:`dw` of
vertex :math:`v_i`.
For undirected graphs, only half the edges are kept (the upper
triangular part of the adjacency matrix) in the interest of space and
time. In that case, the :math:`1/\sqrt{2}` factor disappears from the
above equations so that :math:`L = D D^\top` holds at all times.
The result is cached and accessible by the :attr:`D` property.
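
A short check (a sketch using the PyGSP calls from the examples below) that the matrix built here factorizes both Laplacians as :math:`L = D D^\top`:

import numpy as np
from pygsp import graphs

adjacency = np.array([[0, 2, 0],
                      [2, 0, 1],
                      [0, 1, 0]])
for lap_type in ('combinatorial', 'normalized'):
    graph = graphs.Graph(adjacency)
    graph.compute_laplacian(lap_type)
    graph.compute_differential_operator()
    D = graph.D.toarray()
    assert np.allclose(graph.L.toarray(), D @ D.T)  # holds for both Laplacians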
@@ -45,110 +91,225 @@ def compute_differential_operator(self):
Examples
--------
The difference operator is an incidence matrix.
Example with an undirected graph.
>>> adjacency = np.array([
... [0, 2, 0],
... [2, 0, 1],
... [0, 1, 0],
... ])
>>> graph = graphs.Graph(adjacency)
>>> graph.compute_laplacian('combinatorial')
>>> graph.compute_differential_operator()
>>> graph.D.toarray()
array([[-1.41421356, 0. ],
[ 1.41421356, -1. ],
[ 0. , 1. ]])
>>> graph.compute_laplacian('normalized')
>>> graph.compute_differential_operator()
>>> graph.D.toarray()
array([[-1. , 0. ],
[ 0.81649658, -0.57735027],
[ 0. , 1. ]])
Example with a directed graph.
>>> adjacency = np.array([
... [0, 2, 0],
... [2, 0, 1],
... [0, 0, 0],
... ])
>>> graph = graphs.Graph(adjacency)
>>> graph.compute_laplacian('combinatorial')
>>> graph.compute_differential_operator()
>>> graph.D.toarray()
array([[-1. , 1. , 0. ],
[ 1. , -1. , -0.70710678],
[ 0. , 0. , 0.70710678]])
>>> graph.compute_laplacian('normalized')
>>> graph.compute_differential_operator()
>>> graph.D.toarray()
array([[-0.70710678, 0.70710678, 0. ],
[ 0.63245553, -0.63245553, -0.4472136 ],
[ 0. , 0. , 1. ]])
The graph Laplacian acts on a signal as the divergence of the gradient.
>>> G = graphs.Logo()
- >>> G.N, G.Ne
- (1130, 3131)
>>> G.compute_differential_operator()
- >>> G.D.shape == (G.Ne, G.N)
>>> s = np.random.normal(size=G.N)
>>> s_grad = G.D.T.dot(s)
>>> s_lap = G.D.dot(s_grad)
>>> np.linalg.norm(s_lap - G.L.dot(s)) < 1e-10
True
"""

- v_in, v_out, weights = self.get_edge_list()
+ sources, targets, weights = self.get_edge_list()

- n = len(v_in)
- Dr = np.concatenate((np.arange(n), np.arange(n)))
- Dc = np.empty(2*n)
- Dc[:n] = v_in
- Dc[n:] = v_out
- Dv = np.empty(2*n)
+ n = self.n_edges
+ rows = np.concatenate([sources, targets])
+ columns = np.concatenate([np.arange(n), np.arange(n)])
+ values = np.empty(2*n)

if self.lap_type == 'combinatorial':
- Dv[:n] = np.sqrt(weights)
- Dv[n:] = -Dv[:n]
+ values[:n] = -np.sqrt(weights)
+ values[n:] = -values[:n]
elif self.lap_type == 'normalized':
- Dv[:n] = np.sqrt(weights / self.dw[v_in])
- Dv[n:] = -np.sqrt(weights / self.dw[v_out])
+ values[:n] = -np.sqrt(weights / self.dw[sources])
+ values[n:] = +np.sqrt(weights / self.dw[targets])
else:
raise ValueError('Unknown lap_type {}'.format(self.lap_type))

- self._D = sparse.csc_matrix((Dv, (Dr, Dc)), shape=(n, self.N))
+ if self.is_directed():
+     values /= np.sqrt(2)

+ self._D = sparse.csc_matrix((values, (rows, columns)),
+                             shape=(self.n_vertices, self.n_edges))
+ self.D.eliminate_zeros()  # Self-loops introduce stored zeros.
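
# Usage sketch (an illustration, not part of the commit): each column of D
# corresponds to one edge from get_edge_list(), with a negative entry at the
# source vertex and a positive one at the target vertex.
import numpy as np
from pygsp import graphs

graph = graphs.Path(4, directed=True)
graph.compute_differential_operator()
sources, targets, _ = graph.get_edge_list()
D = graph.D.toarray()
for k, (i, j) in enumerate(zip(sources, targets)):
    assert D[i, k] < 0 < D[j, k]  # -sqrt(w/2) at the source, +sqrt(w/2) at the target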

- def grad(self, s):
-     r"""Compute the gradient of a graph signal.
+ def grad(self, x):
+     r"""Compute the gradient of a signal defined on the vertices.
- The gradient of a signal :math:`s` is defined as
+ The gradient :math:`y` of a signal :math:`x` is defined as
- .. math:: y = D s,
+ .. math:: y = \nabla_\mathcal{G} x = D^\top x,
where :math:`D` is the differential operator :attr:`D`.
The value of the gradient on the edge :math:`e_k = (v_i, v_j)` from
:math:`v_i` to :math:`v_j` with weight :math:`W[i, j]` is
.. math:: y[k] = D[i, k] x[i] + D[j, k] x[j]
= \sqrt{\frac{W[i, j]}{2}} (x[j] - x[i])
for the combinatorial Laplacian, and
.. math:: y[k] = \sqrt{\frac{W[i, j]}{2}} \left(
\frac{x[j]}{\sqrt{d[j]}} - \frac{x[i]}{\sqrt{d[i]}}
\right)
for the normalized Laplacian.
For undirected graphs, only half the edges are kept and the
:math:`1/\sqrt{2}` factor disappears from the above equations. See
:meth:`compute_differential_operator` for details.
Parameters
----------
- s : ndarray
-     Signal of length G.N living on the nodes.
+ x : ndarray
+     Signal of length :attr:`n_vertices` living on the vertices.
Returns
-------
- s_grad : ndarray
-     Gradient signal of length G.Ne/2 living on the edges (non-directed
-     graph).
+ y : ndarray
+     Gradient signal of length :attr:`n_edges` living on the edges.
See also
--------
compute_differential_operator
- div : compute the divergence
+ div : compute the divergence of an edge signal
+ dirichlet_energy : compute the norm of the gradient
Examples
--------
- >>> G = graphs.Logo()
- >>> G.N, G.Ne
- (1130, 3131)
- >>> s = np.random.normal(size=G.N)
- >>> s_grad = G.grad(s)
- >>> s_div = G.div(s_grad)
- >>> np.linalg.norm(s_div - G.L.dot(s)) < 1e-10
- True
>>> graph = graphs.Path(4, directed=False, lap_type='combinatorial')
>>> graph.compute_differential_operator()
>>> graph.grad(np.array([0, 2, 4, 2]))
array([ 2., 2., -2.])
>>> graph = graphs.Path(4, directed=True, lap_type='combinatorial')
>>> graph.compute_differential_operator()
>>> graph.grad(np.array([0, 2, 4, 2]))
array([ 1.41421356, 1.41421356, -1.41421356])
>>> graph = graphs.Path(4, directed=False, lap_type='normalized')
>>> graph.compute_differential_operator()
>>> graph.grad(np.array([0, 2, 4, 2]))
array([ 1.41421356, 1.41421356, -0.82842712])
>>> graph = graphs.Path(4, directed=True, lap_type='normalized')
>>> graph.compute_differential_operator()
>>> graph.grad(np.array([0, 2, 4, 2]))
array([ 1.41421356, 1.41421356, -0.82842712])
"""
- if self.N != s.shape[0]:
+ if self.N != x.shape[0]:
      raise ValueError('Signal length should be the number of nodes.')
- return self.D.dot(s)
+ return self.D.T.dot(x)
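
# Numeric sketch of the combinatorial formula above (illustrative, reusing the
# API from the examples): on an undirected path, the gradient on edge
# (v_i, v_j) is sqrt(W[i, j]) * (x[j] - x[i]).
import numpy as np
from pygsp import graphs

graph = graphs.Path(4, directed=False, lap_type='combinatorial')
graph.compute_differential_operator()
x = np.array([0., 2., 4., 2.])
sources, targets, weights = graph.get_edge_list()
expected = np.sqrt(weights) * (x[targets] - x[sources])  # no 1/sqrt(2): undirected
assert np.allclose(graph.grad(x), expected)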

- def div(self, s):
-     r"""Compute the divergence of a graph signal.
+ def div(self, y):
+     r"""Compute the divergence of a signal defined on the edges.
- The divergence of a signal :math:`s` is defined as
+ The divergence :math:`z` of a signal :math:`y` is defined as
- .. math:: y = D^T s,
+ .. math:: z = \operatorname{div}_\mathcal{G} y = D y,
where :math:`D` is the differential operator :attr:`D`.
The value of the divergence on the vertex :math:`v_i` is
.. math:: z[i] = \sum_k D[i, k] y[k]
= \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}}
\sqrt{\frac{W[j, i]}{2}} y[k]
- \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}}
\sqrt{\frac{W[i, j]}{2}} y[k]
for the combinatorial Laplacian, and
.. math:: z[i] = \sum_k D[i, k] y[k]
= \sum_{\{k,j | e_k=(v_j, v_i) \in \mathcal{E}\}}
\sqrt{\frac{W[j, i]}{2 d[i]}} y[k]
- \sum_{\{k,j | e_k=(v_i, v_j) \in \mathcal{E}\}}
\sqrt{\frac{W[i, j]}{2 d[i]}} y[k]
for the normalized Laplacian.
For undirected graphs, only half the edges are kept and the
:math:`1/\sqrt{2}` factor disappears from the above equations. See
:meth:`compute_differential_operator` for details.
Parameters
----------
- s : ndarray
-     Signal of length G.Ne/2 living on the edges (non-directed graph).
+ y : ndarray
+     Signal of length :attr:`n_edges` living on the edges.
Returns
-------
- s_div : ndarray
-     Divergence signal of length G.N living on the nodes.
+ z : ndarray
+     Divergence signal of length :attr:`n_vertices` living on the
+     vertices.
See also
--------
compute_differential_operator
- grad : compute the gradient
+ grad : compute the gradient of a vertex signal
Examples
--------
- >>> G = graphs.Logo()
- >>> G.N, G.Ne
- (1130, 3131)
- >>> s = np.random.normal(size=G.Ne)
- >>> s_div = G.div(s)
- >>> s_grad = G.grad(s_div)
>>> graph = graphs.Path(4, directed=False, lap_type='combinatorial')
>>> graph.compute_differential_operator()
>>> graph.div(np.array([2, -2, 0]))
array([-2., 4., -2., 0.])
>>> graph = graphs.Path(4, directed=True, lap_type='combinatorial')
>>> graph.compute_differential_operator()
>>> graph.div(np.array([2, -2, 0]))
array([-1.41421356, 2.82842712, -1.41421356, 0. ])
>>> graph = graphs.Path(4, directed=False, lap_type='normalized')
>>> graph.compute_differential_operator()
>>> graph.div(np.array([2, -2, 0]))
array([-2. , 2.82842712, -1.41421356, 0. ])
>>> graph = graphs.Path(4, directed=True, lap_type='normalized')
>>> graph.compute_differential_operator()
>>> graph.div(np.array([2, -2, 0]))
array([-2. , 2.82842712, -1.41421356, 0. ])
"""
- if self.Ne != s.shape[0]:
+ if self.Ne != y.shape[0]:
      raise ValueError('Signal length should be the number of edges.')
- return self.D.T.dot(s)
+ return self.D.dot(y)
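
# Sketch (same assumptions as the examples above) of two identities the
# divergence satisfies: it is the adjoint of the gradient, and the Laplacian
# is the divergence of the gradient.
import numpy as np
from pygsp import graphs

graph = graphs.Path(5, directed=False, lap_type='combinatorial')
graph.compute_differential_operator()
rng = np.random.default_rng(0)
x = rng.normal(size=graph.N)   # vertex signal
y = rng.normal(size=graph.Ne)  # edge signal
assert np.isclose(graph.grad(x).dot(y), x.dot(graph.div(y)))  # <D^T x, y> = <x, D y>
assert np.allclose(graph.div(graph.grad(x)), graph.L.dot(x))  # L x = div(grad x)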
8 changes: 5 additions & 3 deletions pygsp/graphs/fourier.py
@@ -181,9 +181,11 @@ def compute_fourier_basis(self, n_eigenvectors=None, recompute=False):
assert -1e-12 < self._e[0] < 1e-12
self._e[0] = 0

- if self.lap_type == 'normalized':
-     # Spectrum bounded by [0, 2].
-     assert self._e[-1] <= 2
+ # Bounded spectrum.
+ if self.lap_type == 'combinatorial':
+     assert self._e[-1] <= 2 * np.max(self.dw) + 1e-12
+ elif self.lap_type == 'normalized':
+     assert self._e[-1] <= 2 + 1e-12

assert np.max(self._e) == self._e[-1]
if n_eigenvectors == self.N:
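
# Numeric sketch of the bounds asserted above (an illustration; it assumes
# graphs.Graph accepts lap_type, as elsewhere in PyGSP): the combinatorial
# spectrum is bounded by twice the maximum weighted degree, the normalized
# spectrum by 2.
import numpy as np
from pygsp import graphs

rng = np.random.default_rng(0)
W = rng.uniform(size=(20, 20))
W = (W + W.T) / 2       # symmetric weights
np.fill_diagonal(W, 0)  # no self-loops

G = graphs.Graph(W)     # combinatorial Laplacian by default
G.compute_fourier_basis()
assert G.e[-1] <= 2 * np.max(G.dw) + 1e-12

G = graphs.Graph(W, lap_type='normalized')
G.compute_fourier_basis()
assert G.e[-1] <= 2 + 1e-12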
