Mesh Laplacian computation #4187

Merged: 17 commits, Mar 11, 2022
Changes from 6 commits
25 changes: 25 additions & 0 deletions .pytest_cache/v/cache/lastfailed
@@ -0,0 +1,25 @@
{
"test/loader/test_cluster.py::test_cluster_gcn": true,
"test/nn/conv/test_appnp.py::test_appnp": true,
"test/nn/conv/test_arma_conv.py::test_arma_conv": true,
"test/nn/conv/test_cluster_gcn_conv.py::test_cluster_gcn_conv": true,
"test/nn/conv/test_eg_conv.py::test_eg_conv": true,
"test/nn/conv/test_eg_conv.py::test_eg_conv_multiple_aggregators": true,
"test/nn/conv/test_gated_graph_conv.py::test_gated_graph_conv": true,
"test/nn/conv/test_gcn2_conv.py::test_gcn2_conv": true,
"test/nn/conv/test_gcn_conv.py::test_gcn_conv": true,
"test/nn/conv/test_gcn_conv.py::test_gcn_conv_with_decomposed_layers": true,
"test/nn/conv/test_gin_conv.py::test_gin_conv": true,
"test/nn/conv/test_graph_conv.py::test_graph_conv": true,
"test/nn/conv/test_lg_conv.py::test_lg_conv": true,
"test/nn/conv/test_mf_conv.py::test_mf_conv": true,
"test/nn/conv/test_pdn_conv.py::test_pdn_conv": true,
"test/nn/conv/test_rgcn_conv.py::test_rgcn_conv[RGCNConv-conf0]": true,
"test/nn/conv/test_rgcn_conv.py::test_rgcn_conv[RGCNConv-conf1]": true,
"test/nn/conv/test_rgcn_conv.py::test_rgcn_conv[RGCNConv-conf2]": true,
"test/nn/conv/test_sage_conv.py::test_sage_conv": true,
"test/nn/conv/test_sg_conv.py::test_sg_conv": true,
"test/nn/conv/test_signed_conv.py::test_signed_conv": true,
"test/nn/conv/test_tag_conv.py::test_tag_conv": true,
"test/nn/test_sequential.py::test_sequential_jittable": true
}
3 changes: 3 additions & 0 deletions .pytest_cache/v/cache/nodeids
@@ -0,0 +1,3 @@
[
"test/utils/test_get_mesh_laplacian.py::test_get_mesh_laplacian"
]
59 changes: 59 additions & 0 deletions test/utils/test_get_mesh_laplacian.py
@@ -0,0 +1,59 @@
import torch

from torch_geometric.utils import get_mesh_laplacian


def test_get_mesh_laplacian():
    # Cube:
    pos = torch.Tensor([[1.0, 1.0, 1.0], [1.0, -1.0, 1.0],
                        [-1.0, -1.0, 1.0], [-1.0, 1.0, 1.0],
                        [1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
                        [-1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]])
    face = torch.Tensor([[0, 1, 2], [0, 3, 2], [4, 5, 1],
                         [4, 0, 1], [7, 6, 5], [7, 4, 5],
                         [3, 2, 6], [3, 7, 6], [4, 0, 3],
                         [4, 7, 3], [1, 5, 6], [1, 2, 6]]).long().t()

    lap = get_mesh_laplacian(pos, face)
    assert torch.all(lap[0] == torch.Tensor(
        [[0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2,
          2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5,
          5, 5, 5, 6, 6, 6, 6, 6, 7, 7, 7, 7,
          0, 1, 2, 3, 4, 5, 6, 7],
         [1, 2, 3, 4, 0, 2, 4, 5, 6, 0, 1, 3,
          6, 0, 2, 4, 6, 7, 0, 1, 3, 5, 7, 1,
          4, 6, 7, 1, 2, 3, 5, 7, 3, 4, 5, 6,
          0, 1, 2, 3, 4, 5, 6, 7]]))
    assert torch.allclose(lap[1], torch.Tensor(
        [-1.0, -0.0, -1.0, -1.0, -1.0, -1.0,
         -0.0, -1.0, -0.0, -0.0, -1.0, -1.0,
         -1.0, -1.0, -1.0, -0.0, -0.0, -1.0,
         -1.0, -0.0, -0.0, -1.0, -1.0, -1.0,
         -1.0, -1.0, -0.0, -0.0, -1.0, -0.0,
         -1.0, -1.0, -1.0, -1.0, -0.0, -1.0,
         1.125, 0.9, 1.125, 0.9,
         0.9, 1.125, 0.9, 1.125]))

    # Irregular triangular prism:
    pos = torch.Tensor([[0.0, 0.0, 0.0], [4.0, 0.0, 0.0],
                        [0.0, 0.0, -3.0], [1.0, 5.0, -1.0],
                        [3.0, 5.0, -1.0], [2.0, 5.0, -2.0]])
    face = torch.Tensor([[0, 1, 2], [3, 4, 5], [0, 1, 4], [0, 3, 4],
                         [1, 2, 5], [1, 4, 5], [2, 0, 3],
                         [2, 5, 3]]).long().t()

    lap = get_mesh_laplacian(pos, face)
    assert torch.all(lap[0] == torch.Tensor(
        [[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
          3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5,
          0, 1, 2, 3, 4, 5],
         [1, 2, 3, 4, 0, 2, 4, 5, 0, 1, 3, 5,
          0, 2, 4, 5, 0, 1, 3, 5, 1, 2, 3, 4,
          0, 1, 2, 3, 4, 5]]))
    assert torch.allclose(lap[1], torch.Tensor(
        [-0.938834, -1.451131, -0.490290,
         -0.000000, -0.938834, -0.378790,
         -0.577017, -0.077878, -1.451131,
         -0.378790, -0.163153, -0.344203,
         -0.490290, -0.163153, -1.421842,
         -2.387739, -0.000000, -0.577017,
         -1.421842, -2.550610, -0.077878,
         -0.344203, -2.387739, -2.550610,
         0.298518, 0.183356, 0.233502,
         0.761257, 0.688181, 0.768849]))
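
The returned `(edge_index, edge_weight)` pair is a sparse COO representation of the Laplacian, with the diagonal entries appended as self-loops at the end. As a minimal sketch (not part of this PR), the cube mesh from the test above can be densified for inspection with `to_dense_adj`, which is already available in `torch_geometric.utils`; the variable names below are illustrative only:

```python
import torch

from torch_geometric.utils import get_mesh_laplacian, to_dense_adj

# Cube mesh, taken from the test above:
pos = torch.Tensor([[1.0, 1.0, 1.0], [1.0, -1.0, 1.0],
                    [-1.0, -1.0, 1.0], [-1.0, 1.0, 1.0],
                    [1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
                    [-1.0, -1.0, -1.0], [-1.0, 1.0, -1.0]])
face = torch.Tensor([[0, 1, 2], [0, 3, 2], [4, 5, 1],
                     [4, 0, 1], [7, 6, 5], [7, 4, 5],
                     [3, 2, 6], [3, 7, 6], [4, 0, 3],
                     [4, 7, 3], [1, 5, 6], [1, 2, 6]]).long().t()

edge_index, edge_weight = get_mesh_laplacian(pos, face)

# Densify the sparse COO Laplacian into an 8 x 8 matrix for inspection:
L = to_dense_adj(edge_index, edge_attr=edge_weight).squeeze(0)
print(L.shape)  # torch.Size([8, 8])
```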
2 changes: 2 additions & 0 deletions torch_geometric/utils/__init__.py
@@ -11,6 +11,7 @@
from .subgraph import get_num_hops, subgraph, k_hop_subgraph
from .homophily import homophily
from .get_laplacian import get_laplacian
from .get_mesh_laplacian import get_mesh_laplacian
from .to_dense_batch import to_dense_batch
from .to_dense_adj import to_dense_adj
from .sparse import dense_to_sparse
@@ -52,6 +53,7 @@
'k_hop_subgraph',
'homophily',
'get_laplacian',
'get_mesh_laplacian',
'to_dense_batch',
'to_dense_adj',
'dense_to_sparse',
73 changes: 73 additions & 0 deletions torch_geometric/utils/get_mesh_laplacian.py
@@ -0,0 +1,73 @@
import torch
from torch_scatter import scatter_add

from torch_geometric.utils import add_self_loops


def get_mesh_laplacian(pos, face):
""" Computes the mesh Laplacian of the mesh given by
daniel-unyi-42 marked this conversation as resolved.
Show resolved Hide resolved
:obj:`pos` and :obj:`face`. It is computed as
:math:` \mathbf{L}_{ij} = \begin{cases}
daniel-unyi-42 marked this conversation as resolved.
Show resolved Hide resolved
\frac{\cot \angle_{ikj} + \cot \angle_{ilj}}{2 a_{ij}} &
\mbox{if } i, j \mbox{ is an edge,} \\
\sum_{j \in N(i)}{L_{ij}} &
\mbox{if } i \mbox{ is in the diagonal,} \\
0 \mbox{ otherwise.}
\end{cases}`
where :math:`a_{ij}` is the local area element,
i.e. one-third of the neighbouring triangle's area.

Args:
pos (Tensor): The node positions.
face (LongTensor): The face indices.
"""

assert pos.shape[1] == 3
assert face.shape[0] == 3

device = pos.device
dtype = pos.dtype
num_nodes = pos.shape[0]
cot_weight = torch.Tensor().to(dtype).to(device)
area_weight = torch.Tensor().to(dtype).to(device)
edge_index = torch.Tensor().long().to(device)

def add_edge(left, centre, right):
left_pos, central_pos, right_pos = pos[left], pos[centre], pos[right]
left_vec = left_pos - central_pos
right_vec = right_pos - central_pos
dot = torch.einsum('ij, ij -> i', left_vec, right_vec)
cross = torch.norm(torch.cross(left_vec, right_vec, dim=1), dim=1)
cot = dot / cross # cos / sin
nonlocal cot_weight, area_weight, edge_index
cot_weight = torch.cat([cot_weight, cot / 2.0, cot / 2.0])
# one-third of a triangle's area is cross / 6.0
# since each edge is accounted twice, we compute cross / 12.0 instead
area_weight = torch.cat([area_weight, cross / 12.0, cross / 12.0])
edge_index = torch.cat([
daniel-unyi-42 marked this conversation as resolved.
Show resolved Hide resolved
edge_index,
torch.stack([left, right], dim=1),
torch.stack([right, left], dim=1)
])

# add all 3 edges of the triangles
add_edge(face[2], face[0], face[1])
add_edge(face[0], face[1], face[2])
add_edge(face[1], face[2], face[0])

# eliminate duplicate matrix entries by adding them together
index_linearizer = torch.Tensor([num_nodes, 1]).to(device)
daniel-unyi-42 marked this conversation as resolved.
Show resolved Hide resolved
lin_index = torch.matmul(edge_index.float(), index_linearizer).long()
y, idx = lin_index.unique(return_inverse=True)
edge_index = torch.stack([y // num_nodes, y % num_nodes])
cot_weight = scatter_add(cot_weight, idx, dim=0)
area_weight = scatter_add(area_weight, idx, dim=0)
daniel-unyi-42 marked this conversation as resolved.
Show resolved Hide resolved

# compute the diagonal part
row, col = edge_index
cot_deg = scatter_add(cot_weight, row, dim=0, dim_size=num_nodes)
area_deg = scatter_add(area_weight, row, dim=0, dim_size=num_nodes)
deg = cot_deg / area_deg
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
edge_weight = torch.cat([-cot_weight, deg], dim=0)
return edge_index, edge_weight
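
The duplicate-elimination step in `get_mesh_laplacian` linearizes each `(row, col)` pair into a single integer (`row * num_nodes + col`), groups identical edges with `unique(return_inverse=True)`, and sums their weights with `scatter_add`. A minimal standalone sketch of that coalescing trick, with made-up example values chosen purely for illustration:

```python
import torch
from torch_scatter import scatter_add

num_nodes = 3
# Duplicate COO entries: the edge (0, 1) appears twice.
edge_index = torch.tensor([[0, 0, 1, 2],
                           [1, 1, 2, 0]])
weight = torch.tensor([0.5, 0.25, 1.0, 2.0])

# Linearize (row, col) -> row * num_nodes + col, one integer per entry.
lin_index = edge_index[0] * num_nodes + edge_index[1]

# `unique` returns the distinct linear indices and, with return_inverse=True,
# the position of each original entry inside that distinct list.
y, idx = lin_index.unique(return_inverse=True)

# Recover the coalesced COO indices and sum the weights of duplicates.
coalesced_index = torch.stack([y // num_nodes, y % num_nodes])
coalesced_weight = scatter_add(weight, idx, dim=0)

print(coalesced_index)   # tensor([[0, 1, 2], [1, 2, 0]])
print(coalesced_weight)  # tensor([0.7500, 1.0000, 2.0000])
```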