Fix: Fixing CI #22

Merged · 6 commits · Nov 30, 2023
3 changes: 2 additions & 1 deletion .github/workflows/run-tox.yml
@@ -24,7 +24,8 @@ jobs:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
sudo apt-get install -y poppler-utils imagemagick pandoc
sudo apt-get update
sudo apt-get install -y poppler-utils imagemagick pandoc --fix-missing
python -m pip install --upgrade pip setuptools
pip install tox-gh-actions pandoc
- name: Run tox
2 changes: 1 addition & 1 deletion .pylintrc
@@ -19,7 +19,7 @@ max-locals=15
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branches=20
max-branches=30
# Maximum number of statements in function / method body
max-statements=65
# Maximum number of parents for a class (see R0901).
49 changes: 27 additions & 22 deletions MARBLE/geometry.py
@@ -5,19 +5,24 @@
import torch
import torch_geometric.utils as PyGu
import umap
from sklearn.cluster import KMeans, MeanShift
from sklearn.cluster import KMeans
from sklearn.cluster import MeanShift
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, Isomap, MDS
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.manifold import Isomap
from sklearn.metrics import pairwise_distances
from sklearn.preprocessing import StandardScaler
from torch_geometric.nn import knn_graph, radius_graph
from torch_geometric.nn import knn_graph
from torch_geometric.nn import radius_graph
from torch_scatter import scatter_add

from ptu_dijkstra import connections, tangent_frames # isort:skip

from MARBLE.lib.cknn import cknneighbors_graph # isort:skip
from MARBLE import utils # isort:skip


def furthest_point_sampling(x, N=None, stop_crit=0.0, start_idx=0):
"""A greedy O(N^2) algorithm to do furthest points sampling

@@ -42,7 +47,7 @@ def furthest_point_sampling(x, N=None, stop_crit=0.0, start_idx=0):
perm[0] = start_idx
lambdas = torch.zeros(n)
ds = D[start_idx, :].flatten()
for i in range(1,n):
for i in range(1, n):
idx = torch.argmax(ds)
perm[i] = idx
lambdas[i] = ds[idx]
@@ -54,7 +59,7 @@ def furthest_point_sampling(x, N=None, stop_crit=0.0, start_idx=0):
lambdas = lambdas[:i]
break

assert len(perm)==len(np.unique(perm)), 'Returned duplicated points'
assert len(perm) == len(np.unique(perm)), "Returned duplicated points"
return perm, lambdas
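For context (not part of the diff): the greedy O(N^2) farthest point sampling loop reformatted above can be sketched in a few self-contained lines. This is a minimal sketch; the variable names mirror the function, and the dense pairwise distance matrix is an assumption about how the distances are held.

import numpy as np
import torch
from sklearn.metrics import pairwise_distances

def fps_sketch(x, n_samples, start_idx=0):
    """Greedy farthest point sampling over a dense distance matrix (O(N^2))."""
    D = torch.tensor(pairwise_distances(x))      # (n, n) pairwise distances
    perm = torch.zeros(n_samples, dtype=torch.long)
    perm[0] = start_idx
    ds = D[start_idx, :].clone()                 # distance of each point to the sampled set
    for i in range(1, n_samples):
        idx = torch.argmax(ds)                   # pick the point farthest from the set
        perm[i] = idx
        ds = torch.minimum(ds, D[idx, :])        # shrink distances to the enlarged set
    return perm

samples = fps_sketch(np.random.rand(100, 3), n_samples=10)   # assumed toy data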


@@ -131,10 +136,10 @@ def embed(x, embed_typ="umap", dim_emb=2, manifold=None, seed=0, **kwargs):
manifold = PCA(n_components=dim_emb).fit(x)

emb = manifold.transform(x)

elif embed_typ == "Isomap":
radius = pairwise_distances(x)
radius = 0.1*(radius.max()-radius.min())
radius = 0.1 * (radius.max() - radius.min())
if manifold is None:
manifold = Isomap(n_components=dim_emb, n_neighbors=None, radius=radius).fit(x)
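For reference (not part of the diff): the Isomap branch picks a neighbourhood radius as 10% of the spread of pairwise distances instead of a fixed neighbour count. A rough standalone sketch of that heuristic, assuming toy random data and a scikit-learn version whose Isomap accepts a radius argument:

import numpy as np
from sklearn.manifold import Isomap
from sklearn.metrics import pairwise_distances

x = np.random.rand(200, 5)                      # assumed toy data
d = pairwise_distances(x)
radius = 0.1 * (d.max() - d.min())              # 10% of the distance spread, as in the diff
emb = Isomap(n_components=2, n_neighbors=None, radius=radius).fit_transform(x)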

@@ -296,7 +301,7 @@ def gradient_op(pos, edge_index, gauges):
_F -= sp.diags(np.array(_F.sum(1)).flatten())
_F = _F.tocoo()
K.append(torch.sparse_coo_tensor(np.vstack([_F.row, _F.col]), _F.data.data))

return K
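Aside (not part of the diff): the scipy-COO to torch sparse conversion used in gradient_op follows a common pattern. A small sketch with an assumed random matrix standing in for one gradient component:

import numpy as np
import scipy.sparse as sp
import torch

F = sp.random(5, 5, density=0.4, format="csr")   # stand-in for one gradient component
F = F - sp.diags(np.array(F.sum(1)).flatten())   # zero the row sums, as above
F = F.tocoo()
K = torch.sparse_coo_tensor(np.vstack([F.row, F.col]), F.data, size=F.shape)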


@@ -450,17 +455,17 @@ def is_connected(edge_index):
def compute_laplacian(data, normalization="rw"):
"""Compute Laplacian."""
edge_index, edge_attr = PyGu.get_laplacian(
data.edge_index,
edge_weight=data.edge_weight,
normalization=normalization,
num_nodes=data.num_nodes
data.edge_index,
edge_weight=data.edge_weight,
normalization=normalization,
num_nodes=data.num_nodes,
)

return PyGu.to_dense_adj(edge_index, edge_attr=edge_attr).squeeze()
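For context (not part of the diff): compute_laplacian is a thin wrapper around two PyG utilities. A hedged usage sketch on an assumed toy 3-node path graph:

import torch
import torch_geometric.utils as PyGu

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])                # undirected path 0-1-2
lap_index, lap_weight = PyGu.get_laplacian(edge_index, normalization="rw", num_nodes=3)
L = PyGu.to_dense_adj(lap_index, edge_attr=lap_weight).squeeze()       # dense 3x3 random-walk Laplacian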


def compute_connection_laplacian(data, R, normalization="rw"):
"""Connection Laplacian
r"""Connection Laplacian

Args:
data: Pytorch geometric data object.
@@ -515,7 +520,7 @@ def compute_connection_laplacian(data, R, normalization="rw"):


def compute_gauges(data, dim_man=None, n_geodesic_nb=10, n_workers=1):
"""Orthonormal gauges for the tangent space at each node, and connection
r"""Orthonormal gauges for the tangent space at each node, and connection
matrices between each pair of adjacent nodes.

R is a block matrix, where the row index is the gauge we want to align to,
@@ -568,7 +573,7 @@ def _compute_gauges(inputs, i):


def compute_connections(data, gauges, n_workers=1):
"""Find smallest rotations R between gauges pairs. It is assumed that the first
r"""Find smallest rotations R between gauges pairs. It is assumed that the first
row of edge_index is what we want to align to, i.e.,
gauges(i) = gauges(j)@R[i,j].T
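Aside (not part of the diff): the convention gauges(i) = gauges(j) @ R[i,j].T is an orthogonal Procrustes alignment between frames. A generic SVD-based sketch, assumed here for illustration rather than taken from MARBLE's implementation (it also skips the reflection check a strict rotation would need):

import torch

def smallest_rotation(G_i, G_j):
    """Orthogonal R minimising ||G_i - G_j @ R.T|| for two orthonormal frames."""
    U, _, Vt = torch.linalg.svd(G_j.T @ G_i)     # G_j^T G_i = U S V^T
    return (U @ Vt).T                            # R = V U^T

Q1, _ = torch.linalg.qr(torch.randn(3, 2))       # two random orthonormal 3x2 frames
Q2, _ = torch.linalg.qr(torch.randn(3, 2))
R = smallest_rotation(Q1, Q2)                    # Q2 @ R.T best approximates Q1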

@@ -625,17 +630,17 @@ def scalar_diffusion(x, t, method="matrix_exp", par=None):
), "For spectral method, par must be a tuple of \
eigenvalues, eigenvectors!"
evals, evecs = par

# Transform to spectral
x_spec = torch.mm(evecs.T, x)

# Diffuse
diffusion_coefs = torch.exp(-evals.unsqueeze(-1) * t.unsqueeze(0))
x_diffuse_spec = diffusion_coefs * x_spec

# Transform back to per-vertex
return evecs.mm(x_diffuse_spec)

raise NotImplementedError
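For context (not part of the diff): the spectral branch above is heat diffusion in the Laplacian eigenbasis, x_t = Phi exp(-Lambda t) Phi^T x. A self-contained sketch, assuming a symmetric Laplacian and a scalar diffusion time:

import torch

def spectral_diffuse(x, L, t):
    evals, evecs = torch.linalg.eigh(L)          # L = evecs @ diag(evals) @ evecs.T
    x_spec = evecs.T @ x                         # transform to the spectral basis
    coefs = torch.exp(-evals.unsqueeze(-1) * t)  # per-frequency decay exp(-lambda * t)
    return evecs @ (coefs * x_spec)              # transform back to vertices

L = torch.tensor([[ 1., -1.,  0.,  0.],
                  [-1.,  2., -1.,  0.],
                  [ 0., -1.,  2., -1.],
                  [ 0.,  0., -1.,  1.]])         # path-graph Laplacian
x_diffused = spectral_diffuse(torch.randn(4, 2), L, t=0.5)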


@@ -683,7 +688,7 @@ def compute_eigendecomposition(A, k=None, eps=1e-8):
"""
if A is None:
return None

if k is not None and k >= A.shape[0]:
k = None

@@ -692,11 +697,11 @@ def compute_eigendecomposition(A, k=None, eps=1e-8):
while True:
try:
if k is None:
evals, evecs = torch.linalg.eigh(A)
evals, evecs = torch.linalg.eigh(A) # pylint: disable=not-callable
else:
evals, evecs = sp.linalg.eigsh(A, k=k, which="SM")
evals, evecs = torch.tensor(evals), torch.tensor(evecs)

evals = torch.clamp(evals, min=0.0)
evecs *= np.sqrt(len(evecs))

@@ -709,4 +714,4 @@ def compute_eigendecomposition(A, k=None, eps=1e-8):
print("--- decomp failed; adding eps ===> count: " + str(failcount))
A += torch.eye(A.shape[0]) * (eps * 10 ** (failcount - 1))

return evals, evecs
return evals, evecs
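Putting the pieces of this file together, an assumed usage sketch (not part of the diff): a toy PyG Data object is built by hand, and the spectral branch of scalar_diffusion is assumed to be selected with method="spectral".

import torch
from torch_geometric.data import Data
from MARBLE import geometry as g

edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
data = Data(x=torch.randn(3, 4), edge_index=edge_index,
            edge_weight=torch.ones(4), num_nodes=3)          # assumed toy graph
L = g.compute_laplacian(data)                                # dense random-walk Laplacian
evals, evecs = g.compute_eigendecomposition(L)               # retries with jitter if eigh fails
x_diff = g.scalar_diffusion(data.x, torch.tensor(0.5),
                            method="spectral",               # branch name assumed
                            par=(evals, evecs))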
2 changes: 1 addition & 1 deletion MARBLE/layers.py
@@ -1,11 +1,11 @@
"""Layer module."""
import torch
from torch import nn
from torch.nn.functional import normalize, relu
from torch_geometric.nn.conv import MessagePassing

from MARBLE import geometry as g


class Diffusion(nn.Module):
"""Diffusion with learned t."""
