Skip to content

Commit

Permalink
wip
Browse files Browse the repository at this point in the history
  • Loading branch information
oesteban committed Nov 15, 2022
1 parent 3ad47d7 commit 687ae2d
Show file tree
Hide file tree
Showing 2 changed files with 119 additions and 13 deletions.
47 changes: 47 additions & 0 deletions sdcflows/lib/bspline.pyx
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2022 The NiPreps Developers <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
import numpy as np
cimport numpy as np
cimport cython
from libc.math cimport fabs


DTYPE = np.float32
ctypedef np.float32_t DTYPE_t

@cython.boundscheck(False)  # turn off bounds-checking for entire function
@cython.wraparound(False)  # turn off negative index wrapping for entire function
def bs_eval(DTYPE_t r):
    """Evaluate the univariate cubic B-spline kernel at signed offset ``r``."""
    cdef DTYPE_t dist = fabs(r)
    cdef DTYPE_t tail

    # The cubic B-spline has compact support: zero for |r| >= 2.
    if dist >= 2.0:
        return 0.0

    # Inner piece, |r| < 1: (4 - 6 d^2 + 3 d^3) / 6
    if dist < 1:
        return (4.0 - 6.0 * dist * dist + 3.0 * dist * dist * dist) / 6.0

    # Outer piece, 1 <= |r| < 2: (2 - d)^3 / 6
    tail = 2.0 - dist
    return tail * tail * tail / 6.0
85 changes: 72 additions & 13 deletions sdcflows/transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,14 +26,17 @@
import attr
import numpy as np
from scipy import ndimage as ndi
from scipy.sparse import vstack as sparse_vstack, csr_matrix, kron
from scipy.sparse import csr_matrix, kron

import nibabel as nb
import nitransforms as nt
from nitransforms.base import _as_homogeneous
from bids.utils import listify

from niworkflows.interfaces.nibabel import reorient_image
from sdcflows.lib.bspline import bs_eval as _bseval

bseval = np.frompyfunc(_bseval, 1, 1)


def _clear_mapped(instance, attribute, value):
Expand Down Expand Up @@ -413,19 +416,11 @@ def grid_bspline_weights(target_nii, ctrl_nii, dtype="float16"):
ijk_knots = np.arange(0, knots_shape[axis], dtype=dtype)

# Distances of every sample w.r.t. every knot
distance = np.abs(ijk_samples[np.newaxis, ...] - ijk_knots[..., np.newaxis])

# Optimization: calculate B-Splines only for samples within the kernel spread
within_support = distance < 2.0
d_vals, d_idxs = np.unique(distance[within_support], return_inverse=True)
distance = ijk_samples[np.newaxis, ...] - ijk_knots[..., np.newaxis]

# Calculate univariate B-Spline weight for samples within kernel spread
bs_w = _cubic_bspline(d_vals)

# Densify and build csr matrix (probably, we could avoid densifying)
weights = np.zeros_like(distance, dtype="float32")
weights[within_support] = bs_w[d_idxs]
wd.append(csr_matrix(weights))
# Build CSR (compressed sparse row) matrix
weights = csr_matrix(bseval(distance), dtype="float32")
wd.append(weights)

# Efficiently calculate tensor product of the weights
return kron(kron(wd[0], wd[1]), wd[2])
Expand Down Expand Up @@ -456,3 +451,67 @@ def _move_coeff(in_coeff, fmap_ref, transform, fmap_target=None):
))
hdr["cal_min"] = - hdr["cal_max"]
return coeff.__class__(coeff.dataobj, newaff, hdr)


def grid_bspline_weights_3d(target_nii, ctrl_nii):
    """Compute tensor-product B-spline weights of the target samples w.r.t. the knots.

    .. warning:: Work in progress — the per-block tensor-product weights are
       computed but not yet assembled into a single matrix, so this function
       currently returns ``None``.

    Parameters
    ----------
    target_nii : :obj:`nibabel.Nifti1Image`
        Image providing the sampling grid (affine and shape are read).
    ctrl_nii : :obj:`nibabel.Nifti1Image`
        Image whose voxels are the B-spline knots (affine and shape are read).

    Returns
    -------
    ``None`` (placeholder until block assembly is implemented).

    """
    # Map target voxel indices into the knots' voxel (ijk) coordinate system
    target_to_grid = np.linalg.inv(ctrl_nii.affine) @ target_nii.affine

    sample_ijk = _indexes(target_nii.shape[:3])
    grid_ijk = (target_to_grid @ sample_ijk)[:3, ...]
    knots_ijk = _indexes(ctrl_nii.shape[:3])[:3, ...]

    # Process in blocks to bound the memory footprint of the dense distance arrays
    blocks_samples = np.array_split(grid_ijk, 1000, axis=1)
    blocks_knots = np.array_split(knots_ijk, 2, axis=1)

    for samples in blocks_samples:
        for knots in blocks_knots:
            # Distances start at 2.0, i.e., zero weight (outside kernel support)
            deltas = np.full((knots.shape[-1], samples.shape[-1], 3), 2.0, dtype="float16")
            within_support = np.ones((knots.shape[-1], samples.shape[-1]), dtype="bool")
            for axis in range(3):
                # NOTE(review): ``where=`` without ``out=`` leaves masked-out
                # entries uninitialized per NumPy docs; harmless here because
                # those entries are already False in ``within_support``.
                axis_delta = np.abs(np.subtract(
                    samples[np.newaxis, axis, :],
                    knots.T[:, axis, np.newaxis],
                    dtype="float16",
                    where=within_support,
                ))
                # The cubic B-spline support is |d| < 2 along every axis
                within_support[axis_delta >= 2.0] = False
                total = within_support.sum()
                if not total:
                    # No pair is within support — remaining axes cannot revive any
                    break
                deltas[within_support, axis] = axis_delta[within_support]

            if not total:
                continue

            # Deduplicate distances so the kernel is evaluated once per unique value
            d_vals, d_idxs = np.unique(
                deltas[within_support],
                return_inverse=True,
            )
            # Univariate B-spline weight for samples within kernel spread
            bs_w = bseval(d_vals)

            # Tensor-product weight: product of the three univariate weights
            weights = np.zeros_like(within_support, dtype="float32")
            weights[within_support] = np.prod(
                bs_w[d_idxs].reshape(deltas[within_support].shape),
                axis=1,
            )

    # TODO: assemble the per-block ``weights`` into a sparse matrix and return it
    return None


def _indexes(shape, dtype="float16"):
indexes = tuple([np.arange(s) for s in shape])
return np.vstack((
np.array(np.meshgrid(*indexes, indexing="ij")).reshape(len(shape), -1),
np.ones((np.prod(shape), )),
)).astype(dtype)

0 comments on commit 687ae2d

Please sign in to comment.