Remove unused t3 skeletons
WardBrian committed Oct 9, 2023
1 parent b431570 commit 026e48c
Showing 2 changed files with 1 addition and 280 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
@@ -11,7 +11,7 @@ jobs:
    runs-on: ubuntu-latest
    strategy:
      matrix:
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.9"] # TODO: re-add 3.10, 3.11
      fail-fast: false

    steps:
279 changes: 0 additions & 279 deletions pytorch_finufft/functional.py
@@ -200,114 +200,6 @@ def backward(
        return grad_points, grad_targets, None, None, None


class _finufft1D3(torch.autograd.Function):
    """
    FINUFFT 1d Problem type 3
    """

    @staticmethod
    def forward(
        ctx: Any,
        points: torch.Tensor,
        values: torch.Tensor,
        targets: torch.Tensor,
        out: Optional[torch.Tensor] = None,
        finufftkwargs: Dict[str, Union[int, float]] = {},
    ) -> torch.Tensor:
        """
        Evaluates the Type 3 NUFFT on the inputs.

        ```
               M-1
        f[k] = SUM c[j] exp(+/-i s[k] x[j]),
               j=0

        for k = 0, ..., N-1
        ```

        Parameters
        ----------
        ctx : Any
            PyTorch context object
        points : torch.Tensor
            The non-uniform points x_j.
        values : torch.Tensor
            The source strengths c_j.
        targets : torch.Tensor
            The non-uniform target points s_k.
        out : Optional[torch.Tensor]
            Array to populate with result in-place, by default None
        finufftkwargs : Dict[str, Union[int, float]]
            Additional arguments will be passed into FINUFFT. See
            https://finufft.readthedocs.io/en/latest/python.html. By default
            an empty dictionary

        Returns
        -------
        torch.Tensor
            The resultant array f[k]
        """

        if out is not None:
            print("In-place results are not yet implemented")

        err._type3_checks((points,), values, (targets,))

        # NB: no mode ordering kwarg for type 3
        finufftkwargs = {k: v for k, v in finufftkwargs.items()}
        _i_sign = finufftkwargs.pop("isign", -1)

        # NOTE: this is passed in as None in the test suite
        if ctx is not None:
            ctx.isign = _i_sign
            ctx.finufftkwargs = finufftkwargs

        finufft_out = finufft.nufft1d3(
            points.data.numpy(),
            values.data.numpy(),
            targets.data.numpy(),
            isign=_i_sign,
            **finufftkwargs,
        )

        return torch.from_numpy(finufft_out)

    @staticmethod
    def backward(
        ctx: Any, grad_output: torch.Tensor
    ) -> Tuple[Union[torch.Tensor, Any], ...]:
        """
        Implements gradients for backward mode automatic differentiation

        Parameters
        ----------
        ctx : Any
            PyTorch context object
        grad_output : torch.Tensor
            Backpass gradient output

        Returns
        -------
        Tuple[Union[torch.Tensor, Any], ...]
            Tuple of derivatives with respect to each input
        """
        _i_sign = ctx.isign
        _mode_ordering = ctx.mode_ordering
        _fftshift = ctx.fftshift
        _finufftkwargs = ctx.finufftkwargs

        grad_points = grad_values = grad_targets = None

        if ctx.needs_input_grad[0]:
            grad_points = None
        if ctx.needs_input_grad[1]:
            grad_values = None
        if ctx.needs_input_grad[2]:
            grad_targets = None

        return grad_points, grad_values, grad_targets, None, None, None
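
For reference, the removed skeleton above only described the 1D type-3 transform; its gradients were never implemented. The following is a minimal, self-contained sketch (not part of the removed code) of what the forward transform computes: it evaluates the sum from the docstring directly and checks it against `finufft.nufft1d3`. Array sizes, the random seed, and tolerances are arbitrary illustrations.

```python
import numpy as np
import finufft

rng = np.random.default_rng(0)
M, N = 50, 40                                              # sources / targets (arbitrary)
x = rng.uniform(-np.pi, np.pi, M)                          # non-uniform source points x_j
c = rng.standard_normal(M) + 1j * rng.standard_normal(M)   # complex source strengths c_j
s = rng.uniform(-10.0, 10.0, N)                            # non-uniform target frequencies s_k

isign = -1
# Direct evaluation of f[k] = sum_j c[j] * exp(isign * 1j * s[k] * x[j])
f_direct = np.array([np.sum(c * np.exp(isign * 1j * sk * x)) for sk in s])
# The same quantity via FINUFFT's 1D type-3 routine
f_nufft = finufft.nufft1d3(x, c, s, isign=isign, eps=1e-9)

assert np.allclose(f_direct, f_nufft, atol=1e-6)
```

The same identity extends to 2D and 3D by replacing s[k] x[j] in the exponent with the dot product of the target and source coordinates.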


###############################################################################
# 2d Functions
###############################################################################
@@ -540,77 +432,6 @@ def backward(
        )


class _finufft2D3(torch.autograd.Function):
    """
    FINUFFT 2D problem type 3
    """

    @staticmethod
    def forward(
        ctx: Any,
        points_x: torch.Tensor,
        points_y: torch.Tensor,
        values: torch.Tensor,
        targets_s: torch.Tensor,
        targets_t: torch.Tensor,
    ) -> torch.Tensor:
        """
        Evaluates the Type 3 NUFFT on the inputs

        Parameters
        ----------
        ctx : Any
            _description_
        points_x : torch.Tensor
            _description_
        points_y : torch.Tensor
            _description_
        values : torch.Tensor
            _description_
        targets_s : torch.Tensor
            _description_
        targets_t : torch.Tensor
            _description_

        Returns
        -------
        torch.Tensor
            _description_

        Raises
        ------
        ValueError
            _description_
        """

        if True:
            raise ValueError("2D3 is not implemented yet")

        return torch.ones(10)

    @staticmethod
    def backward(
        ctx: Any, grad_output: torch.Tensor
    ) -> Tuple[Union[torch.Tensor, None], ...]:
        """
        Implements gradients for backward mode automatic differentiation

        Parameters
        ----------
        ctx : Any
            TODO PyTorch context object
        grad_output : torch.Tensor
            TODO VJP output

        Returns
        -------
        Tuple[Union[torch.Tensor, None], ...]
            Tuple of derivatives with respect to each input
        """

        return None, None, None, None, None


###############################################################################
# 3d Functions
###############################################################################
@@ -866,106 +687,6 @@ def backward(
        )


class _finufft3D3(torch.autograd.Function):
    """
    FINUFFT 3D problem type 3
    """

    @staticmethod
    def forward(
        ctx: Any,
        points_x: torch.Tensor,
        points_y: torch.Tensor,
        points_z: torch.Tensor,
        values: torch.Tensor,
        targets_s: torch.Tensor,
        targets_t: torch.Tensor,
        targets_u: torch.Tensor,
        out: Optional[torch.Tensor] = None,
        **finufftkwargs: Union[int, float],
    ) -> torch.Tensor:
        """
        TODO Description here!

        Parameters
        ----------
        ctx : Any
            PyTorch context object
        points_x : torch.Tensor
            The nonuniform source points x_j
        points_y : torch.Tensor
            The nonuniform source points y_j
        points_z : torch.Tensor
            The nonuniform source points z_j
        values : torch.Tensor
            The source strengths c_j
        targets_s : torch.Tensor
            The target Fourier mode coefficients s_k
        targets_t : torch.Tensor
            The target Fourier mode coefficients t_k
        targets_u : torch.Tensor
            The target Fourier mode coefficients u_k
        out : Optional[torch.Tensor], optional
            Array to take the result in-place, by default None
        **finufftkwargs : Union[int, float]
            Additional arguments will be passed into FINUFFT. See
            https://finufft.readthedocs.io/en/latest/python.html

        Returns
        -------
        torch.Tensor
            The resultant array f[k]

        Raises
        ------
        ValueError
            _description_
        """

        if True:
            raise ValueError("3D3 is not implemented yet")

        return torch.ones(10)

    @staticmethod
    def backward(
        ctx: Any, grad_output: torch.Tensor
    ) -> Tuple[Union[torch.Tensor, None], ...]:
        """
        Implements gradients for backward mode automatic differentiation

        Parameters
        ----------
        ctx : Any
            TODO PyTorch context object
        grad_output : torch.Tensor
            TODO VJP output

        Returns
        -------
        Tuple[Union[torch.Tensor, None], ...]
            Tuple of derivatives with respect to each input
        """

        grad_points_x = grad_points_y = grad_points_z = None

        grad_values = None

        grad_targets_s = grad_targets_t = grad_targets_u = None

        return (
            grad_points_x,
            grad_points_y,
            grad_points_z,
            grad_values,
            grad_targets_s,
            grad_targets_t,
            grad_targets_u,
            None,
            None,
        )
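
As with the other removed skeletons, only the signature and docstring existed here. For orientation, below is a minimal sketch of a forward-only 3D type-3 call in the torch-to-NumPy-and-back style used by the wrappers that remain in functional.py. The function name and defaults are illustrative assumptions, not the removed implementation.

```python
# Illustrative sketch only (not the removed code): forward-only 3D type-3
# evaluation following the torch -> NumPy -> FINUFFT -> torch pattern.
import finufft
import torch


def nufft3d3_forward(
    points_x: torch.Tensor,   # non-uniform source points x_j (CPU, float64)
    points_y: torch.Tensor,   # non-uniform source points y_j
    points_z: torch.Tensor,   # non-uniform source points z_j
    values: torch.Tensor,     # complex source strengths c_j
    targets_s: torch.Tensor,  # non-uniform target frequencies s_k
    targets_t: torch.Tensor,  # non-uniform target frequencies t_k
    targets_u: torch.Tensor,  # non-uniform target frequencies u_k
    isign: int = -1,
) -> torch.Tensor:
    """f[k] = sum_j c[j] exp(isign * i * (s[k] x[j] + t[k] y[j] + u[k] z[j]))."""
    f = finufft.nufft3d3(
        points_x.detach().numpy(),
        points_y.detach().numpy(),
        points_z.detach().numpy(),
        values.detach().numpy(),
        targets_s.detach().numpy(),
        targets_t.detach().numpy(),
        targets_u.detach().numpy(),
        isign=isign,
    )
    return torch.from_numpy(f)
```

A full torch.autograd.Function would additionally need a backward rule for each differentiable input, which is exactly the part these skeletons left as placeholders.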


###############################################################################
# Consolidated forward function for all 1D, 2D, and 3D problems for nufft type 1
###############################################################################
