diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 50b4cb9..992553c 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -11,7 +11,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.9", "3.10", "3.11"]
+        python-version: ["3.9"]  # TODO: re-add 3.10, 3.11
       fail-fast: false
 
     steps:
diff --git a/pytorch_finufft/functional.py b/pytorch_finufft/functional.py
index 69c763b..cce4635 100644
--- a/pytorch_finufft/functional.py
+++ b/pytorch_finufft/functional.py
@@ -200,114 +200,6 @@ def backward(
         return grad_points, grad_targets, None, None, None
 
 
-class _finufft1D3(torch.autograd.Function):
-    """
-    FINUFFT 1d Problem type 3
-    """
-
-    @staticmethod
-    def forward(
-        ctx: Any,
-        points: torch.Tensor,
-        values: torch.Tensor,
-        targets: torch.Tensor,
-        out: Optional[torch.Tensor] = None,
-        finufftkwargs: Dict[str, Union[int, float]] = {},
-    ) -> torch.Tensor:
-        """
-        Evaluates the Type 3 NUFFT on the inputs.
-
-        ```
-               M-1
-        f[k] = SUM c[j] exp(+/-i s[k] x[j]),
-               j=0
-
-        for k = 0, ..., N-1
-        ```
-
-        Parameters
-        ----------
-        ctx : Any
-            PyTorch context object
-        points : torch.Tensor
-            The non-uniform points x_j.
-        values : torch.Tensor
-            The source strengths c_j.
-        targets : torch.Tensor
-            The non-uniform target points s_k.
-        out : Optional[torch.Tensor]
-            Array to populate with result in-place, by default None
-        finufftkwargs : Dict[str, Union[int, float]]
-            Additional arguments will be passed into FINUFFT. See
-            https://finufft.readthedocs.io/en/latest/python.html. By default
-            an empty dictionary
-
-        Returns
-        -------
-        torch.Tensor
-            The resultant array f[k]
-        """
-
-        if out is not None:
-            print("In-place results are not yet implemented")
-
-        err._type3_checks((points,), values, (targets,))
-
-        # NB: no mode ordering kwarg for type 3
-        finufftkwargs = {k: v for k, v in finufftkwargs.items()}
-        _i_sign = finufftkwargs.pop("isign", -1)
-
-        # NOTE: this is passed in as None in the test suite
-        if ctx is not None:
-            ctx.isign = _i_sign
-            ctx.finufftkwargs = finufftkwargs
-
-        finufft_out = finufft.nufft1d3(
-            points.data.numpy(),
-            values.data.numpy(),
-            targets.data.numpy(),
-            isign=_i_sign,
-            **finufftkwargs,
-        )
-
-        return torch.from_numpy(finufft_out)
-
-    @staticmethod
-    def backward(
-        ctx: Any, grad_output: torch.Tensor
-    ) -> Tuple[Union[torch.Tensor, Any], ...]:
-        """
-        Implements gradients for backward mode automatic differentation
-
-        Parameters
-        ----------
-        ctx : Any
-            PyTorch context object
-        grad_output : torch.Tensor
-            Backpass gradient output
-
-        Returns
-        -------
-        Tuple[Union[torch.Tensor, Any], ...]
-            Tuple of derivatives with respect to each input
-        """
-        _i_sign = ctx.isign
-        _mode_ordering = ctx.mode_ordering
-        _fftshift = ctx.fftshift
-        _finufftkwargs = ctx.finufftkwargs
-
-        grad_points = grad_values = grad_targets = None
-
-        if ctx.needs_input_grad[0]:
-            grad_points = None
-        if ctx.needs_input_grad[1]:
-            grad_values = None
-        if ctx.needs_input_grad[2]:
-            grad_targets = None
-
-        return grad_points, grad_values, grad_targets, None, None, None
-
-
 ###############################################################################
 # 2d Functions
 ###############################################################################
@@ -540,77 +432,6 @@ def backward(
         )
 
 
-class _finufft2D3(torch.autograd.Function):
-    """
-    FINUFFT 2D problem type 3
-    """
-
-    @staticmethod
-    def forward(
-        ctx: Any,
-        points_x: torch.Tensor,
-        points_y: torch.Tensor,
-        values: torch.Tensor,
-        targets_s: torch.Tensor,
-        targets_t: torch.Tensor,
-    ) -> torch.Tensor:
-        """
-        Evaluates the Type 3 NUFFT on the inputs
-
-        Parameters
-        ----------
-        ctx : Any
-            _description_
-        points_x : torch.Tensor
-            _description_
-        points_y : torch.Tensor
-            _description_
-        values : torch.Tensor
-            _description_
-        targets_s : torch.Tensor
-            _description_
-        targets_t : torch.Tensor
-            _description_
-
-        Returns
-        -------
-        torch.Tensor
-            _description_
-
-        Raises
-        ------
-        ValueError
-            _description_
-        """
-
-        if True:
-            raise ValueError("2D3 is not implemented yet")
-
-        return torch.ones(10)
-
-    @staticmethod
-    def backward(
-        ctx: Any, grad_output: torch.Tensor
-    ) -> Tuple[Union[torch.Tensor, None], ...]:
-        """
-        Implements gradients for backward mode automatic differentiation
-
-        Parameters
-        ----------
-        ctx : Any
-            TODO PyTorch context object
-        grad_output : torch.Tensor
-            TODO VJP output
-
-        Returns
-        -------
-        Tuple[Union[torch.Tensor, None], ...]
-            Tuple of derivatives with respect to each input
-        """
-
-        return None, None, None, None, None
-
-
 ###############################################################################
 # 3d Functions
 ###############################################################################
@@ -866,106 +687,6 @@ def backward(
         )
 
 
-class _finufft3D3(torch.autograd.Function):
-    """
-    FINUFFT 3D problem type 3
-    """
-
-    @staticmethod
-    def forward(
-        ctx: Any,
-        points_x: torch.Tensor,
-        points_y: torch.Tensor,
-        points_z: torch.Tensor,
-        values: torch.Tensor,
-        targets_s: torch.Tensor,
-        targets_t: torch.Tensor,
-        targets_u: torch.Tensor,
-        out: Optional[torch.Tensor] = None,
-        **finufftkwargs: Union[int, float],
-    ) -> torch.Tensor:
-        """
-        TODO Description here!
-
-        Parameters
-        ----------
-        ctx : Any
-            PyTorch context object
-        points_x : torch.Tensor
-            The nonuniform source points x_j
-        points_y : torch.Tensor
-            The nonuniform source points y_j
-        points_z : torch.Tensor
-            The nonuniform source points z_j
-        values : torch.Tensor
-            The source strengths c_j
-        targets_s : torch.Tensor
-            The target Fourier mode coefficients s_k
-        targets_t : torch.Tensor
-            The target Fourier mode coefficients t_k
-        targets_u : torch.Tensor
-            The target Fourier mode coefficients u_k
-        out : Optional[torch.Tensor], optional
-            Array to take the result in-place, by default None
-        **finufftkwargs : Union[int, float]
-            Additional arguments will be passed into FINUFFT. See
-            https://finufft.readthedocs.io/en/latest/python.html
-
-        Returns
-        -------
-        torch.Tensor
-            The resultant array f[k]
-
-        Raises
-        ------
-        ValueError
-            _description_
-        """
-
-        if True:
-            raise ValueError("3D3 is not implemented yet")
-
-        return torch.ones(10)
-
-    @staticmethod
-    def backward(
-        ctx: Any, grad_output: torch.Tensor
-    ) -> Tuple[Union[torch.Tensor, None], ...]:
-        """
-        Implements gradients for backward mode automatic differentiation
-
-        Parameters
-        ----------
-        ctx : Any
-            TODO PyTorch context object
-        grad_output : torch.Tensor
-            TODO VJP output
-
-        Returns
-        -------
-        Tuple[Union[torch.Tensor, None], ...]
-            Tuple of derivatives with respect to each input
-        """
-
-        grad_points_x = grad_points_y = grad_points_z = None
-
-        grad_values = None
-
-        grad_targets_s = grad_targets_t = grad_targets_u = None
-
-        return (
-            grad_points_x,
-            grad_points_y,
-            grad_points_z,
-            grad_values,
-            grad_targets_s,
-            grad_targets_t,
-            grad_targets_u,
-            None,
-            None,
-        )
-
-
 ###############################################################################
 # Consolidated forward function for all 1D, 2D, and 3D problems for nufft type 1
 ###############################################################################
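For reference while the type-3 autograd wrappers are removed: the deleted `_finufft1D3.forward` was a thin wrapper around `finufft.nufft1d3` (and its `backward` never returned real gradients, only `None`). Below is a minimal sketch of evaluating the same 1D type-3 transform eagerly, without autograd support. The array names `x`, `c`, `s` follow the removed docstring; the random data is purely illustrative and not part of the package.

```python
import finufft
import numpy as np
import torch

# Illustrative inputs: x_j non-uniform source points, c_j complex strengths,
# s_k non-uniform target frequencies.
x = 2 * np.pi * np.random.rand(100)
c = np.random.randn(100) + 1j * np.random.randn(100)
s = np.random.randn(50)

# Same call the removed forward made: f[k] = sum_j c[j] exp(-i s[k] x[j])
f = finufft.nufft1d3(x, c, s, isign=-1)

# Wrap the result as a tensor; note this path carries no gradient information.
f_torch = torch.from_numpy(f)
```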