From 6afde1d97eef7124d0f6d33e38b6c6ba62dd0aaa Mon Sep 17 00:00:00 2001
From: Quinn Dawkins <quinn.dawkins@gmail.com>
Date: Fri, 26 Jan 2024 18:35:37 -0500
Subject: [PATCH] Fixup formatting of a few comments after clang-format

---
 .../TorchOnnxToTorch/DefaultDomainGtoP.cpp    |  4 ++--
 lib/Conversion/TorchToLinalg/DataMovement.cpp | 24 ++++++++++---------
 2 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp b/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
index b648541bf639..df20a83515bf 100644
--- a/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
+++ b/lib/Conversion/TorchOnnxToTorch/DefaultDomainGtoP.cpp
@@ -41,8 +41,8 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
             binder.tensorResultType(resultType))
           return failure();
 
-        // HardSigmoid computes the following expression: max(0, min(1, alpha *
-        // x + beta))
+        // HardSigmoid computes the following expression:
+        // max(0, min(1, alpha * x + beta))
         Value constAlpha = rewriter.create<Torch::ConstantFloatOp>(
             binder.getLoc(), rewriter.getType<Torch::FloatType>(),
             rewriter.getF64FloatAttr(alpha));
diff --git a/lib/Conversion/TorchToLinalg/DataMovement.cpp b/lib/Conversion/TorchToLinalg/DataMovement.cpp
index 51036dcb2fa2..add32ff05cd6 100644
--- a/lib/Conversion/TorchToLinalg/DataMovement.cpp
+++ b/lib/Conversion/TorchToLinalg/DataMovement.cpp
@@ -110,15 +110,18 @@ LogicalResult prepareArgumentsForSlicingOp(OpTy op, OpAdaptor adaptor,
 // Example:
 // input = tensor([[[0., 1., 2., 3.],
 //                  [4., 5., 6., 7.]]])
-// torch.ops.aten.reflection_pad1d(input, (3,1)) ; padding_left = 3,
-// padding_right = 1 tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
-//                            [7., 6., 5., 4., 5., 6., 7., 6.]]])
-// Checks: 1) Each of padding_left and padding_right must be non-negative less
-// than size of last dimension Implementation: a) Construct a result tensor of
+// torch.ops.aten.reflection_pad1d(input, (3,1));
+// padding_left = 3,
+// padding_right = 1
+// output = tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
+//                   [7., 6., 5., 4., 5., 6., 7., 6.]]])
+// Checks: 1) Each of padding_left and padding_right must be non-negative and
+//            less than the size of the last dimension.
+// Implementation: a) Construct a result tensor of
 //                    shape of input tensor except for the last dimension.
 //                    The last dimension of the result tensor should be last
 //                    dimension of input tensor + left padding size + right
-//                    padding size. INitialize result tensor to all zeros
+//                    padding size. Initialize result tensor to all zeros
 //                 b) Setup affine map to take slice from input tensor of size
 //                    left padding starting from
 //                    second column onwards as first column is reflection
@@ -190,9 +193,8 @@ class ConvertAtenReflectionPad1dOp
     tileWidth[PAD_CENTER] = lastDimSize;
 
     extractOffset[PAD_LEFT] = one;
-    // for (1,2,4) input, padding (3,1) lastDimSize=4, 4 - 1 - 1 = 2 [3,5,
-    // 6,7], so start offset to 6, which is right lasDimSize -
-    // (tileWidth[PAD_RIGHT] + one)
+    // The offset for the right hand padding "bar" is:
+    //   [right] lastDimSize - (tileWidth[PAD_RIGHT] + one)
     extractOffset[PAD_RIGHT] =
         createISub(lastDimSize, createIAdd(tileWidth[PAD_RIGHT], one));
     extractOffset[PAD_CENTER] = zero;
@@ -202,8 +204,8 @@ class ConvertAtenReflectionPad1dOp
     insertOffset[PAD_CENTER] = tileWidth[PAD_LEFT];
 
     SmallVector<Value> resultShape{inputShape};
-    // Result's last dimension will have shape lastDimSize + left padding size +
-    // right padding size
+    // Result's last dimension will have size:
+    //   lastDimSize + left padding size + right padding size
     resultShape[lastDim] =
         createIAdd(resultShape[lastDim],
                    createIAdd(tileWidth[PAD_LEFT], tileWidth[PAD_RIGHT]));