From 58abfd1409823351057dff9a068163799da2072c Mon Sep 17 00:00:00 2001
From: Angel Ezquerra
Date: Thu, 22 Aug 2024 11:15:03 +0200
Subject: [PATCH] Add support for running some key `bitops` functions on
 integer Tensors (#661)

While we already supported most of Nim's std/math features in
Arraymancer, we did not support any of the std/bitops operators and
procedures yet. These are very useful for implementing important
algorithms such as gray coding.

This commit adds some of the most important std/bitops features. These
will soon be used in `impulse` to implement some new algorithms.
---
 src/arraymancer/tensor.nim                  |   2 +
 src/arraymancer/tensor/bitops_functions.nim | 131 ++++++++++++++++++++
 tests/tensor/test_bitops_functions.nim      |  69 +++++++++++
 3 files changed, 202 insertions(+)
 create mode 100644 src/arraymancer/tensor/bitops_functions.nim
 create mode 100644 tests/tensor/test_bitops_functions.nim

diff --git a/src/arraymancer/tensor.nim b/src/arraymancer/tensor.nim
index e4188306..25aeb023 100644
--- a/src/arraymancer/tensor.nim
+++ b/src/arraymancer/tensor.nim
@@ -39,6 +39,7 @@ import ./laser/dynamic_stack_arrays,
        ./tensor/math_functions,
        ./tensor/aggregate,
        ./tensor/algorithms,
+       ./tensor/bitops_functions,
        ./tensor/lapack,
        ./tensor/optim_ops_fusion,
        ./tensor/syntactic_sugar,
@@ -67,6 +68,7 @@ export dynamic_stack_arrays,
        math_functions,
        aggregate,
        algorithms,
+       bitops_functions,
        lapack,
        optim_ops_fusion,
        syntactic_sugar,
diff --git a/src/arraymancer/tensor/bitops_functions.nim b/src/arraymancer/tensor/bitops_functions.nim
new file mode 100644
index 00000000..7145175b
--- /dev/null
+++ b/src/arraymancer/tensor/bitops_functions.nim
@@ -0,0 +1,131 @@
+# Copyright 2017 the Arraymancer contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ./data_structure,
+       ./higher_order_applymap,
+       ./shapeshifting,
+       ./ufunc
+import std / bitops
+
+export bitops
+
+proc `shr`*[T1, T2: SomeInteger](t: Tensor[T1], value: T2): Tensor[T1] {.noinit.} =
+  ## Broadcasted tensor-value `shr` (i.e. shift right) operator
+  ##
+  ## This is similar to numpy's `right_shift` and Matlab's `bitsra`
+  ## (or `bitshift` with a negative shift value).
+  t.map_inline(x shr value)
+
+proc `shr`*[T1, T2: SomeInteger](value: T1, t: Tensor[T2]): Tensor[T2] {.noinit.} =
+  ## Broadcasted value-tensor `shr` (i.e. shift right) operator
+  ##
+  ## This is similar to numpy's `right_shift` and Matlab's `bitsra`
+  ## (or `bitshift` with a negative shift value).
+  t.map_inline(value shr x)
+
+proc `shr`*[T: SomeInteger](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted element-wise tensor-tensor `shr` (i.e. shift right) operator
+  ##
+  ## This is similar to numpy's `right_shift` and Matlab's `bitsra`
+  ## (or `bitshift` with a negative shift value).
+  let (tmp1, tmp2) = broadcast2(t1, t2)
+  result = map2_inline(tmp1, tmp2, x shr y)
+
+proc `shl`*[T1, T2: SomeInteger](t: Tensor[T1], value: T2): Tensor[T1] {.noinit.} =
+  ## Broadcasted tensor-value `shl` (i.e. shift left) operator
+  ##
+  ## This is similar to numpy's `left_shift` and Matlab's `bitsll`
+  ## (or `bitshift` with a positive shift value).
+  t.map_inline(x shl value)
+
+proc `shl`*[T1, T2: SomeInteger](value: T1, t: Tensor[T2]): Tensor[T2] {.noinit.} =
+  ## Broadcasted value-tensor `shl` (i.e. shift left) operator
+  ##
+  ## This is similar to numpy's `left_shift` and Matlab's `bitsll`
+  ## (or `bitshift` with a positive shift value).
+  t.map_inline(value shl x)
+
+proc `shl`*[T: SomeInteger](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted element-wise tensor-tensor `shl` (i.e. shift left) operator
+  ##
+  ## This is similar to numpy's `left_shift` and Matlab's `bitsll`
+  ## (or `bitshift` with a positive shift value).
+  let (tmp1, tmp2) = broadcast2(t1, t2)
+  result = map2_inline(tmp1, tmp2, x shl y)
+
+makeUniversal(bitnot,
+  docSuffix="""Element-wise `bitnot` procedure
+
+This is similar to numpy's `bitwise_not` and Matlab's `bitcmp`.""")
+
+proc bitand*[T](t: Tensor[T], value: T): Tensor[T] {.noinit.} =
+  ## Broadcasted tensor-value `bitand` procedure
+  ##
+  ## This is similar to numpy's `bitwise_and` and Matlab's `bitand`.
+  t.map_inline(bitand(x, value))
+
+proc bitand*[T](value: T, t: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted value-tensor `bitand` procedure
+  ##
+  ## This is similar to numpy's `bitwise_and` and Matlab's `bitand`.
+  t.map_inline(bitand(value, x))
+
+proc bitand*[T](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted element-wise tensor-tensor `bitand` procedure
+  ##
+  ## This is similar to numpy's `bitwise_and` and Matlab's `bitand`.
+  let (tmp1, tmp2) = broadcast2(t1, t2)
+  result = map2_inline(tmp1, tmp2, bitand(x, y))
+
+
+proc bitor*[T](t: Tensor[T], value: T): Tensor[T] {.noinit.} =
+  ## Broadcasted tensor-value `bitor` procedure
+  ##
+  ## This is similar to numpy's `bitwise_or` and Matlab's `bitor`.
+  t.map_inline(bitor(x, value))
+
+proc bitor*[T](value: T, t: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted value-tensor `bitor` procedure
+  ##
+  ## This is similar to numpy's `bitwise_or` and Matlab's `bitor`.
+  t.map_inline(bitor(value, x))
+
+proc bitor*[T](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted element-wise tensor-tensor `bitor` procedure
+  ##
+  ## This is similar to numpy's `bitwise_or` and Matlab's `bitor`.
+  let (tmp1, tmp2) = broadcast2(t1, t2)
+  result = map2_inline(tmp1, tmp2, bitor(x, y))
+
+proc bitxor*[T](t: Tensor[T], value: T): Tensor[T] {.noinit.} =
+  ## Broadcasted tensor-value `bitxor` procedure
+  ##
+  ## This is similar to numpy's `bitwise_xor` and Matlab's `bitxor`.
+  t.map_inline(bitxor(x, value))
+
+proc bitxor*[T](value: T, t: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted value-tensor `bitxor` procedure
+  ##
+  ## This is similar to numpy's `bitwise_xor` and Matlab's `bitxor`.
+  t.map_inline(bitxor(value, x))
+
+proc bitxor*[T](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted element-wise tensor-tensor `bitxor` procedure
+  ##
+  ## This is similar to numpy's `bitwise_xor` and Matlab's `bitxor`.
+  let (tmp1, tmp2) = broadcast2(t1, t2)
+  result = map2_inline(tmp1, tmp2, bitxor(x, y))
+
+makeUniversal(reverseBits,
+  docSuffix="Element-wise `reverseBits` procedure")
diff --git a/tests/tensor/test_bitops_functions.nim b/tests/tensor/test_bitops_functions.nim
new file mode 100644
index 00000000..6bccedcf
--- /dev/null
+++ b/tests/tensor/test_bitops_functions.nim
@@ -0,0 +1,69 @@
+# Copyright 2017 the Arraymancer contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import ../../src/arraymancer
+import std / unittest
+
+proc main() =
+  suite "Bitops functions":
+    test "bitnot":
+      let t = [0, 1, 57, 1022, -100].toTensor
+      let expected = [-1, -2, -58, -1023, 99].toTensor
+      check: t.bitnot == expected
+
+    test "shr":
+      let t1 = [0, 1, 57, 1022, -100].toTensor
+      let t2 = [0, 1, 2, 3, 4].toTensor
+      check: t1 shr 3 == [0, 0, 7, 127, -13].toTensor
+      check: 1024 shr t2 == [1024, 512, 256, 128, 64].toTensor
+      check: t1 shr t2 == [0, 0, 14, 127, -7].toTensor
+
+    test "shl":
+      let t1 = [0, 1, 57, 1022, -100].toTensor
+      let t2 = [0, 1, 2, 3, 4].toTensor
+      check: t1 shl 3 == [0, 8, 456, 8176, -800].toTensor
+      check: 3 shl t2 == [3, 6, 12, 24, 48].toTensor
+      check: t1 shl t2 == [0, 2, 228, 8176, -1600].toTensor
+
+    test "bitand":
+      let t1 = [0, 1, 57, 1022, -100].toTensor
+      let t2 = [0, 2, 7, 15, 11].toTensor
+      check: bitand(t1, 0b010_110_101) == [0, 1, 49, 180, 148].toTensor
+      check: bitand(t1, 0b010_110_101) == bitand(0b010_110_101, t1)
+      check: bitand(t1, t2) == [0, 0, 1, 14, 8].toTensor
+      check: bitand(t1, t2) == bitand(t2, t1)
+
+    test "bitor":
+      let t1 = [0, 1, 57, 1022, -100].toTensor
+      let t2 = [0, 2, 7, 15, 11].toTensor
+      check: bitor(t1, 0b010_110_101) == [181, 181, 189, 1023, -67].toTensor
+      check: bitor(t1, 0b010_110_101) == bitor(0b010_110_101, t1)
+      check: bitor(t1, t2) == [0, 3, 63, 1023, -97].toTensor
+      check: bitor(t1, t2) == bitor(t2, t1)
+
+    test "bitxor":
+      let t1 = [0, 1, 57, 1022, -100].toTensor
+      let t2 = [0, 2, 7, 15, 11].toTensor
+      check: bitxor(t1, 0b010_110_101) == [181, 180, 140, 843, -215].toTensor
+      check: bitxor(t1, 0b010_110_101) == bitxor(0b010_110_101, t1)
+      check: bitxor(t1, t2) == [0, 3, 62, 1009, -105].toTensor
+      check: bitxor(t1, t2) == bitxor(t2, t1)
+
+    test "reverse_bits":
+      let t = [0, 1, 57, 1022].toTensor(uint16)
+      let expected = [0, 32768, 39936, 32704].toTensor(uint16)
+      check: t.reverse_bits == expected
+
+main()
+GC_fullCollect()
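
As a quick usage sketch of the three `shr`/`shl` overload shapes added by this
patch (tensor-value, value-tensor and tensor-tensor), assuming only the public
`arraymancer` import, which provides `toTensor` and tensor `==`:

    import arraymancer

    let t = [1, 2, 4, 8].toTensor
    # tensor-value: every element is shifted by the same scalar amount
    doAssert (t shr 1) == [0, 1, 2, 4].toTensor
    # value-tensor: the scalar is shifted by each element in turn
    doAssert (256 shr t) == [128, 64, 16, 1].toTensor
    # tensor-tensor: element-wise shifts, with shape broadcasting
    doAssert (t shl t) == [2, 8, 64, 2048].toTensor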
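The bitwise procedures follow the same three-overload pattern. A minimal
sketch, again assuming the public `arraymancer` import:

    import arraymancer

    let m = [0b1010, 0b1100].toTensor
    doAssert bitand(m, 0b1000) == [0b1000, 0b1000].toTensor  # tensor-value
    doAssert bitor(0b0001, m) == [0b1011, 0b1101].toTensor   # value-tensor
    doAssert bitxor(m, m) == [0, 0].toTensor                 # tensor-tensor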
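Finally, a sketch of the gray coding use case mentioned in the commit message.
`grayEncode` is a hypothetical helper, not part of this patch; the
binary-reflected Gray code of an integer b is `b xor (b shr 1)`, which maps
directly onto the new tensor operators:

    import arraymancer

    # Hypothetical helper (illustration only): element-wise
    # binary-reflected Gray code, g = b xor (b shr 1).
    proc grayEncode(t: Tensor[int]): Tensor[int] =
      bitxor(t, t shr 1)

    doAssert [0, 1, 2, 3, 4].toTensor.grayEncode == [0, 1, 3, 2, 6].toTensor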