diff --git a/docs/operators.md b/docs/operators.md index 47b30cee..d492c1e3 100644 --- a/docs/operators.md +++ b/docs/operators.md @@ -36,7 +36,7 @@ _This file is automatically generated from the def files via [this script](/tool | [ConvTranspose](https://github.com/onnx/onnx/blob/master/docs/Operators.md#ConvTranspose) | | | | | [Cos](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Cos) | [7+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cos-7) | | [7+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cos-7) | | [Cosh](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Cosh) | [9+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cosh-9) | | | -| [CumSum](https://github.com/onnx/onnx/blob/master/docs/Operators.md#CumSum) | | | | +| [CumSum](https://github.com/onnx/onnx/blob/master/docs/Operators.md#CumSum) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | | [DepthToSpace](https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace) | | | | | [DequantizeLinear](https://github.com/onnx/onnx/blob/master/docs/Operators.md#DequantizeLinear) | | | | | [Det](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Det) | | | | diff --git a/lib/backends/cpu/op-resolve-rules.ts b/lib/backends/cpu/op-resolve-rules.ts index c77a8b7e..ad463b3a 100644 --- a/lib/backends/cpu/op-resolve-rules.ts +++ b/lib/backends/cpu/op-resolve-rules.ts @@ -10,6 +10,7 @@ import {CpuBinaryOp} from './ops/binary-op'; import {CpuCast} from './ops/cast'; import {CpuConcat} from './ops/concat'; import {CpuConv} from './ops/conv'; +import {CpuCumSum} from './ops/cumsum'; import {CpuDropout} from './ops/dropout'; import {CpuExpand} from './ops/expand'; import {CpuFlatten} from './ops/flatten'; @@ -112,4 +113,5 @@ export const CPU_OP_RESOLVE_RULES: 
ReadonlyArray = [ ['Upsample', '', '7-8', () => new CpuUpsample()], ['Upsample', '', '9', () => new CpuUpsampleV9()], ['Xor', '', '7+', () => new CpuBinaryOp(['bool'], (e1, e2) => (e1 ^ e2))], + ['CumSum', '', '11+', () => new CpuCumSum()], ]; diff --git a/lib/backends/cpu/ops/cumsum.ts b/lib/backends/cpu/ops/cumsum.ts new file mode 100644 index 00000000..8094edd5 --- /dev/null +++ b/lib/backends/cpu/ops/cumsum.ts @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +import {CumSum} from '../../../ops/cumsum'; +import {Tensor} from '../../../tensor'; +import {ShapeUtil} from '../../../util'; +import {CpuInferenceHandler} from '../inference-handler'; + +export class CpuCumSum extends CumSum { + run(inferenceHandler: CpuInferenceHandler, inputs: Tensor[]): Tensor[] { + const ax = inputs[1].integerData[0]; + const output = cumsum(inputs[0], ax, this.exclusive, this.reverse); + return [output]; + } +} + +export function cumsum(x: Tensor, axis: number, exclusive: boolean, reverse: boolean) { + const y = new Tensor(x.dims, x.type); + + if (axis < 0) { + axis = y.dims.length + axis; + } + + const index: number[] = new Array(y.dims.length).fill(0); + let i = 0; + + if (reverse) { + i = y.data.length - 1; + for (let j = 0; j < y.dims.length; j++) { + index[j] = y.dims[j] - 1; + } + } + + while (i < y.data.length && i >= 0) { + const prevIndex = updateIndex(index, axis, index[axis] + (reverse ? 
1 : -1)); + + const start = (index[axis] === 0 && !reverse) || (index[axis] === (y.dims[axis] - 1) && reverse); + + if (start && !exclusive) { + y.set(index, x.get(index)); + } else if (start && exclusive) { + y.set(index, 0); + } else if (!start && !exclusive) { + const prevValue = y.get(prevIndex) as number; + y.set(index, prevValue + (x.get(index) as number)); + } else { + const prevValue = y.get(prevIndex) as number; + y.set(index, prevValue + (x.get(prevIndex) as number)); + } + + if (reverse) { + ShapeUtil.decrementIndex(index, x.dims); + i--; + } else { + ShapeUtil.incrementIndex(index, x.dims); + i++; + } + } + + return y; +} + +function updateIndex(index: number[], axis: number, value: number) { + const result = index.slice(); + result[axis] = value; + return result; +} diff --git a/lib/backends/wasm/op-resolve-rules.ts b/lib/backends/wasm/op-resolve-rules.ts index 92fb6141..afd423de 100644 --- a/lib/backends/wasm/op-resolve-rules.ts +++ b/lib/backends/wasm/op-resolve-rules.ts @@ -7,6 +7,7 @@ import {WasmBatchNormalization} from './ops/batch-normalization'; import {WasmBinaryOp} from './ops/binary-op'; import {WasmClip} from './ops/clip'; import {WasmConv} from './ops/conv'; +import {WasmCumSum} from './ops/cumsum'; import {WasmGemm} from './ops/gemm'; import {WasmInstanceNormalization} from './ops/instance-normalization'; import {WasmMatMul} from './ops/matmul'; @@ -36,4 +37,5 @@ export const WASM_OP_RESOLVE_RULES: ReadonlyArray = [ ['Sub', '', '7+', () => new WasmBinaryOp(['float32'], 'Sub')], ['Sum', '', '6+', () => new WasmSum()], // TODO: support multidirectional broadcast for Sum-8 ['Xor', '', '7+', () => new WasmBinaryOp(['bool'], 'Xor')], + ['CumSum', '', '11+', () => new WasmCumSum()], ]; diff --git a/lib/backends/wasm/ops/cumsum.ts b/lib/backends/wasm/ops/cumsum.ts new file mode 100644 index 00000000..caa6019e --- /dev/null +++ b/lib/backends/wasm/ops/cumsum.ts @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. 
+// Licensed under the MIT license. + +import {CumSum} from '../../../ops/cumsum'; +import {Tensor} from '../../../tensor'; +import {ShapeUtil} from '../../../util'; +import {WasmBinding} from '../../../wasm-binding'; +import {WasmInferenceHandler} from '../inference-handler'; + +export class WasmCumSum extends CumSum { + run(inferenceHandler: WasmInferenceHandler, inputs: Tensor[]): Tensor[] { + const ax = inputs[1].integerData[0]; + + const outputSize = ShapeUtil.size(inputs[0].dims); + const resultData = new Float32Array(outputSize); + WasmBinding.getInstance().ccall( + '_cumsum_f32', [inputs[0].floatData, 'float32ptr'], [inputs[0].dims, 'int32ptr'], + [inputs[0].dims.length, 'int32'], [ax, 'int32'], [this.exclusive, 'bool'], [this.reverse, 'bool'], + [resultData, 'float32ptr', 'out']); + + const result = new Tensor(inputs[0].dims, inputs[0].type); + result.floatData.set(resultData); + return [result]; + } + + // overriding the checkInputTypes() in the base class because Wasm backend has special type limitations + checkInputTypes(inputs: Tensor[]): boolean { + // currently Wasm backend only supports 'float32' input type + if (inputs[0].type !== 'float32') { + return false; + } + + return true; + } +} diff --git a/lib/backends/webgl/op-resolve-rules.ts b/lib/backends/webgl/op-resolve-rules.ts index 7b9d56fe..30595fc4 100644 --- a/lib/backends/webgl/op-resolve-rules.ts +++ b/lib/backends/webgl/op-resolve-rules.ts @@ -9,6 +9,7 @@ import * as binaryOps from './ops/binary-op'; import {WebGLClip} from './ops/clip'; import {WebGLConcat} from './ops/concat'; import {WebGLConv} from './ops/conv'; +import {WebGLCumSum} from './ops/cumsum'; import {WebGLDropout} from './ops/dropout'; import {WebGLElu} from './ops/elu'; import {WebGLFlatten} from './ops/flatten'; @@ -105,4 +106,5 @@ export const WEBGL_OP_RESOLVE_RULES: ReadonlyArray = [ ['Upsample', '', '7-8', () => new WebGLUpsample()], ['Unsqueeze', '', '1+', () => new WebGLUnsqueeze()], ['Xor', '', '7+', () => new 
binaryOps.WebGLBinaryOp(['bool'], binaryOps.glslXor())], + ['CumSum', '', '11+', () => new WebGLCumSum()], ]; diff --git a/lib/backends/webgl/ops/cumsum.ts b/lib/backends/webgl/ops/cumsum.ts new file mode 100644 index 00000000..4e20eb60 --- /dev/null +++ b/lib/backends/webgl/ops/cumsum.ts @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +import {CumSum} from '../../../ops/cumsum'; +import {Tensor} from '../../../tensor'; +import {WebGLInferenceHandler} from '../inference-handler'; +import {ProgramInfo, RunData, WebGLOperator} from '../types'; + +export class WebGLCumSum extends CumSum implements WebGLOperator { + run(inferenceHandler: WebGLInferenceHandler, inputs: Tensor[]): Tensor[] { + return inferenceHandler.run(this, inputs); + } + createProgramInfo(inferenceHandler: WebGLInferenceHandler, inputs: Tensor[]): ProgramInfo { + const ax = inputs[1].integerData[0]; + const rank = inputs[0].dims.length; + const dims = inputs[0].dims; + + const startIx = this.reverse ? (dims[ax] - 1) : 0; + const comp = this.exclusive ? '' : '='; + const condition = this.reverse ? `k >${comp} endIx` : `k <${comp} endIx`; + const update = this.reverse ? 
'k--' : 'k++'; + + const shaderSource = ` + float process(int indices[${rank}]) { + float value = 0.0; + int endIx = indices[${ax}]; + for (int k=${startIx}; ${condition}; ${update}) { + indices[${ax}] = k; + value += _A(indices); + } + return value; + }`; + const inputLayouts = [inferenceHandler.getOrCreateTextureLayout(inputs[0])]; + return { + inputLayouts, + outputLayout: inferenceHandler.createTextureLayoutFromShape(inputs[0].dims), + samplers: ['A'], + shaderSource, + }; + } + + createRunData(inferenceHandler: WebGLInferenceHandler, programInfo: ProgramInfo, inputs: Tensor[]): RunData { + const inputTDs = [inferenceHandler.getOrCreateTextureData(inputs[0], programInfo.inputLayouts[0])]; + return { + inputTextureDatas: inputTDs, + outputTextureData: + inferenceHandler.createTextureDataFromLayout(programInfo.outputLayout, inputTDs[0].tensor.type), + uniformData: {} + }; + } +} diff --git a/lib/ops/cumsum.ts b/lib/ops/cumsum.ts new file mode 100644 index 00000000..8413c246 --- /dev/null +++ b/lib/ops/cumsum.ts @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+import {Attribute} from '../attribute'; +import {InferenceHandler} from '../backend'; +import {Operator} from '../operators'; +import {Tensor} from '../tensor'; + +export abstract class CumSum implements Operator { + abstract run(inferenceHandler: InferenceHandler, inputs: Tensor[]): Tensor[]|Promise; + + initialize(attributes: Attribute): void { + this.exclusive = attributes.getInt('exclusive', 0) === 1; + this.reverse = attributes.getInt('reverse', 0) === 1; + } + + checkInputs(inputs: Tensor[]): boolean { + if (!inputs || inputs.length !== 2) { + return false; + } + + return this.checkInputTypes(inputs); + } + + protected checkInputTypes(inputs: Tensor[]): boolean { + if (inputs[1].type !== 'int32' || inputs[1].dims.length !== 1) { + return false; + } + + return true; + } + + protected exclusive: boolean; + protected reverse: boolean; +} diff --git a/lib/util.ts b/lib/util.ts index cfab8467..2ff00160 100644 --- a/lib/util.ts +++ b/lib/util.ts @@ -503,6 +503,35 @@ export class ShapeUtil { } } + // Decrement an index into a tensor (in lexicographic + // ordering), wrapping around the specified lower bound. + /** + * Decrement an index into a tensor (in lexicographic ordering), wrapping around the specified upper_bound. + * @param index Given index to decrement (Will be mutated) + * @param dims The dimensions of the tensor for which the given index corresponds to + * @param axisToDecrementOn The 1-indexed axis to decrement on. 
If undefined, axisToDecrementOn == rank + */ + static decrementIndex(index: number[], dims: ReadonlyArray, axisToDecrementOn?: number) { + if (dims.length === 0 || index.length === 0) { + throw new Error(`Index decrementing unsupported for scalar Tensor`); + } + if (axisToDecrementOn === undefined) { + axisToDecrementOn = dims.length; + } else { + if (axisToDecrementOn <= 0 || axisToDecrementOn > dims.length) { + throw new Error(`Incorrect axis to decrement on`); + } + } + + for (let k = axisToDecrementOn - 1; k >= 0; --k) { + index[k]--; + if (index[k] >= 0) { + break; + } + index[k] = dims[k] - 1; + } + } + /** * Produces a new dimensions array based on the values in the 'originalDimensions' and 'shape' array * Used in Reshape diff --git a/src/wasm-build-config.json b/src/wasm-build-config.json index 95a99ee8..465f3365 100644 --- a/src/wasm-build-config.json +++ b/src/wasm-build-config.json @@ -22,6 +22,7 @@ "_clip_f32", "_instance_normalization_f32", "_sum_f32", - "_softmax_f32" + "_softmax_f32", + "_cumsum_f32" ] } diff --git a/src/wasm-ops/cumsum.cpp b/src/wasm-ops/cumsum.cpp new file mode 100644 index 00000000..7ec53731 --- /dev/null +++ b/src/wasm-ops/cumsum.cpp @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+
+#include "cumsum.h"
+#include "common.h"
+#include "utils/shape_utils.h"
+
+// Wasm interop method
+void cumsum_f32(void *data) {
+  uint32_t *dataIndex = static_cast<uint32_t *>(data);
+  uint32_t const argc = dataIndex[0];
+  const float *x = PARAM_FLOAT_PTR(data, dataIndex[1]);
+  const int32_t *dims = PARAM_INT32_PTR(data, dataIndex[2]);
+  const int32_t rank = PARAM_INT32(data, dataIndex[3]);
+  const int32_t axis = PARAM_INT32(data, dataIndex[4]);
+  const bool exclusive = PARAM_BOOL(data, dataIndex[5]);
+  const bool reverse = PARAM_BOOL(data, dataIndex[6]);
+
+  float *output = PARAM_FLOAT_PTR(data, dataIndex[7]);
+  cumsum_f32_imp(x, dims, rank, axis, exclusive, reverse, output);
+}
+
+// Core operator implementation
+void cumsum_f32_imp(const float *X, const int32_t *dims, const int32_t rank,
+                    int32_t axis, const bool exclusive, const bool reverse,
+                    float *Y) {
+  if (axis < 0) {
+    axis = rank + axis;
+  }
+
+  // Signed counter so the reverse walk terminates cleanly at -1 (size_t never goes negative).
+  int64_t i = 0;
+  std::vector<int32_t> dimsVector(dims, dims + rank);
+  std::vector<size_t> strides = ShapeUtils::compute_strides(dimsVector);
+  size_t size = ShapeUtils::size_from_dims(dimsVector);
+
+  if (reverse) {
+    i = static_cast<int64_t>(size) - 1;
+  }
+
+  while (i >= 0 && i < static_cast<int64_t>(size)) {
+
+    size_t indexAtAxis = ShapeUtils::offset_to_index(strides, i, axis);
+
+    size_t prevIndex = static_cast<size_t>(i) + (reverse ? strides.at(axis) : -strides.at(axis));
+
+    bool start = (indexAtAxis == 0 && !reverse) ||
+                 (indexAtAxis == static_cast<size_t>(dimsVector.at(axis) - 1) && reverse);
+
+    if (start && !exclusive) {
+      Y[i] = X[i];
+    } else if (start && exclusive) {
+      Y[i] = 0;
+    } else if (!start && !exclusive) {
+      Y[i] = Y[prevIndex] + X[i];
+    } else {
+      Y[i] = Y[prevIndex] + X[prevIndex];
+    }
+
+    if (reverse) {
+      i--;
+    } else {
+      i++;
+    }
+  }
+}
diff --git a/src/wasm-ops/cumsum.h b/src/wasm-ops/cumsum.h
new file mode 100644
index 00000000..3081f14b
--- /dev/null
+++ b/src/wasm-ops/cumsum.h
@@ -0,0 +1,13 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT license.
+
+#pragma once
+
+#include <stdint.h>
+
+extern "C" {
+void cumsum_f32(void *);
+void cumsum_f32_imp(const float *X, const int32_t *dims, const int32_t rank,
+                    int32_t axis, const bool exclusive, const bool reverse,
+                    float *Y);
+}
diff --git a/src/wasm-ops/utils/shape_utils.cpp b/src/wasm-ops/utils/shape_utils.cpp
index bd068b83..04e98c61 100644
--- a/src/wasm-ops/utils/shape_utils.cpp
+++ b/src/wasm-ops/utils/shape_utils.cpp
@@ -86,3 +86,13 @@ void ShapeUtils::offset_to_indices(const std::vector<size_t> &strides,
   }
   indices[indices.size() - 1] = offset;
 }
+
+size_t ShapeUtils::offset_to_index(const std::vector<size_t> &strides,
+                                   size_t offset, int32_t axis) {
+  size_t index = 0;
+  for (int32_t i = 0; i <= axis; ++i) {
+    index = offset / strides[i];
+    offset -= index * strides[i];
+  }
+  return index;
+}
diff --git a/src/wasm-ops/utils/shape_utils.h b/src/wasm-ops/utils/shape_utils.h
index 2d138136..91fea4c3 100644
--- a/src/wasm-ops/utils/shape_utils.h
+++ b/src/wasm-ops/utils/shape_utils.h
@@ -18,4 +18,7 @@ std::vector<size_t> offset_to_indices(const std::vector<size_t> &strides,
 // Fills in values in the indices vector. Assumes it is of the required size.
void offset_to_indices(const std::vector &strides, size_t offset, std::vector &indices); +// Gives the index at a specific axis from a given offset +size_t offset_to_index(const std::vector &strides, size_t offset, + int32_t axis); }; // namespace ShapeUtils diff --git a/test/data/ops/cumsum.jsonc b/test/data/ops/cumsum.jsonc new file mode 100644 index 00000000..b8d40b60 --- /dev/null +++ b/test/data/ops/cumsum.jsonc @@ -0,0 +1,341 @@ +[ + { + "name": "Cumsum without attributes", + "operator": "CumSum", + "opsets": [ + { + "domain": "", + "version": 11 + } + ], + "attributes": [], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [1, 3, 6, 10, 15], + "dims": [5], + "type": "float32" + } + ] + }, + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [1, 2, 3, 5, 7, 9], + "dims": [2, 3], + "type": "float32" + } + ] + }, + { + "name": "T[2]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [1, 3, 6, 4, 9, 15], + "dims": [2, 3], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Cumsum exclusive", + "operator": "CumSum", + "opsets": [ + { + "domain": "", + "version": 11 + } + ], + "attributes": [ + { + "name": "exclusive", + "data": 1, + "type": "int" + } + ], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [0, 1, 3, 6, 10], + "dims": [5], + "type": "float32" + } + ] + }, + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": 
"float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [0, 0, 0, 1, 2, 3], + "dims": [2, 3], + "type": "float32" + } + ] + }, + { + "name": "T[2]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [0, 1, 3, 0, 4, 9], + "dims": [2, 3], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Cumsum reverse", + "operator": "CumSum", + "opsets": [ + { + "domain": "", + "version": 11 + } + ], + "attributes": [ + { + "name": "reverse", + "data": 1, + "type": "int" + } + ], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [15, 14, 12, 9, 5], + "dims": [5], + "type": "float32" + } + ] + }, + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [5, 7, 9, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + } + ] + }, + { + "name": "T[2]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [6, 5, 3, 15, 11, 6], + "dims": [2, 3], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Cumsum exclusive and reverse", + "operator": "CumSum", + "opsets": [ + { + "domain": "", + "version": 11 + } + ], + "attributes": [ + { + "name": "reverse", + "data": 1, + "type": "int" + }, + { + "name": "exclusive", + "data": 1, + "type": "int" + } + ], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [14, 12, 9, 5, 0], + "dims": 
[5], + "type": "float32" + } + ] + }, + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [4, 5, 6, 0, 0, 0], + "dims": [2, 3], + "type": "float32" + } + ] + }, + { + "name": "T[2]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [5, 3, 0, 11, 6, 0], + "dims": [2, 3], + "type": "float32" + } + ] + } + ] + } +] diff --git a/test/test-suite-whitelist.jsonc b/test/test-suite-whitelist.jsonc index 9a1d2d63..6b9477e8 100644 --- a/test/test-suite-whitelist.jsonc +++ b/test/test-suite-whitelist.jsonc @@ -225,7 +225,13 @@ "test_xor_bcast4v4d", "test_xor2d", "test_xor3d", - "test_xor4d" + "test_xor4d", + "v{11,12}/test_cumsum_1d_exclusive", + "v{11,12}/test_cumsum_1d_reverse", + "v{11,12}/test_cumsum_1d_reverse_exclusive", + "v{11,12}/test_cumsum_2d_axis_0", + "v{11,12}/test_cumsum_2d_axis_1", + "v{11,12}/test_cumsum_2d_negative_axis" ], "ops": [ "abs.jsonc", @@ -265,7 +271,8 @@ "softmax.jsonc", "tan.jsonc", "transpose.jsonc", - "xor.jsonc" + "xor.jsonc", + "cumsum.jsonc" ] }, "webgl": { @@ -485,7 +492,13 @@ "test_xor_bcast4v4d", "test_xor2d", "test_xor3d", - "test_xor4d" + "test_xor4d", + "v{11,12}/test_cumsum_1d_exclusive", + "v{11,12}/test_cumsum_1d_reverse", + "v{11,12}/test_cumsum_1d_reverse_exclusive", + "v{11,12}/test_cumsum_2d_axis_0", + "v{11,12}/test_cumsum_2d_axis_1", + "v{11,12}/test_cumsum_2d_negative_axis" ], "ops": [ "abs.jsonc", @@ -527,7 +540,8 @@ "sub.jsonc", "tan.jsonc", "transpose.jsonc", - "xor.jsonc" + "xor.jsonc", + "cumsum.jsonc" ] }, "wasm": { @@ -624,7 +638,13 @@ "test_globalmaxpool_precomputed", "test_globalmaxpool", "test_instancenorm_epsilon", - "test_instancenorm_example" + "test_instancenorm_example", + "v{11,12}/test_cumsum_1d_exclusive", + 
"v{11,12}/test_cumsum_1d_reverse", + "v{11,12}/test_cumsum_1d_reverse_exclusive", + "v{11,12}/test_cumsum_2d_axis_0", + "v{11,12}/test_cumsum_2d_axis_1", + "v{11,12}/test_cumsum_2d_negative_axis" ], "ops": [ // Check in op tests that have native Wasm implementations @@ -639,7 +659,8 @@ "and.jsonc", "or.jsonc", "xor.jsonc", - "matmul.jsonc" + "matmul.jsonc", + "cumsum.jsonc" ] } }