From f7f077f3645689d8ce756293befe187cb31b7991 Mon Sep 17 00:00:00 2001 From: Frithjof Winkelmann Date: Sun, 6 Sep 2020 12:37:11 +0200 Subject: [PATCH 1/8] Add CPU implementation for cumsum --- lib/backends/cpu/op-resolve-rules.ts | 2 + lib/backends/cpu/ops/cumsum.ts | 67 ++++++++ lib/ops/cumsum.ts | 34 ++++ lib/util.ts | 29 ++++ test/data/ops/cumsum.jsonc | 245 +++++++++++++++++++++++++++ 5 files changed, 377 insertions(+) create mode 100644 lib/backends/cpu/ops/cumsum.ts create mode 100644 lib/ops/cumsum.ts create mode 100644 test/data/ops/cumsum.jsonc diff --git a/lib/backends/cpu/op-resolve-rules.ts b/lib/backends/cpu/op-resolve-rules.ts index c77a8b7e..cce29099 100644 --- a/lib/backends/cpu/op-resolve-rules.ts +++ b/lib/backends/cpu/op-resolve-rules.ts @@ -28,6 +28,7 @@ import {CpuSlice, CpuSliceV10} from './ops/slice'; import {CpuSoftmax} from './ops/softmax'; import {CpuSqueeze} from './ops/squeeze'; import {CpuSum} from './ops/sum'; +import {CpuCumSum} from './ops/cumsum'; import {CpuTile} from './ops/tile'; import {CpuTranspose} from './ops/transpose'; import * as unaryOps from './ops/unary-op'; @@ -112,4 +113,5 @@ export const CPU_OP_RESOLVE_RULES: ReadonlyArray = [ ['Upsample', '', '7-8', () => new CpuUpsample()], ['Upsample', '', '9', () => new CpuUpsampleV9()], ['Xor', '', '7+', () => new CpuBinaryOp(['bool'], (e1, e2) => (e1 ^ e2))], + ['CumSum', '', '11+', () => new CpuCumSum()], ]; diff --git a/lib/backends/cpu/ops/cumsum.ts b/lib/backends/cpu/ops/cumsum.ts new file mode 100644 index 00000000..df79a807 --- /dev/null +++ b/lib/backends/cpu/ops/cumsum.ts @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +import { Tensor } from '../../../tensor'; +import { ShapeUtil } from '../../../util'; +import { CpuInferenceHandler } from '../inference-handler'; +import { CumSum } from '../../../ops/cumsum'; + +export class CpuCumSum extends CumSum { + run(inferenceHandler: CpuInferenceHandler, inputs: Tensor[]): Tensor[] { + const ax = inputs[1].integerData[0]; + const output = cumsum(inputs[0], ax, this.exclusive, this.reverse); + return [output]; + } +} + +export function cumsum(x: Tensor, axis: number, exclusive: boolean, reverse: boolean) { + const y = new Tensor(x.dims, x.type); + + if (axis < 0) { + axis = y.dims.length + axis; + } + + const index: number[] = new Array(y.dims.length).fill(0); + let i = 0; + + if (reverse) { + i = y.data.length - 1; + for (let j = 0; j < y.dims.length; j++) { + index[j] = y.dims[j] - 1; + } + } + + while (i < y.data.length && i >= 0) { + const prevIndex = updateIndex(index, axis, index[axis] + (reverse ? 
1 : -1)); + + const start = (index[axis] === 0 && !reverse) || (index[axis] === (y.dims[axis] - 1) && reverse); + + if (start && !exclusive) { + y.set(index, x.get(index)); + } else if (start && exclusive) { + y.set(index, 0); + } else if (!start && !exclusive) { + const prevValue = y.get(prevIndex) as number; + y.set(index, prevValue + (x.get(index) as number)); + } else { + const prevValue = y.get(prevIndex) as number; + y.set(index, prevValue + (x.get(prevIndex) as number)); + } + + if (reverse) { + ShapeUtil.decrementIndex(index, x.dims); + i--; + } else { + ShapeUtil.incrementIndex(index, x.dims); + i++; + } + } + + return y; +} + +function updateIndex(index: number[], axis: number, value: number) { + const result = index.slice(); + result[axis] = value; + return result; +} diff --git a/lib/ops/cumsum.ts b/lib/ops/cumsum.ts new file mode 100644 index 00000000..87ef1606 --- /dev/null +++ b/lib/ops/cumsum.ts @@ -0,0 +1,34 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. +import { Attribute } from '../attribute'; +import { InferenceHandler } from '../backend'; +import { Operator } from '../operators'; +import { Tensor } from '../tensor'; + +export abstract class CumSum implements Operator { + abstract run(inferenceHandler: InferenceHandler, inputs: Tensor[]): Tensor[] | Promise; + + initialize(attributes: Attribute): void { + this.exclusive = attributes.getInt('exclusive', 0) === 1; + this.reverse = attributes.getInt('reverse', 0) === 1; + } + + checkInputs(inputs: Tensor[]): boolean { + if (!inputs || inputs.length !== 2) { + return false; + } + + return this.checkInputTypes(inputs); + } + + protected checkInputTypes(inputs: Tensor[]): boolean { + if (inputs[1].type !== 'int32' || inputs[1].dims.length !== 1) { + return false; + } + + return true; + } + + protected exclusive: boolean; + protected reverse: boolean; +} diff --git a/lib/util.ts b/lib/util.ts index cfab8467..2ff00160 100644 --- a/lib/util.ts +++ b/lib/util.ts @@ -503,6 +503,35 @@ export class ShapeUtil { } } + // Decrement an index into a tensor (in lexicographic + // ordering), wrapping around the specified lower bound. + /** + * Decrement an index into a tensor (in lexicographic ordering), wrapping around the specified upper_bound. + * @param index Given index to decrement (Will be mutated) + * @param dims The dimensions of the tensor for which the given index corresponds to + * @param axisToDecrementOn The 1-indexed axis to decrement on. 
If undefined, axisToDecrementOn == rank + */ + static decrementIndex(index: number[], dims: ReadonlyArray, axisToDecrementOn?: number) { + if (dims.length === 0 || index.length === 0) { + throw new Error(`Index decrementing unsupported for scalar Tensor`); + } + if (axisToDecrementOn === undefined) { + axisToDecrementOn = dims.length; + } else { + if (axisToDecrementOn <= 0 || axisToDecrementOn > dims.length) { + throw new Error(`Incorrect axis to decrement on`); + } + } + + for (let k = axisToDecrementOn - 1; k >= 0; --k) { + index[k]--; + if (index[k] >= 0) { + break; + } + index[k] = dims[k] - 1; + } + } + /** * Produces a new dimensions array based on the values in the 'originalDimensions' and 'shape' array * Used in Reshape diff --git a/test/data/ops/cumsum.jsonc b/test/data/ops/cumsum.jsonc new file mode 100644 index 00000000..fdc37a33 --- /dev/null +++ b/test/data/ops/cumsum.jsonc @@ -0,0 +1,245 @@ +[{ + "name": "Cumsum without attributes", + "operator": "CumSum", + "opsets": [{ + "domain": "", + "version": 11 + }], + "attributes": [], + "cases": [{ + "name": "T[0]", + "inputs": [{ + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, { + "data": [0], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [1, 3, 6, 10, 15], + "dims": [5], + "type": "float32" + }] + }, { + "name": "T[1]", + "inputs": [{ + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, { + "data": [0], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [1, 2, 3, 5, 7, 9], + "dims": [2, 3], + "type": "float32" + }] + }, { + "name": "T[2]", + "inputs": [{ + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, { + "data": [1], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [1, 3, 6, 4, 9, 15], + "dims": [2, 3], + "type": "float32" + }] + }] +}, { + "name": "Cumsum exclusive", + "operator": "CumSum", + "opsets": [{ + "domain": "", + "version": 11 + }], + "attributes": [{ + "name": "exclusive", + "data": 1, + "type": "int" + }], + "cases": [{ + "name": "T[0]", + "inputs": [{ + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, { + "data": [0], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [0, 1, 3, 6, 10], + "dims": [5], + "type": "float32" + }] + }, { + "name": "T[1]", + "inputs": [{ + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, { + "data": [0], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [0, 0, 0, 1, 2, 3], + "dims": [2, 3], + "type": "float32" + }] + }, { + "name": "T[2]", + "inputs": [{ + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, { + "data": [1], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [0, 1, 3, 0, 4, 9], + "dims": [2, 3], + "type": "float32" + }] + }] +}, { + "name": "Cumsum reverse", + "operator": "CumSum", + "opsets": [{ + "domain": "", + "version": 11 + }], + "attributes": [{ + "name": "reverse", + "data": 1, + "type": "int" + }], + "cases": [{ + "name": "T[0]", + "inputs": [{ + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, { + "data": [0], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [15, 14, 12, 9, 5], + "dims": [5], + "type": "float32" + }] + }, { + "name": "T[1]", + "inputs": [{ + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, { + "data": [0], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [5, 7, 9, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }] + }, { + "name": "T[2]", + "inputs": [{ + "data": [1, 
2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, { + "data": [1], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [6, 5, 3, 15, 11, 6], + "dims": [2, 3], + "type": "float32" + }] + }] +}, { + "name": "Cumsum exclusive and reverse", + "operator": "CumSum", + "opsets": [{ + "domain": "", + "version": 11 + }], + "attributes": [{ + "name": "reverse", + "data": 1, + "type": "int" + }, { + "name": "exclusive", + "data": 1, + "type": "int" + }], + "cases": [{ + "name": "T[0]", + "inputs": [{ + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, { + "data": [0], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [14, 12, 9, 5, 0], + "dims": [5], + "type": "float32" + }] + }, { + "name": "T[1]", + "inputs": [{ + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, { + "data": [0], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [4, 5, 6, 0, 0, 0], + "dims": [2, 3], + "type": "float32" + }] + }, { + "name": "T[2]", + "inputs": [{ + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, { + "data": [1], + "dims": [1], + "type": "int32" + }], + "outputs": [{ + "data": [5, 3, 0, 11, 6, 0], + "dims": [2, 3], + "type": "float32" + }] + }] +}] From 692d6e083b3941932a3394f8db95ebf2256cd2df Mon Sep 17 00:00:00 2001 From: Frithjof Winkelmann Date: Sun, 6 Sep 2020 13:49:34 +0200 Subject: [PATCH 2/8] Add wasm implementation of CumSum --- lib/backends/wasm/op-resolve-rules.ts | 2 + lib/backends/wasm/ops/cumsum.ts | 35 ++++++++++++++ src/wasm-build-config.json | 3 +- src/wasm-ops/cumsum.cpp | 66 +++++++++++++++++++++++++++ src/wasm-ops/cumsum.h | 12 +++++ src/wasm-ops/utils/shape_utils.cpp | 10 ++++ src/wasm-ops/utils/shape_utils.h | 3 ++ 7 files changed, 130 insertions(+), 1 deletion(-) create mode 100644 lib/backends/wasm/ops/cumsum.ts create mode 100644 src/wasm-ops/cumsum.cpp create mode 100644 src/wasm-ops/cumsum.h diff --git a/lib/backends/wasm/op-resolve-rules.ts b/lib/backends/wasm/op-resolve-rules.ts index 92fb6141..42c077ae 100644 --- a/lib/backends/wasm/op-resolve-rules.ts +++ b/lib/backends/wasm/op-resolve-rules.ts @@ -13,6 +13,7 @@ import {WasmMatMul} from './ops/matmul'; import {WasmAveragePool, WasmGlobalAveragePool, WasmGlobalMaxPool, WasmMaxPool} from './ops/pool'; import {WasmSoftmax} from './ops/softmax'; import {WasmSum} from './ops/sum'; +import {WasmCumSum} from './ops/cumsum'; export const WASM_OP_RESOLVE_RULES: ReadonlyArray = [ ['Add', '', '7+', () => new WasmBinaryOp(['float32'], 'Add')], @@ -36,4 +37,5 @@ export const WASM_OP_RESOLVE_RULES: ReadonlyArray = [ ['Sub', '', '7+', () => new WasmBinaryOp(['float32'], 'Sub')], ['Sum', '', '6+', () => new WasmSum()], // TODO: support multidirectional broadcast for Sum-8 ['Xor', '', '7+', () => new WasmBinaryOp(['bool'], 'Xor')], + ['CumSum', '', '11+', () => new WasmCumSum()], ]; diff --git a/lib/backends/wasm/ops/cumsum.ts b/lib/backends/wasm/ops/cumsum.ts new file mode 100644 index 00000000..98ed09fe --- /dev/null +++ b/lib/backends/wasm/ops/cumsum.ts @@ -0,0 +1,35 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. 
+
+import { Tensor } from '../../../tensor';
+import { ShapeUtil } from '../../../util';
+import { WasmBinding } from '../../../wasm-binding';
+import { WasmInferenceHandler } from '../inference-handler';
+import { CumSum } from '../../../ops/cumsum';
+
+export class WasmCumSum extends CumSum {
+  run(inferenceHandler: WasmInferenceHandler, inputs: Tensor[]): Tensor[] {
+    const ax = inputs[1].integerData[0];
+
+    const outputSize = ShapeUtil.size(inputs[0].dims);
+    const resultData = new Float32Array(outputSize);
+    WasmBinding.getInstance().ccall(
+      '_cumsum_f32', [inputs[0].floatData, 'float32ptr'], [inputs[0].dims, 'int32ptr'],
+      [inputs[0].dims.length, 'int32'], [ax, 'int32'], [this.exclusive, 'bool'], [this.reverse, 'bool'],
+      [resultData, 'float32ptr', 'out']);
+
+    const result = new Tensor(inputs[0].dims, inputs[0].type);
+    result.floatData.set(resultData);
+    return [result];
+  }
+
+  // overriding the checkInputTypes() in the base class because Wasm backend has special type limitations
+  checkInputTypes(inputs: Tensor[]): boolean {
+    // currently Wasm backend only supports 'float32' input type
+    if (inputs[0].type !== 'float32') {
+      return false;
+    }
+
+    return true;
+  }
+}
diff --git a/src/wasm-build-config.json b/src/wasm-build-config.json
index 95a99ee8..465f3365 100644
--- a/src/wasm-build-config.json
+++ b/src/wasm-build-config.json
@@ -22,6 +22,7 @@
     "_clip_f32",
     "_instance_normalization_f32",
     "_sum_f32",
-    "_softmax_f32"
+    "_softmax_f32",
+    "_cumsum_f32"
   ]
 }
diff --git a/src/wasm-ops/cumsum.cpp b/src/wasm-ops/cumsum.cpp
new file mode 100644
index 00000000..79ecb788
--- /dev/null
+++ b/src/wasm-ops/cumsum.cpp
@@ -0,0 +1,66 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT license.
+
+#include "common.h"
+#include "sum.h"
+#include "utils/shape_utils.h"
+
+// Wasm interop method
+void cumsum_f32(void *data) {
+  uint32_t *dataIndex = static_cast<uint32_t *>(data);
+  uint32_t const argc = dataIndex[0];
+  const float *x = PARAM_FLOAT_PTR(data, dataIndex[1]);
+  const int32_t *dims = PARAM_INT32_PTR(data, dataIndex[2]);
+  const int32_t rank = PARAM_INT32(data, dataIndex[3]);
+  const int32_t axis = PARAM_INT32(data, dataIndex[4]);
+  const bool exclusive = PARAM_BOOL(data, dataIndex[5]);
+  const bool reverse = PARAM_BOOL(data, dataIndex[6]);
+
+  float *output = PARAM_FLOAT_PTR(data, dataIndex[7]);
+  cumsum_f32_imp(x, dims, rank, axis, exclusive, reverse, output);
+}
+
+// Core operator implementation
+void cumsum_f32_imp(const float *X, const int32_t *dims, const int32_t rank,
+                    int32_t axis, const bool exclusive, const bool reverse,
+                    float *Y) {
+  if (axis < 0) {
+    axis = rank + axis;
+  }
+
+  // const index : number[] = new Array(y.dims.length).fill(0);
+  size_t i = 0;
+  std::vector<int32_t> dimsVector(dims, dims + rank);
+  std::vector<size_t> strides = ShapeUtils::compute_strides(dimsVector);
+  size_t size = ShapeUtils::size_from_dims(dimsVector);
+
+  if (reverse) {
+    i = size - 1;
+  }
+
+  while (i < size && i >= 0) {
+
+    size_t indexAtAxis = ShapeUtils::offset_to_index(strides, i, axis);
+
+    size_t prevIndex = i + (reverse ? strides.at(axis) : -strides.at(axis));
+
+    bool start = (indexAtAxis == 0 && !reverse) ||
+                 (indexAtAxis == dimsVector.at(axis) - 1 && reverse);
+
+    if (start && !exclusive) {
+      Y[i] = X[i];
+    } else if (start && exclusive) {
+      Y[i] = 0;
+    } else if (!start && !exclusive) {
+      Y[i] = Y[prevIndex] + X[i];
+    } else {
+      Y[i] = Y[prevIndex] + X[prevIndex];
+    }
+
+    if (reverse) {
+      i--;
+    } else {
+      i++;
+    }
+  }
+}
diff --git a/src/wasm-ops/cumsum.h b/src/wasm-ops/cumsum.h
new file mode 100644
index 00000000..059a2eb2
--- /dev/null
+++ b/src/wasm-ops/cumsum.h
@@ -0,0 +1,12 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT license.
+
+#pragma once
+
+#include <stdint.h>
+
+extern "C" {
+void cumsum_f32(void *);
+void cumsum_f32_imp(float *X, int32_t *dims, int32_t rank, int32_t axis,
+                    bool exclusive, bool reverse, float *Y);
+}
diff --git a/src/wasm-ops/utils/shape_utils.cpp b/src/wasm-ops/utils/shape_utils.cpp
index bd068b83..04e98c61 100644
--- a/src/wasm-ops/utils/shape_utils.cpp
+++ b/src/wasm-ops/utils/shape_utils.cpp
@@ -86,3 +86,13 @@ void ShapeUtils::offset_to_indices(const std::vector<size_t> &strides,
   }
   indices[indices.size() - 1] = offset;
 }
+
+size_t ShapeUtils::offset_to_index(const std::vector<size_t> &strides,
+                                   size_t offset, int32_t axis) {
+  size_t index = 0;
+  for (int32_t i = 0; i <= axis; ++i) {
+    index = offset / strides[i];
+    offset -= index * strides[i];
+  }
+  return index;
+}
diff --git a/src/wasm-ops/utils/shape_utils.h b/src/wasm-ops/utils/shape_utils.h
index 2d138136..d67daafa 100644
--- a/src/wasm-ops/utils/shape_utils.h
+++ b/src/wasm-ops/utils/shape_utils.h
@@ -18,4 +18,7 @@ std::vector<size_t> offset_to_indices(const std::vector<size_t> &strides,
 // Fills in values in the indices vector. Assumes it is of the required size.
 void offset_to_indices(const std::vector<size_t> &strides, size_t offset,
                        std::vector<size_t> &indices);
+// Gives the index at a specific axis from a given offset
+size_t ShapeUtils::offset_to_index(const std::vector<size_t> &strides,
+                                   size_t offset, int32_t axis);
 }; // namespace ShapeUtils
From 10000b6143cd6d462d5c9f765009d9ee3cabc65d Mon Sep 17 00:00:00 2001
From: Frithjof Winkelmann
Date: Sun, 6 Sep 2020 13:58:33 +0200
Subject: [PATCH 3/8] Run formatters

---
 docs/operators.md                     |   2 +-
 lib/backends/cpu/op-resolve-rules.ts  |   2 +-
 lib/backends/cpu/ops/cumsum.ts        |   8 +-
 lib/backends/wasm/op-resolve-rules.ts |   2 +-
 lib/backends/wasm/ops/cumsum.ts       |  16 +-
 lib/ops/cumsum.ts                     |  10 +-
 test/data/ops/cumsum.jsonc            | 586 +++++++++++++++-----------
 7 files changed, 361 insertions(+), 265 deletions(-)

diff --git a/docs/operators.md b/docs/operators.md
index 47b30cee..71bdf574 100644
--- a/docs/operators.md
+++ b/docs/operators.md
@@ -36,7 +36,7 @@ _This file is automatically generated from the def files via [this script](/tool
 | [ConvTranspose](https://github.com/onnx/onnx/blob/master/docs/Operators.md#ConvTranspose) | | | |
 | [Cos](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Cos) | [7+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cos-7) | | [7+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cos-7) |
 | [Cosh](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Cosh) | [9+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cosh-9) | | |
-| [CumSum](https://github.com/onnx/onnx/blob/master/docs/Operators.md#CumSum) | | | |
+| [CumSum](https://github.com/onnx/onnx/blob/master/docs/Operators.md#CumSum) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | |
 | [DepthToSpace](https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace) | | | |
 | [DequantizeLinear](https://github.com/onnx/onnx/blob/master/docs/Operators.md#DequantizeLinear) | | | |
 | [Det](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Det) | | | |
diff --git a/lib/backends/cpu/op-resolve-rules.ts b/lib/backends/cpu/op-resolve-rules.ts
index cce29099..ad463b3a 100644
--- a/lib/backends/cpu/op-resolve-rules.ts
+++ b/lib/backends/cpu/op-resolve-rules.ts
@@ -10,6 +10,7 @@ import {CpuBinaryOp} from './ops/binary-op';
 import {CpuCast} from './ops/cast';
 import {CpuConcat} from './ops/concat';
 import {CpuConv} from './ops/conv';
+import {CpuCumSum} from './ops/cumsum';
 import {CpuDropout} from './ops/dropout';
 import {CpuExpand} from './ops/expand';
 import {CpuFlatten} from './ops/flatten';
@@ -28,7 +29,6 @@ import {CpuSlice, CpuSliceV10} from './ops/slice';
 import {CpuSoftmax} from './ops/softmax';
 import {CpuSqueeze} from './ops/squeeze';
 import {CpuSum} from './ops/sum';
-import {CpuCumSum} from './ops/cumsum';
 import {CpuTile} from './ops/tile';
 import {CpuTranspose} from './ops/transpose';
 import * as unaryOps from './ops/unary-op';
diff --git a/lib/backends/cpu/ops/cumsum.ts b/lib/backends/cpu/ops/cumsum.ts
index df79a807..8094edd5 100644
--- a/lib/backends/cpu/ops/cumsum.ts
+++ b/lib/backends/cpu/ops/cumsum.ts
@@ -1,10 +1,10 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT license.
-import { Tensor } from '../../../tensor'; -import { ShapeUtil } from '../../../util'; -import { CpuInferenceHandler } from '../inference-handler'; -import { CumSum } from '../../../ops/cumsum'; +import {CumSum} from '../../../ops/cumsum'; +import {Tensor} from '../../../tensor'; +import {ShapeUtil} from '../../../util'; +import {CpuInferenceHandler} from '../inference-handler'; export class CpuCumSum extends CumSum { run(inferenceHandler: CpuInferenceHandler, inputs: Tensor[]): Tensor[] { diff --git a/lib/backends/wasm/op-resolve-rules.ts b/lib/backends/wasm/op-resolve-rules.ts index 42c077ae..afd423de 100644 --- a/lib/backends/wasm/op-resolve-rules.ts +++ b/lib/backends/wasm/op-resolve-rules.ts @@ -7,13 +7,13 @@ import {WasmBatchNormalization} from './ops/batch-normalization'; import {WasmBinaryOp} from './ops/binary-op'; import {WasmClip} from './ops/clip'; import {WasmConv} from './ops/conv'; +import {WasmCumSum} from './ops/cumsum'; import {WasmGemm} from './ops/gemm'; import {WasmInstanceNormalization} from './ops/instance-normalization'; import {WasmMatMul} from './ops/matmul'; import {WasmAveragePool, WasmGlobalAveragePool, WasmGlobalMaxPool, WasmMaxPool} from './ops/pool'; import {WasmSoftmax} from './ops/softmax'; import {WasmSum} from './ops/sum'; -import {WasmCumSum} from './ops/cumsum'; export const WASM_OP_RESOLVE_RULES: ReadonlyArray = [ ['Add', '', '7+', () => new WasmBinaryOp(['float32'], 'Add')], diff --git a/lib/backends/wasm/ops/cumsum.ts b/lib/backends/wasm/ops/cumsum.ts index 98ed09fe..caa6019e 100644 --- a/lib/backends/wasm/ops/cumsum.ts +++ b/lib/backends/wasm/ops/cumsum.ts @@ -1,11 +1,11 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. -import { Tensor } from '../../../tensor'; -import { ShapeUtil } from '../../../util'; -import { WasmBinding } from '../../../wasm-binding'; -import { WasmInferenceHandler } from '../inference-handler'; -import { CumSum } from '../../../ops/cumsum'; +import {CumSum} from '../../../ops/cumsum'; +import {Tensor} from '../../../tensor'; +import {ShapeUtil} from '../../../util'; +import {WasmBinding} from '../../../wasm-binding'; +import {WasmInferenceHandler} from '../inference-handler'; export class WasmCumSum extends CumSum { run(inferenceHandler: WasmInferenceHandler, inputs: Tensor[]): Tensor[] { @@ -14,9 +14,9 @@ export class WasmCumSum extends CumSum { const outputSize = ShapeUtil.size(inputs[0].dims); const resultData = new Float32Array(outputSize); WasmBinding.getInstance().ccall( - '_cumsum_f32', [inputs[0].floatData, 'float32ptr'], [inputs[0].dims, 'int32ptr'], - [inputs[0].dims.length, 'int32'], [ax, 'int32'], [this.exclusive, 'bool'], [this.reverse, 'bool'], - [resultData, 'float32ptr', 'out']); + '_cumsum_f32', [inputs[0].floatData, 'float32ptr'], [inputs[0].dims, 'int32ptr'], + [inputs[0].dims.length, 'int32'], [ax, 'int32'], [this.exclusive, 'bool'], [this.reverse, 'bool'], + [resultData, 'float32ptr', 'out']); const result = new Tensor(inputs[0].dims, inputs[0].type); result.floatData.set(resultData); diff --git a/lib/ops/cumsum.ts b/lib/ops/cumsum.ts index 87ef1606..8413c246 100644 --- a/lib/ops/cumsum.ts +++ b/lib/ops/cumsum.ts @@ -1,12 +1,12 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. 
-import { Attribute } from '../attribute'; -import { InferenceHandler } from '../backend'; -import { Operator } from '../operators'; -import { Tensor } from '../tensor'; +import {Attribute} from '../attribute'; +import {InferenceHandler} from '../backend'; +import {Operator} from '../operators'; +import {Tensor} from '../tensor'; export abstract class CumSum implements Operator { - abstract run(inferenceHandler: InferenceHandler, inputs: Tensor[]): Tensor[] | Promise; + abstract run(inferenceHandler: InferenceHandler, inputs: Tensor[]): Tensor[]|Promise; initialize(attributes: Attribute): void { this.exclusive = attributes.getInt('exclusive', 0) === 1; diff --git a/test/data/ops/cumsum.jsonc b/test/data/ops/cumsum.jsonc index fdc37a33..b8d40b60 100644 --- a/test/data/ops/cumsum.jsonc +++ b/test/data/ops/cumsum.jsonc @@ -1,245 +1,341 @@ -[{ - "name": "Cumsum without attributes", - "operator": "CumSum", - "opsets": [{ - "domain": "", - "version": 11 - }], - "attributes": [], - "cases": [{ - "name": "T[0]", - "inputs": [{ - "data": [1, 2, 3, 4, 5], - "dims": [5], - "type": "float32" - }, { - "data": [0], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [1, 3, 6, 10, 15], - "dims": [5], - "type": "float32" - }] - }, { - "name": "T[1]", - "inputs": [{ - "data": [1, 2, 3, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }, { - "data": [0], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [1, 2, 3, 5, 7, 9], - "dims": [2, 3], - "type": "float32" - }] - }, { - "name": "T[2]", - "inputs": [{ - "data": [1, 2, 3, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }, { - "data": [1], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [1, 3, 6, 4, 9, 15], - "dims": [2, 3], - "type": "float32" - }] - }] -}, { - "name": "Cumsum exclusive", - "operator": "CumSum", - "opsets": [{ - "domain": "", - "version": 11 - }], - "attributes": [{ - "name": "exclusive", - "data": 1, - "type": "int" - }], - "cases": [{ - "name": "T[0]", - "inputs": [{ - "data": [1, 2, 3, 4, 5], - "dims": [5], - "type": "float32" - }, { - "data": [0], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [0, 1, 3, 6, 10], - "dims": [5], - "type": "float32" - }] - }, { - "name": "T[1]", - "inputs": [{ - "data": [1, 2, 3, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }, { - "data": [0], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [0, 0, 0, 1, 2, 3], - "dims": [2, 3], - "type": "float32" - }] - }, { - "name": "T[2]", - "inputs": [{ - "data": [1, 2, 3, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }, { - "data": [1], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [0, 1, 3, 0, 4, 9], - "dims": [2, 3], - "type": "float32" - }] - }] -}, { - "name": "Cumsum reverse", - "operator": "CumSum", - "opsets": [{ - "domain": "", - "version": 11 - }], - "attributes": [{ - "name": "reverse", - "data": 1, - "type": "int" - }], - "cases": [{ - "name": "T[0]", - "inputs": [{ - "data": [1, 2, 3, 4, 5], - "dims": [5], - "type": "float32" - }, { - "data": [0], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [15, 14, 12, 9, 5], - "dims": [5], - "type": "float32" - }] - }, { - "name": "T[1]", - "inputs": [{ - "data": [1, 2, 3, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }, { - "data": [0], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [5, 7, 9, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }] - }, { - "name": "T[2]", - "inputs": [{ - "data": [1, 2, 3, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }, { - "data": 
[1], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [6, 5, 3, 15, 11, 6], - "dims": [2, 3], - "type": "float32" - }] - }] -}, { - "name": "Cumsum exclusive and reverse", - "operator": "CumSum", - "opsets": [{ - "domain": "", - "version": 11 - }], - "attributes": [{ - "name": "reverse", - "data": 1, - "type": "int" - }, { - "name": "exclusive", - "data": 1, - "type": "int" - }], - "cases": [{ - "name": "T[0]", - "inputs": [{ - "data": [1, 2, 3, 4, 5], - "dims": [5], - "type": "float32" - }, { - "data": [0], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [14, 12, 9, 5, 0], - "dims": [5], - "type": "float32" - }] - }, { - "name": "T[1]", - "inputs": [{ - "data": [1, 2, 3, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }, { - "data": [0], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [4, 5, 6, 0, 0, 0], - "dims": [2, 3], - "type": "float32" - }] - }, { - "name": "T[2]", - "inputs": [{ - "data": [1, 2, 3, 4, 5, 6], - "dims": [2, 3], - "type": "float32" - }, { - "data": [1], - "dims": [1], - "type": "int32" - }], - "outputs": [{ - "data": [5, 3, 0, 11, 6, 0], - "dims": [2, 3], - "type": "float32" - }] - }] -}] +[ + { + "name": "Cumsum without attributes", + "operator": "CumSum", + "opsets": [ + { + "domain": "", + "version": 11 + } + ], + "attributes": [], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [1, 3, 6, 10, 15], + "dims": [5], + "type": "float32" + } + ] + }, + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [1, 2, 3, 5, 7, 9], + "dims": [2, 3], + "type": "float32" + } + ] + }, + { + "name": "T[2]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [1, 3, 6, 4, 9, 15], + "dims": [2, 3], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Cumsum exclusive", + "operator": "CumSum", + "opsets": [ + { + "domain": "", + "version": 11 + } + ], + "attributes": [ + { + "name": "exclusive", + "data": 1, + "type": "int" + } + ], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [0, 1, 3, 6, 10], + "dims": [5], + "type": "float32" + } + ] + }, + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [0, 0, 0, 1, 2, 3], + "dims": [2, 3], + "type": "float32" + } + ] + }, + { + "name": "T[2]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [0, 1, 3, 0, 4, 9], + "dims": [2, 3], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Cumsum reverse", + "operator": "CumSum", + "opsets": [ + { + "domain": "", + "version": 11 + } + ], + "attributes": [ + { + "name": "reverse", + "data": 1, + "type": "int" + } + ], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": 
"int32" + } + ], + "outputs": [ + { + "data": [15, 14, 12, 9, 5], + "dims": [5], + "type": "float32" + } + ] + }, + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [5, 7, 9, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + } + ] + }, + { + "name": "T[2]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [6, 5, 3, 15, 11, 6], + "dims": [2, 3], + "type": "float32" + } + ] + } + ] + }, + { + "name": "Cumsum exclusive and reverse", + "operator": "CumSum", + "opsets": [ + { + "domain": "", + "version": 11 + } + ], + "attributes": [ + { + "name": "reverse", + "data": 1, + "type": "int" + }, + { + "name": "exclusive", + "data": 1, + "type": "int" + } + ], + "cases": [ + { + "name": "T[0]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5], + "dims": [5], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [14, 12, 9, 5, 0], + "dims": [5], + "type": "float32" + } + ] + }, + { + "name": "T[1]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [0], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [4, 5, 6, 0, 0, 0], + "dims": [2, 3], + "type": "float32" + } + ] + }, + { + "name": "T[2]", + "inputs": [ + { + "data": [1, 2, 3, 4, 5, 6], + "dims": [2, 3], + "type": "float32" + }, + { + "data": [1], + "dims": [1], + "type": "int32" + } + ], + "outputs": [ + { + "data": [5, 3, 0, 11, 6, 0], + "dims": [2, 3], + "type": "float32" + } + ] + } + ] + } +] From 10e1e85e113aa8d62374af7e3f3a0534118f1e69 Mon Sep 17 00:00:00 2001 From: Frithjof Winkelmann Date: Mon, 7 Sep 2020 13:48:32 +0200 Subject: [PATCH 4/8] Fix compiler errors in cumsum implementation --- src/wasm-ops/cumsum.cpp | 2 +- src/wasm-ops/cumsum.h | 5 +++-- src/wasm-ops/utils/shape_utils.h | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/wasm-ops/cumsum.cpp b/src/wasm-ops/cumsum.cpp index 79ecb788..7ec53731 100644 --- a/src/wasm-ops/cumsum.cpp +++ b/src/wasm-ops/cumsum.cpp @@ -1,8 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. 
+#include "cumsum.h" #include "common.h" -#include "sum.h" #include "utils/shape_utils.h" // Wasm interop method diff --git a/src/wasm-ops/cumsum.h b/src/wasm-ops/cumsum.h index 059a2eb2..3081f14b 100644 --- a/src/wasm-ops/cumsum.h +++ b/src/wasm-ops/cumsum.h @@ -7,6 +7,7 @@ extern "C" { void cumsum_f32(void *); -void cumsum_f32_imp(float *X, int32_t *dims, int32_t rank, int32_t axis, - bool exclusive, bool reverse, float *Y); +void cumsum_f32_imp(const float *X, const int32_t *dims, const int32_t rank, + int32_t axis, const bool exclusive, const bool reverse, + float *Y); } diff --git a/src/wasm-ops/utils/shape_utils.h b/src/wasm-ops/utils/shape_utils.h index d67daafa..91fea4c3 100644 --- a/src/wasm-ops/utils/shape_utils.h +++ b/src/wasm-ops/utils/shape_utils.h @@ -19,6 +19,6 @@ std::vector offset_to_indices(const std::vector &strides, void offset_to_indices(const std::vector &strides, size_t offset, std::vector &indices); // Gives the index at a specific axis from a given offset -size_t ShapeUtils::offset_to_index(const std::vector &strides, - size_t offset, int32_t axis); +size_t offset_to_index(const std::vector &strides, size_t offset, + int32_t axis); }; // namespace ShapeUtils From c589211a618ed4f2155994fb91e062a2f85c6339 Mon Sep 17 00:00:00 2001 From: Frithjof Winkelmann Date: Mon, 7 Sep 2020 18:49:29 +0200 Subject: [PATCH 5/8] Implement webgl backend for cumsum --- lib/backends/webgl/op-resolve-rules.ts | 2 + lib/backends/webgl/ops/cumsum.ts | 51 ++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 lib/backends/webgl/ops/cumsum.ts diff --git a/lib/backends/webgl/op-resolve-rules.ts b/lib/backends/webgl/op-resolve-rules.ts index 7b9d56fe..30595fc4 100644 --- a/lib/backends/webgl/op-resolve-rules.ts +++ b/lib/backends/webgl/op-resolve-rules.ts @@ -9,6 +9,7 @@ import * as binaryOps from './ops/binary-op'; import {WebGLClip} from './ops/clip'; import {WebGLConcat} from './ops/concat'; import {WebGLConv} from './ops/conv'; +import {WebGLCumSum} from './ops/cumsum'; import {WebGLDropout} from './ops/dropout'; import {WebGLElu} from './ops/elu'; import {WebGLFlatten} from './ops/flatten'; @@ -105,4 +106,5 @@ export const WEBGL_OP_RESOLVE_RULES: ReadonlyArray = [ ['Upsample', '', '7-8', () => new WebGLUpsample()], ['Unsqueeze', '', '1+', () => new WebGLUnsqueeze()], ['Xor', '', '7+', () => new binaryOps.WebGLBinaryOp(['bool'], binaryOps.glslXor())], + ['CumSum', '', '11+', () => new WebGLCumSum()], ]; diff --git a/lib/backends/webgl/ops/cumsum.ts b/lib/backends/webgl/ops/cumsum.ts new file mode 100644 index 00000000..4e20eb60 --- /dev/null +++ b/lib/backends/webgl/ops/cumsum.ts @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT license. + +import {CumSum} from '../../../ops/cumsum'; +import {Tensor} from '../../../tensor'; +import {WebGLInferenceHandler} from '../inference-handler'; +import {ProgramInfo, RunData, WebGLOperator} from '../types'; + +export class WebGLCumSum extends CumSum implements WebGLOperator { + run(inferenceHandler: WebGLInferenceHandler, inputs: Tensor[]): Tensor[] { + return inferenceHandler.run(this, inputs); + } + createProgramInfo(inferenceHandler: WebGLInferenceHandler, inputs: Tensor[]): ProgramInfo { + const ax = inputs[1].integerData[0]; + const rank = inputs[0].dims.length; + const dims = inputs[0].dims; + + const startIx = this.reverse ? (dims[ax] - 1) : 0; + const comp = this.exclusive ? '' : '='; + const condition = this.reverse ? 
`k >${comp} endIx` : `k <${comp} endIx`; + const update = this.reverse ? 'k--' : 'k++'; + + const shaderSource = ` + float process(int indices[${rank}]) { + float value = 0.0; + int endIx = indices[${ax}]; + for (int k=${startIx}; ${condition}; ${update}) { + indices[${ax}] = k; + value += _A(indices); + } + return value; + }`; + const inputLayouts = [inferenceHandler.getOrCreateTextureLayout(inputs[0])]; + return { + inputLayouts, + outputLayout: inferenceHandler.createTextureLayoutFromShape(inputs[0].dims), + samplers: ['A'], + shaderSource, + }; + } + + createRunData(inferenceHandler: WebGLInferenceHandler, programInfo: ProgramInfo, inputs: Tensor[]): RunData { + const inputTDs = [inferenceHandler.getOrCreateTextureData(inputs[0], programInfo.inputLayouts[0])]; + return { + inputTextureDatas: inputTDs, + outputTextureData: + inferenceHandler.createTextureDataFromLayout(programInfo.outputLayout, inputTDs[0].tensor.type), + uniformData: {} + }; + } +} From 8c0796d470d2a0088d0ed34bd3d999c171f5c723 Mon Sep 17 00:00:00 2001 From: Frithjof Winkelmann Date: Mon, 7 Sep 2020 18:50:17 +0200 Subject: [PATCH 6/8] Update operator docs --- docs/operators.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/operators.md b/docs/operators.md index 71bdf574..d492c1e3 100644 --- a/docs/operators.md +++ b/docs/operators.md @@ -36,7 +36,7 @@ _This file is automatically generated from the def files via [this script](/tool | [ConvTranspose](https://github.com/onnx/onnx/blob/master/docs/Operators.md#ConvTranspose) | | | | | [Cos](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Cos) | [7+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cos-7) | | [7+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cos-7) | | [Cosh](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Cosh) | [9+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#Cosh-9) | | | -| [CumSum](https://github.com/onnx/onnx/blob/master/docs/Operators.md#CumSum) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | | +| [CumSum](https://github.com/onnx/onnx/blob/master/docs/Operators.md#CumSum) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | [11+](https://github.com/onnx/onnx/blob/master/docs/Changelog.md#CumSum-11) | | [DepthToSpace](https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace) | | | | | [DequantizeLinear](https://github.com/onnx/onnx/blob/master/docs/Operators.md#DequantizeLinear) | | | | | [Det](https://github.com/onnx/onnx/blob/master/docs/Operators.md#Det) | | | | From 640a5e55dd88c8d3ab3654de129162af16d620b7 Mon Sep 17 00:00:00 2001 From: Frithjof Winkelmann Date: Mon, 7 Sep 2020 20:42:45 +0200 Subject: [PATCH 7/8] Add cumsum.jsonc to test suite --- test/test-suite-whitelist.jsonc | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/test/test-suite-whitelist.jsonc b/test/test-suite-whitelist.jsonc index 64fc0cef..9fbd7363 100644 --- a/test/test-suite-whitelist.jsonc +++ b/test/test-suite-whitelist.jsonc @@ -265,7 +265,8 @@ "softmax.jsonc", "tan.jsonc", "transpose.jsonc", - "xor.jsonc" + "xor.jsonc", + "cumsum.jsonc" ] }, "webgl": { @@ -527,7 +528,8 @@ "sub.jsonc", "tan.jsonc", "transpose.jsonc", - "xor.jsonc" + "xor.jsonc", + "cumsum.jsonc" ] }, "wasm": { @@ -639,7 +641,8 @@ "and.jsonc", "or.jsonc", "xor.jsonc", - 
"matmul.jsonc" + "matmul.jsonc", + "cumsum.jsonc" ] } } From 93e27b45132bc013c85ad907616380525fd60eaf Mon Sep 17 00:00:00 2001 From: Frithjof Winkelmann Date: Tue, 8 Sep 2020 09:49:56 +0200 Subject: [PATCH 8/8] Add cumsum tests --- test/test-suite-whitelist.jsonc | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/test/test-suite-whitelist.jsonc b/test/test-suite-whitelist.jsonc index 617d8cc3..6b9477e8 100644 --- a/test/test-suite-whitelist.jsonc +++ b/test/test-suite-whitelist.jsonc @@ -225,7 +225,13 @@ "test_xor_bcast4v4d", "test_xor2d", "test_xor3d", - "test_xor4d" + "test_xor4d", + "v{11,12}/test_cumsum_1d_exclusive", + "v{11,12}/test_cumsum_1d_reverse", + "v{11,12}/test_cumsum_1d_reverse_exclusive", + "v{11,12}/test_cumsum_2d_axis_0", + "v{11,12}/test_cumsum_2d_axis_1", + "v{11,12}/test_cumsum_2d_negative_axis" ], "ops": [ "abs.jsonc", @@ -486,7 +492,13 @@ "test_xor_bcast4v4d", "test_xor2d", "test_xor3d", - "test_xor4d" + "test_xor4d", + "v{11,12}/test_cumsum_1d_exclusive", + "v{11,12}/test_cumsum_1d_reverse", + "v{11,12}/test_cumsum_1d_reverse_exclusive", + "v{11,12}/test_cumsum_2d_axis_0", + "v{11,12}/test_cumsum_2d_axis_1", + "v{11,12}/test_cumsum_2d_negative_axis" ], "ops": [ "abs.jsonc", @@ -626,7 +638,13 @@ "test_globalmaxpool_precomputed", "test_globalmaxpool", "test_instancenorm_epsilon", - "test_instancenorm_example" + "test_instancenorm_example", + "v{11,12}/test_cumsum_1d_exclusive", + "v{11,12}/test_cumsum_1d_reverse", + "v{11,12}/test_cumsum_1d_reverse_exclusive", + "v{11,12}/test_cumsum_2d_axis_0", + "v{11,12}/test_cumsum_2d_axis_1", + "v{11,12}/test_cumsum_2d_negative_axis" ], "ops": [ // Check in op tests that have native Wasm implementations