From 079f2385df1abec959d68cc7c699c73e6d8a1f7d Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Mon, 21 Aug 2023 12:18:22 +0200 Subject: [PATCH 01/36] Update dlc.js --- source/dlc.js | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dlc.js b/source/dlc.js index 3111d299e26..c2076d6aa3f 100644 --- a/source/dlc.js +++ b/source/dlc.js @@ -360,6 +360,7 @@ dlc.Container = class { case 0x0308: return 'qint8'; case 0x0332: return 'qint32'; case 0x0408: return 'uint8'; + case 0x0416: return 'uint16'; case 0x0508: return 'boolean'; default: throw new dlc.Error("Unsupported data type '" + JSON.stringify(value) + "'."); } From 7749a6a239ef1f6e6587d38a0960bb22ad6c3235 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Tue, 22 Aug 2023 12:56:41 +0200 Subject: [PATCH 02/36] Add Keras test files (#1141) --- test/models.json | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/test/models.json b/test/models.json index 90a6e907adc..6a6e88a31cb 100644 --- a/test/models.json +++ b/test/models.json @@ -2135,6 +2135,27 @@ "format": "Keras v2.4.0", "link": "https://github.com/lutzroeder/netron/issues/57" }, + { + "type": "keras", + "target": "issue1142.1.keras", + "source": "https://github.com/lutzroeder/netron/files/12407995/issue1142.1.keras.zip[issue1142.1.keras]", + "format": "Keras v2.13.1", + "link": "https://github.com/lutzroeder/netron/issues/1141" + }, + { + "type": "keras", + "target": "issue1142.2.keras", + "source": "https://github.com/lutzroeder/netron/files/12407996/issue1142.2.keras.zip[issue1142.2.keras]", + "format": "Keras v2.13.1", + "link": "https://github.com/lutzroeder/netron/issues/1141" + }, + { + "type": "keras", + "target": "InceptionV3.h5.zip", + "source": "https://github.com/lutzroeder/netron/files/6098151/InceptionV3.h5.zip", + "format": "Keras v2.4.0", + "link": "https://github.com/lutzroeder/netron/issues/57" + }, { "type": "keras", "target": "lstm_seq2seq.h5", From 38c2b02dda5341ed899ffdcebcb3f72d9583b566 Mon Sep 17 00:00:00 2001 
From: Lutz Roeder Date: Wed, 23 Aug 2023 14:37:12 +0200 Subject: [PATCH 03/36] Update onnx-metadata.json --- source/onnx-metadata.json | 155 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 155 insertions(+) diff --git a/source/onnx-metadata.json b/source/onnx-metadata.json index 574a83f6f42..144eb5972d9 100644 --- a/source/onnx-metadata.json +++ b/source/onnx-metadata.json @@ -16260,6 +16260,16 @@ "tensor(int64)" ] } + ], + "examples": [ + { + "summary": "string_int_label_encoder", + "code": "node = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n default_int64=42,\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, -1, 2, -1]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int_no_default\",\n)" + }, + { + "summary": "tensor_based_label_encoder", + "code": "tensor_keys = make_tensor(\n \"keys_tensor\", onnx.TensorProto.STRING, (3,), [\"a\", \"b\", \"c\"]\n)\nrepeated_string_keys = [\"a\", \"b\", \"c\"]\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int16)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_tensor=tensor_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n 
),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_mapping\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=repeated_string_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_value_only_mapping\",\n)" + } ] }, { @@ -16363,6 +16373,151 @@ "tensor(float)" ] } + ], + "examples": [ + { + "summary": "string_int_label_encoder", + "code": "node = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n default_int64=42,\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, -1, 2, -1]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int_no_default\",\n)" + }, + { + "summary": "tensor_based_label_encoder", + "code": "tensor_keys = make_tensor(\n \"keys_tensor\", onnx.TensorProto.STRING, (3,), [\"a\", \"b\", \"c\"]\n)\nrepeated_string_keys = [\"a\", \"b\", \"c\"]\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int16)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n 
keys_tensor=tensor_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_mapping\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=repeated_string_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_value_only_mapping\",\n)" + } + ] + }, + { + "name": "LabelEncoder", + "module": "ai.onnx.ml", + "version": 4, + "support_level": "common", + "description": "Maps each element in the input tensor to another value.
\n The mapping is determined by the two parallel attributes, 'keys_*' and\n 'values_*' attribute. The i-th value in the specified 'keys_*' attribute\n would be mapped to the i-th value in the specified 'values_*' attribute. It\n implies that input's element type and the element type of the specified\n 'keys_*' should be identical while the output type is identical to the\n specified 'values_*' attribute. Note that the 'keys_*' and 'values_*' attributes\n must have the same length. If an input element can not be found in the\n specified 'keys_*' attribute, the 'default_*' that matches the specified\n 'values_*' attribute may be used as its output value. The type of the 'default_*'\n attribute must match the 'values_*' attribute chosen.
\n Let's consider an example which maps a string tensor to an integer tensor.\n Assume and 'keys_strings' is [\"Amy\", \"Sally\"], 'values_int64s' is [5, 6],\n and 'default_int64' is '-1'. The input [\"Dori\", \"Amy\", \"Amy\", \"Sally\",\n \"Sally\"] would be mapped to [-1, 5, 5, 6, 6].
\n Since this operator is an one-to-one mapping, its input and output shapes\n are the same. Notice that only one of 'keys_*'/'values_*' can be set.
\n Float keys with value 'NaN' match any input 'NaN' value regardless of bit\n value. If a key is repeated, the last key takes precedence.\n", + "attributes": [ + { + "name": "default_float", + "type": "float32", + "required": false, + "description": "A float." + }, + { + "name": "default_int64", + "type": "int64", + "required": false, + "default": -1, + "description": "An integer." + }, + { + "name": "default_string", + "type": "string", + "required": false, + "default": "_Unused", + "description": "A string." + }, + { + "name": "default_tensor", + "type": "tensor", + "required": false, + "description": "A default tensor." + }, + { + "name": "keys_floats", + "type": "float32[]", + "required": false, + "description": "A list of floats." + }, + { + "name": "keys_int64s", + "type": "int64[]", + "required": false, + "description": "A list of ints." + }, + { + "name": "keys_strings", + "type": "string[]", + "required": false, + "description": "A list of strings." + }, + { + "name": "keys_tensor", + "type": "tensor", + "required": false, + "description": "Keys encoded as a 1D tensor. One and only one of 'keys_*'s should be set." + }, + { + "name": "values_floats", + "type": "float32[]", + "required": false, + "description": "A list of floats." + }, + { + "name": "values_int64s", + "type": "int64[]", + "required": false, + "description": "A list of ints." + }, + { + "name": "values_strings", + "type": "string[]", + "required": false, + "description": "A list of strings." + }, + { + "name": "values_tensor", + "type": "tensor", + "required": false, + "description": "Values encoded as a 1D tensor. One and only one of 'values_*'s should be set." + } + ], + "inputs": [ + { + "name": "X", + "type": "T1", + "description": "Input data. It must have the same element type as the keys_* attribute set." + } + ], + "min_input": 1, + "max_input": 1, + "outputs": [ + { + "name": "Y", + "type": "T2", + "description": "Output data. 
This tensor's element type is based on the values_* attribute set." + } + ], + "min_output": 1, + "max_output": 1, + "type_constraints": [ + { + "description": "The input type is a tensor of any shape.", + "type_param_str": "T1", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(float)", + "tensor(int32)", + "tensor(int16)" + ] + }, + { + "description": "Output type is determined by the specified 'values_*' attribute.", + "type_param_str": "T2", + "allowed_type_strs": [ + "tensor(string)", + "tensor(int64)", + "tensor(float)", + "tensor(int32)", + "tensor(int16)" + ] + } + ], + "examples": [ + { + "summary": "string_int_label_encoder", + "code": "node = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n default_int64=42,\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=[\"a\", \"b\", \"c\"],\n values_int64s=[0, 1, 2],\n)\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, -1, 2, -1]).astype(np.int64)\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_string_int_no_default\",\n)" + }, + { + "summary": "tensor_based_label_encoder", + "code": "tensor_keys = make_tensor(\n \"keys_tensor\", onnx.TensorProto.STRING, (3,), [\"a\", \"b\", \"c\"]\n)\nrepeated_string_keys = [\"a\", \"b\", \"c\"]\nx = np.array([\"a\", \"b\", \"d\", \"c\", \"g\"]).astype(object)\ny = np.array([0, 1, 42, 2, 42]).astype(np.int16)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_tensor=tensor_keys,\n 
values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_mapping\",\n)\n\nnode = onnx.helper.make_node(\n \"LabelEncoder\",\n inputs=[\"X\"],\n outputs=[\"Y\"],\n domain=\"ai.onnx.ml\",\n keys_strings=repeated_string_keys,\n values_tensor=make_tensor(\n \"values_tensor\", onnx.TensorProto.INT16, (3,), [0, 1, 2]\n ),\n default_tensor=make_tensor(\n \"default_tensor\", onnx.TensorProto.INT16, (1,), [42]\n ),\n)\n\nexpect(\n node,\n inputs=[x],\n outputs=[y],\n name=\"test_ai_onnx_ml_label_encoder_tensor_value_only_mapping\",\n)" + } ] }, { From 218b9ed317cd94ae5c843852376324cfc430f116 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Wed, 23 Aug 2023 19:35:10 +0200 Subject: [PATCH 04/36] Update tflite-schema.js --- source/tflite-metadata.json | 18 +++++++++++++ source/tflite-schema.js | 50 ++++++++++++++++++++++++++++++++++++- 2 files changed, 67 insertions(+), 1 deletion(-) diff --git a/source/tflite-metadata.json b/source/tflite-metadata.json index baf82bdffd9..226cde29812 100644 --- a/source/tflite-metadata.json +++ b/source/tflite-metadata.json @@ -861,6 +861,24 @@ { "name": "precision_config", "type": "StablehloPrecisionConfig[]", "default": "DEFAULT" } ] }, + { + "name": "StablehloCustomCall", + "attributes": [ + { "name": "call_target_name", "type": "string", "default": null }, + { "name": "has_side_effect", "type": "boolean", "default": false }, + { "name": "backend_config", "type": "string", "default": null }, + { "name": "api_version", "type": "int32", "default": 0 }, + { "name": "called_computations", "type": "int32[]", "default": 0 }, + { "name": "custom_attributes", "type": "uint8[]", "default": 0 } + ] + }, + { + "name": "StablehloReduce", + "attributes": [ + { "name": "dimensions", "type": "int64[]", "default": 0 }, + { "name": 
"body_subgraph_index", "type": "int32", "default": 0 } + ] + }, { "name": "StablehloSlice", "attributes": [ diff --git a/source/tflite-schema.js b/source/tflite-schema.js index fecf559e979..e406eca48c8 100644 --- a/source/tflite-schema.js +++ b/source/tflite-schema.js @@ -416,7 +416,9 @@ $root.tflite.BuiltinOperator = { STABLEHLO_CONCATENATE: 169, STABLEHLO_BROADCAST_IN_DIM: 170, STABLEHLO_CONVOLUTION: 171, - STABLEHLO_SLICE: 172 + STABLEHLO_SLICE: 172, + STABLEHLO_CUSTOM_CALL: 173, + STABLEHLO_REDUCE: 174 }; $root.tflite.BuiltinOptions = class { @@ -694,6 +696,8 @@ $root.tflite.BuiltinOptions2 = class { case 2: return $root.tflite.StablehloBroadcastInDimOptions.decode(reader, position); case 3: return $root.tflite.StablehloSliceOptions.decode(reader, position); case 4: return $root.tflite.StablehloConvolutionOptions.decode(reader, position); + case 5: return $root.tflite.StablehloCustomCallOptions.decode(reader, position); + case 6: return $root.tflite.StablehloReduceOptions.decode(reader, position); default: return undefined; } } @@ -704,6 +708,8 @@ $root.tflite.BuiltinOptions2 = class { case 'StablehloBroadcastInDimOptions': return $root.tflite.StablehloBroadcastInDimOptions.decodeText(reader, json); case 'StablehloSliceOptions': return $root.tflite.StablehloSliceOptions.decodeText(reader, json); case 'StablehloConvolutionOptions': return $root.tflite.StablehloConvolutionOptions.decodeText(reader, json); + case 'StablehloCustomCallOptions': return $root.tflite.StablehloCustomCallOptions.decodeText(reader, json); + case 'StablehloReduceOptions': return $root.tflite.StablehloReduceOptions.decodeText(reader, json); default: return undefined; } } @@ -745,6 +751,48 @@ $root.tflite.StablehloBroadcastInDimOptions = class StablehloBroadcastInDimOptio } }; +$root.tflite.StablehloCustomCallOptions = class StablehloCustomCallOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloCustomCallOptions(); + $.call_target_name = 
reader.string_(position, 4, null); + $.has_side_effect = reader.bool_(position, 6, false); + $.backend_config = reader.string_(position, 8, null); + $.api_version = reader.int32_(position, 10, 0); + $.called_computations = reader.typedArray(position, 12, Int32Array); + $.custom_attributes = reader.typedArray(position, 14, Uint8Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloCustomCallOptions(); + $.call_target_name = reader.value(json.call_target_name, null); + $.has_side_effect = reader.value(json.has_side_effect, false); + $.backend_config = reader.value(json.backend_config, null); + $.api_version = reader.value(json.api_version, 0); + $.called_computations = reader.typedArray(json.called_computations, Int32Array); + $.custom_attributes = reader.typedArray(json.custom_attributes, Uint8Array); + return $; + } +}; + +$root.tflite.StablehloReduceOptions = class StablehloReduceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloReduceOptions(); + $.dimensions = reader.int64s_(position, 4); + $.body_subgraph_index = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloReduceOptions(); + $.dimensions = reader.array(json.dimensions); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + $root.tflite.StablehloSliceOptions = class StablehloSliceOptions { static decode(reader, position) { From c4dd1376e50959a974481bc627451800c4443c7d Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Thu, 24 Aug 2023 07:44:01 +0200 Subject: [PATCH 05/36] Update tf-metadata.json --- source/tf-metadata.json | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/source/tf-metadata.json b/source/tf-metadata.json index 3ad11497ab9..e51b9c4c96f 100644 --- a/source/tf-metadata.json +++ b/source/tf-metadata.json @@ -8086,6 +8086,7 @@ { "name": "CollectiveAllToAllV2", "summary": "Mutually 
exchanges multiple tensors of identical type and shape.", + "description": "`is_stateless` means each op does not need control dependencies to other\ncollective ops. In this case, keys that are unique at runtime\n(e.g. `instance_key`) should be used to distinguish collective groups.", "attributes": [ { "name": "T", @@ -8102,6 +8103,11 @@ "type": "float32", "default": 0.0 }, + { + "name": "is_stateless", + "type": "boolean", + "default": false + }, { "name": "Nordering_token", "type": "int64", @@ -8446,6 +8452,7 @@ { "name": "CollectiveGatherV2", "summary": "Mutually accumulates multiple tensors of identical type and shape.", + "description": "`is_stateless` means each op does not need control dependencies to other\ncollective ops. In this case, keys that are unique at runtime\n(e.g. `instance_key`) should be used to distinguish collective groups.", "attributes": [ { "name": "T", @@ -8462,6 +8469,11 @@ "type": "float32", "default": 0.0 }, + { + "name": "is_stateless", + "type": "boolean", + "default": false + }, { "name": "Nordering_token", "type": "int64", @@ -8633,6 +8645,7 @@ { "name": "CollectiveReduceScatterV2", "summary": "Mutually reduces multiple tensors of identical type and shape and scatters the result.", + "description": "`is_stateless` means each op does not need control dependencies to other\ncollective ops. In this case, keys that are unique at runtime\n(e.g. `instance_key`) should be used to distinguish collective groups.", "attributes": [ { "name": "T", @@ -8659,6 +8672,11 @@ "type": "float32", "default": 0.0 }, + { + "name": "is_stateless", + "type": "boolean", + "default": false + }, { "name": "Nordering_token", "type": "int64", @@ -8704,6 +8722,7 @@ { "name": "CollectiveReduceV2", "summary": "Mutually reduces multiple tensors of identical type and shape.", + "description": "`is_stateless` means each op does not need control dependencies to other\ncollective ops. In this case, keys that are unique at runtime\n(e.g. 
`instance_key`) should be used to distinguish collective groups.", "attributes": [ { "name": "T", @@ -8730,6 +8749,11 @@ "type": "float32", "default": 0.0 }, + { + "name": "is_stateless", + "type": "boolean", + "default": false + }, { "name": "Nordering_token", "type": "int64", From a595b553a544db6d24f86e7ffd3df202c735b5ee Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Thu, 24 Aug 2023 19:04:36 +0200 Subject: [PATCH 06/36] Update om.js --- source/om.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/om.js b/source/om.js index 2bfe658e240..cbd57b1d7fe 100644 --- a/source/om.js +++ b/source/om.js @@ -407,7 +407,7 @@ om.Container = class { break; } default: { - throw new om.Error("Unsupported partition type '" + partition.type + "'."); + throw new om.Error('Unsupported DaVinci OM partition type.'); } } } From baf92fd68c064a6758a415d66505f428acdd2b90 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Thu, 24 Aug 2023 19:12:32 +0200 Subject: [PATCH 07/36] Update xmodel.js --- source/xmodel.js | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/source/xmodel.js b/source/xmodel.js index 20bdd480be8..af13740bf7c 100644 --- a/source/xmodel.js +++ b/source/xmodel.js @@ -101,19 +101,13 @@ xmodel.Value = class { if (tensor && tensor.tensor_attr && tensor.data_type) { if (initializer) { this.initializer = new xmodel.Tensor(node); + this.type = this.initializer.type; } else { - this._type = new xmodel.TensorType(tensor); + this.type = new xmodel.TensorType(tensor); } } } } - - get type() { - if (this.initializer) { - return this.initializer.type; - } - return this._type; - } }; xmodel.Node = class { @@ -151,7 +145,8 @@ xmodel.Node = class { this.chain.push(new xmodel.Node(metadata, { op_type: activation }, arg)); continue; } - this.attributes.push(new xmodel.Attribute(metadata.attribute(this._type, name), name, value)); + const attribute = new xmodel.Attribute(metadata.attribute(this.type, name), name, value); 
+ this.attributes.push(attribute); } } if (op_node.args) { @@ -174,20 +169,16 @@ xmodel.Attribute = class { this.value = attribute.value; if (metadata) { if (metadata.default !== undefined) { - if (metadata.default === this._value) { - this._visible = false; + if (metadata.default === this.value) { + this.visible = false; } - if (Array.isArray(metadata.default) && Array.isArray(this._value) && - metadata.default.length === this._value.length && metadata.default.every((value, index) => value === this._value[index])) { - this._visible = false; + if (Array.isArray(metadata.default) && Array.isArray(this.value) && + metadata.default.length === this.value.length && metadata.default.every((value, index) => value === this.value[index])) { + this.visible = false; } } } } - - get visible() { - return this._visible == false ? false : true; - } }; xmodel.TensorType = class { @@ -270,6 +261,7 @@ xmodel.Utility = class { case 'bool': return { type: 'boolean', value: value }; case 'int32': return { type: 'int32', value: value }; case 'int32_vec': return { type: 'int32[]', value: value.value }; + case 'uint32_vec': return { type: 'uint32[]', value: value.value }; case 'int64': return { type: 'int64', value: value }; case 'uint64': return { type: 'uint64', value: value }; case 'float': return { type: 'float32', value: value }; From 6d34156750e2e9039a236fd6bfec4c51496f4173 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Fri, 25 Aug 2023 05:21:25 -0700 Subject: [PATCH 08/36] Update onnx-metadata.json --- source/onnx-metadata.json | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/onnx-metadata.json b/source/onnx-metadata.json index 144eb5972d9..c64bacf28b3 100644 --- a/source/onnx-metadata.json +++ b/source/onnx-metadata.json @@ -16494,7 +16494,8 @@ "tensor(int64)", "tensor(float)", "tensor(int32)", - "tensor(int16)" + "tensor(int16)", + "tensor(double)" ] }, { @@ -16505,7 +16506,8 @@ "tensor(int64)", "tensor(float)", "tensor(int32)", - "tensor(int16)" + 
"tensor(int16)", + "tensor(double)" ] } ], From 516d467a1c578ba92db169ba39d479d6de725f61 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Fri, 25 Aug 2023 05:21:38 -0700 Subject: [PATCH 09/36] Update tflite-schema.js --- source/tflite-schema.js | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/source/tflite-schema.js b/source/tflite-schema.js index e406eca48c8..b2d98ff106a 100644 --- a/source/tflite-schema.js +++ b/source/tflite-schema.js @@ -418,7 +418,22 @@ $root.tflite.BuiltinOperator = { STABLEHLO_CONVOLUTION: 171, STABLEHLO_SLICE: 172, STABLEHLO_CUSTOM_CALL: 173, - STABLEHLO_REDUCE: 174 + STABLEHLO_REDUCE: 174, + STABLEHLO_ABS: 175, + STABLEHLO_AND: 176, + STABLEHLO_COSINE: 177, + STABLEHLO_EXPONENTIAL: 178, + STABLEHLO_FLOOR: 179, + STABLEHLO_LOG: 180, + STABLEHLO_MINIMUM: 181, + STABLEHLO_NEGATE: 182, + STABLEHLO_OR: 183, + STABLEHLO_POWER: 184, + STABLEHLO_REMAINDER: 185, + STABLEHLO_RSQRT: 186, + STABLEHLO_SELECT: 187, + STABLEHLO_SUBTRACT: 188, + STABLEHLO_TANH: 189 }; $root.tflite.BuiltinOptions = class { From 781ee3935c955a98b19045b16943b56acf89b173 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Fri, 25 Aug 2023 06:08:20 -0700 Subject: [PATCH 10/36] Add TorchScript test file (#1142) --- source/view.js | 2 +- test/models.json | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/source/view.js b/source/view.js index 68d52c1834c..29d8aa4948f 100644 --- a/source/view.js +++ b/source/view.js @@ -5029,7 +5029,7 @@ view.ModelFactoryService = class { this._extensions = new Set([ '.zip', '.tar', '.tar.gz', '.tgz', '.gz' ]); this._factories = []; this.register('./server', [ '.netron']); - this.register('./pytorch', [ '.pt', '.pth', '.ptl', '.pt1', '.pyt', '.pyth', '.pkl', '.pickle', '.h5', '.t7', '.model', '.dms', '.tar', '.ckpt', '.chkpt', '.tckpt', '.bin', '.pb', '.zip', '.nn', '.torchmodel', '.torchscript', '.pytorch', '.ot', '.params', '.trt', '.ff', '.ptmf' ], [ '.model' ]); + this.register('./pytorch', 
[ '.pt', '.pth', '.ptl', '.pt1', '.pyt', '.pyth', '.pkl', '.pickle', '.h5', '.t7', '.model', '.dms', '.tar', '.ckpt', '.chkpt', '.tckpt', '.bin', '.pb', '.zip', '.nn', '.torchmodel', '.torchscript', '.pytorch', '.ot', '.params', '.trt', '.ff', '.ptmf', '.jit' ], [ '.model' ]); this.register('./onnx', [ '.onnx', '.onn', '.pb', '.onnxtxt', '.pbtxt', '.prototxt', '.txt', '.model', '.pt', '.pth', '.pkl', '.ort', '.ort.onnx', 'onnxmodel', 'ngf', 'json' ]); this.register('./mxnet', [ '.json', '.params' ], [ '.mar']); this.register('./coreml', [ '.mlmodel', '.bin', 'manifest.json', 'metadata.json', 'featuredescriptions.json', '.pb' ], [ '.mlpackage' ]); diff --git a/test/models.json b/test/models.json index 6a6e88a31cb..e3db41774c2 100644 --- a/test/models.json +++ b/test/models.json @@ -5112,6 +5112,13 @@ "format": "PyTorch v0.1.10", "link": "https://github.com/HengLan/SiamFC-PyTorch" }, + { + "type": "pytorch", + "target": "silero_vad.jit", + "source": "https://github.com/lutzroeder/netron/files/12439374/silero_vad.zip[silero_vad.jit]", + "error": "Too many dimensions for input audio chunk ?", + "link": "https://github.com/lutzroeder/netron/issues/1142" + }, { "type": "pytorch", "target": "shufflenet_v2_x1_0.zip.pth", From 1ba353aa83389c95aa07fcf140928da7cdee19b2 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Fri, 25 Aug 2023 06:11:05 -0700 Subject: [PATCH 11/36] Update to electron 26.1.0 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index d2370f34531..b99cf345fb9 100755 --- a/package.json +++ b/package.json @@ -31,7 +31,7 @@ "electron-updater": "6.1.1" }, "devDependencies": { - "electron": "26.0.0", + "electron": "26.1.0", "electron-builder": "24.6.3", "@electron/notarize": "2.1.0", "eslint": "8.47.0" From df33af9df4ea36803ed6616f19aaee318e4bfd95 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Fri, 25 Aug 2023 06:12:12 -0700 Subject: [PATCH 12/36] Update to 7.1.5 --- package.json | 4 ++-- 1 file 
changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index b99cf345fb9..cb3bd14e47f 100755 --- a/package.json +++ b/package.json @@ -6,8 +6,8 @@ "email": "lutzroeder@users.noreply.github.com", "url": "https://www.lutzroeder.com" }, - "version": "7.1.4", - "date": "2023-08-17 21:47:36", + "version": "7.1.5", + "date": "2023-08-25 13:12:12", "description": "Visualizer for neural network, deep learning, and machine learning models", "license": "MIT", "repository": "lutzroeder/netron", From 790cb379d9584751cc39f7ba6cc6a99bded38da5 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Fri, 25 Aug 2023 19:51:34 -0700 Subject: [PATCH 13/36] Update to eslint 8.48.0 --- package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/package.json b/package.json index cb3bd14e47f..7bddb91f252 100755 --- a/package.json +++ b/package.json @@ -34,7 +34,7 @@ "electron": "26.1.0", "electron-builder": "24.6.3", "@electron/notarize": "2.1.0", - "eslint": "8.47.0" + "eslint": "8.48.0" }, "eslintConfig": { "env": { From aa12715f9e7c2c92cb0912e0502566f29e6b80e8 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sat, 26 Aug 2023 07:29:29 -0700 Subject: [PATCH 14/36] Update ONNX test files (#741) --- source/onnx.js | 23 ++++++++++++++--------- source/view.js | 32 ++++++++++++++++++-------------- test/models.json | 13 +++++++++++-- 3 files changed, 43 insertions(+), 25 deletions(-) diff --git a/source/onnx.js b/source/onnx.js index 4872cdb0e8f..c200eb57b3e 100644 --- a/source/onnx.js +++ b/source/onnx.js @@ -616,14 +616,14 @@ onnx.Tensor = class { this._category = category || null; if (tensor.indices && tensor.values) { this._name = tensor.values.name || ''; - this._type = context.createTensorType(tensor.values.data_type, tensor.dims.map((dim) => dim), null); + this._type = context.createTensorType(tensor.values.data_type, tensor.dims.map((dim) => dim), 'sparse'); this._location = context.createLocation(tensor.values.data_location); this._layout = 
'sparse'; this._values = new onnx.Tensor(context, tensor.values); this._indices = new onnx.Tensor(context, tensor.indices); } else { this._name = tensor.name || ''; - this._type = context.createTensorType(tensor.data_type, tensor.dims.map((dim) => dim), null); + this._type = context.createTensorType(tensor.data_type, tensor.dims.map((dim) => dim)); this._location = context.createLocation(tensor.data_location); switch (tensor.data_location) { case onnx.DataLocation.DEFAULT: { @@ -783,9 +783,10 @@ onnx.Tensor = class { onnx.TensorType = class { - constructor(dataType, shape, denotation) { + constructor(dataType, shape, layout, denotation) { this._dataType = dataType; this._shape = shape; + this._layout = layout || null; this._denotation = denotation || null; } @@ -797,6 +798,10 @@ onnx.TensorType = class { return this._shape; } + get layout() { + return this._layout; + } + get denotation() { return this._denotation; } @@ -1312,11 +1317,11 @@ onnx.GraphContext = class { if (type.tensor_type) { const tensor_type = type.tensor_type; const shape = tensor_type.shape && tensor_type.shape.dim ? tensor_type.shape.dim.map((dim) => dim.dim_param ? dim.dim_param : dim.dim_value ? dim.dim_value : null) : []; - return this.createTensorType(tensor_type.elem_type, shape, denotation); + return this.createTensorType(tensor_type.elem_type, shape, null, denotation); } else if (type.sparse_tensor_type) { - const tensor_type = type.sparse_tensor_type; - const shape = tensor_type.shape && tensor_type.shape.dim ? tensor_type.shape.dim.map((dim) => dim.dim_param ? dim.dim_param : dim.dim_value ? dim.dim_value : null) : []; - return this.createTensorType(tensor_type.elem_type, shape, denotation); + type = type.sparse_tensor_type; + const shape = type.shape && type.shape.dim ? type.shape.dim.map((dim) => dim.dim_param ? dim.dim_param : dim.dim_value ? 
dim.dim_value : null) : []; + return this.createTensorType(type.elem_type, shape, 'sparse', denotation); } else if (type.map_type) { return this.createMapType(type.map_type.key_type, this.createType(type.map_type.value_type), denotation); } else if (type.sequence_type) { @@ -1331,9 +1336,9 @@ onnx.GraphContext = class { throw new onnx.Error("Unsupported tensor type '" + JSON.stringify(type) + "'."); } - createTensorType(dataType, shape, denotation) { + createTensorType(dataType, shape, layout, denotation) { dataType = this.createDataType(dataType); - return new onnx.TensorType(dataType, new onnx.TensorShape(shape), denotation); + return new onnx.TensorType(dataType, new onnx.TensorShape(shape), layout, denotation); } createMapType(keyType, valueType, denotation) { diff --git a/source/view.js b/source/view.js index 29d8aa4948f..11555439dd7 100644 --- a/source/view.js +++ b/source/view.js @@ -2675,6 +2675,24 @@ view.ValueView = class extends view.Control { if (location !== undefined) { this._bold('location', location); } + let layout = this._value.type ? 
this._value.type.layout : null; + if (initializer) { + if (layout && layout !== initializer.layout) { + throw new view.Error('Tensor type layout mismatch.'); + } + layout = layout || initializer.layout; + } + if (layout) { + const layouts = new Map([ + [ 'sparse', 'sparse' ], + [ 'sparse.coo', 'sparse coo' ], + [ 'sparse.csr', 'sparse csr' ], + [ 'sparse.csc', 'sparse csc' ], + [ 'sparse.bsr', 'sparse bsr' ], + [ 'sparse.bsc', 'sparse bsc' ] + ]); + this._bold('layout', layouts.get(layout)); + } if (initializer) { this._tensor(initializer); } @@ -2708,20 +2726,6 @@ view.ValueView = class extends view.Control { const contentLine = this.createElement('pre'); try { const tensor = new view.Tensor(value); - const layout = tensor.layout; - if (layout) { - const layouts = new Map([ - [ 'sparse', 'Sparse' ], - [ 'sparse.coo', 'Sparse COO' ], - [ 'sparse.csr', 'Sparse CSR' ], - [ 'sparse.csc', 'Sparse CSC' ], - [ 'sparse.bsr', 'Sparse BSR' ], - [ 'sparse.bsc', 'Sparse BSC' ] - ]); - if (layouts.has(layout)) { - this._bold('layout', layouts.get(layout)); - } - } if (Array.isArray(tensor.stride) && tensor.stride.length > 0) { this._code('stride', tensor.stride.join(',')); } diff --git a/test/models.json b/test/models.json index e3db41774c2..ef85ec4c5b4 100644 --- a/test/models.json +++ b/test/models.json @@ -3878,9 +3878,10 @@ { "type": "onnx", "target": "sparse_initializer_as_output.json", - "source": "https://github.com/lutzroeder/netron/files/12329156/sparse_initializer_as_output.json.zip[sparse_initializer_as_output.json]", + "source": "https://github.com/lutzroeder/netron/files/12444489/sparse_initializer_as_output.json.zip[sparse_initializer_as_output.json]", "format": "ONNX JSON v7", - "link": "https://github.com/lutzroeder/netron/issues/6" + "assert": [ "model.graphs[0].outputs[0].value[0].type.layout=sparse" ], + "link": "https://github.com/lutzroeder/netron/issues/741" }, { "type": "onnx", @@ -3889,6 +3890,14 @@ "format": "ONNX v7", "link": 
"https://github.com/lutzroeder/netron/issues/741" }, + { + "type": "onnx", + "target": "sparse_to_dense_matmul.onnx", + "source": "https://github.com/lutzroeder/netron/files/12444490/sparse_to_dense_matmul.onnx.zip[sparse_to_dense_matmul.onnx]", + "format": "ONNX v7", + "assert": [ "model.graphs[0].nodes[0].inputs[0].value[0].type.layout=sparse" ], + "link": "https://github.com/lutzroeder/netron/issues/741" + }, { "type": "onnx", "target": "squeezenet.onnx", From cd99d0f083f58ae6986d4f9b2542661c143a7ce1 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sat, 26 Aug 2023 07:34:43 -0700 Subject: [PATCH 15/36] Update pytorch-metadata.json --- source/pytorch-metadata.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/pytorch-metadata.json b/source/pytorch-metadata.json index bf8bf59bcee..dee7cc1bb61 100755 --- a/source/pytorch-metadata.json +++ b/source/pytorch-metadata.json @@ -829,7 +829,8 @@ { "name": "dtype", "type": "ScalarType", "optional": true, "default": null }, { "name": "layout", "type": "Layout", "optional": true, "default": null }, { "name": "device", "type": "Device", "optional": true, "default": null }, - { "name": "pin_memory", "type": "boolean", "optional": true, "default": null } + { "name": "pin_memory", "type": "boolean", "optional": true, "default": null }, + { "name": "is_coalesced", "type": "boolean", "optional": true, "default": null } ], "outputs": [ { "name": "outputs", "type": "Tensor" } From 916ec3f2e3edb3f8644e57b940f91ee61699bb67 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sat, 26 Aug 2023 09:00:08 -0700 Subject: [PATCH 16/36] Update tensor formatter (#741) (#961) --- source/bigdl.js | 8 +-- source/caffe.js | 2 +- source/caffe2.js | 2 +- source/circle.js | 2 +- source/cntk.js | 2 +- source/coreml.js | 2 +- source/dlc.js | 4 +- source/flax.js | 2 +- source/hickle.js | 2 +- source/keras.js | 14 ++-- source/mlir.js | 2 +- source/mnn.js | 2 +- source/mslite.js | 2 +- source/mxnet.js | 2 +- source/nnabla.js | 2 
+- source/numpy.js | 2 +- source/onnx.js | 37 ++++++----- source/pytorch.js | 26 +++++--- source/safetensors.js | 2 +- source/sklearn.js | 2 +- source/tf.js | 32 +++++----- source/tflite.js | 2 +- source/torch.js | 2 +- source/view.js | 144 ++++++++++++++++++++++-------------------- source/xmodel.js | 2 +- test/models.js | 11 ++-- test/models.json | 15 +++-- 27 files changed, 172 insertions(+), 155 deletions(-) diff --git a/source/bigdl.js b/source/bigdl.js index 762692b2f45..8fa3ed3aa77 100644 --- a/source/bigdl.js +++ b/source/bigdl.js @@ -327,11 +327,11 @@ bigdl.Tensor = class { case 'float32': if (storage.bytes_data && storage.bytes_data.length > 0) { this._values = storage.bytes_data[0]; - this._layout = '<'; + this._encoding = '<'; } else if (storage.float_data && storage.float_data.length > 0) { this._values = storage.float_data; - this._layout = '|'; + this._encoding = '|'; } break; default: @@ -349,8 +349,8 @@ bigdl.Tensor = class { return this._type; } - get layout() { - return this._layout; + get encoding() { + return this._encoding; } get values() { diff --git a/source/caffe.js b/source/caffe.js index 0f93b6a1a82..3698c3ccf7b 100644 --- a/source/caffe.js +++ b/source/caffe.js @@ -627,7 +627,7 @@ caffe.Tensor = class { return this._type; } - get layout() { + get encoding() { return '|'; } diff --git a/source/caffe2.js b/source/caffe2.js index cd19db81d84..d560dd7255b 100644 --- a/source/caffe2.js +++ b/source/caffe2.js @@ -561,7 +561,7 @@ caffe2.Tensor = class { return null; } - get layout() { + get encoding() { return '|'; } diff --git a/source/circle.js b/source/circle.js index 4b25b67c050..65e9fe086a8 100644 --- a/source/circle.js +++ b/source/circle.js @@ -555,7 +555,7 @@ circle.Tensor = class { return this._type; } - get layout() { + get encoding() { switch (this._type.dataType) { case 'string': return '|'; default: return '<'; diff --git a/source/cntk.js b/source/cntk.js index 2b56d79b7b1..aafbecc412b 100644 --- a/source/cntk.js +++ 
b/source/cntk.js @@ -545,7 +545,7 @@ cntk.Tensor = class { return this._type; } - get layout() { + get encoding() { return '|'; } diff --git a/source/coreml.js b/source/coreml.js index 1aeed032f37..d1a3150b2ff 100644 --- a/source/coreml.js +++ b/source/coreml.js @@ -1240,7 +1240,7 @@ coreml.Tensor = class { return null; } - get layout() { + get encoding() { switch (this._type.dataType) { case 'float32': return '|'; default: return '<'; diff --git a/source/dlc.js b/source/dlc.js index c2076d6aa3f..33c5af45445 100644 --- a/source/dlc.js +++ b/source/dlc.js @@ -243,10 +243,10 @@ dlc.Tensor = class { constructor(type, data) { this.type = type; if (data instanceof Uint8Array) { - this.layout = '<'; + this.encoding = '<'; this.values = data; } else { - this.layout = '|'; + this.encoding = '|'; switch (type.dataType) { case 'uint8': this.values = data.bytes; break; case 'float32': this.values = data.floats; break; diff --git a/source/flax.js b/source/flax.js index 2d3da57332a..9b46a395c52 100644 --- a/source/flax.js +++ b/source/flax.js @@ -256,7 +256,7 @@ flax.Tensor = class { return this._type; } - get layout() { + get encoding() { switch (this._type.dataType) { case 'string': case 'object': diff --git a/source/hickle.js b/source/hickle.js index 0ffde6c5128..1b6b0c1326b 100644 --- a/source/hickle.js +++ b/source/hickle.js @@ -196,7 +196,7 @@ hickle.Tensor = class { return this._type; } - get layout() { + get encoding() { return this._littleEndian ? '<' : '>'; } diff --git a/source/keras.js b/source/keras.js index 34b58df0041..97cc87615fc 100644 --- a/source/keras.js +++ b/source/keras.js @@ -126,8 +126,8 @@ keras.ModelFactory = class { const components = weight_name.split('/'); components.pop(); const name = (components.length == 0 || components[0] !== layer_name) ? [ layer_name ].concat(components).join('/') : components.join('/'); - const layout = variable.littleEndian ? 
'<' : '>'; - const tensor = new keras.Tensor(weight_name, variable.shape, variable.type, null, layout, variable.data); + const encoding = variable.littleEndian ? '<' : '>'; + const tensor = new keras.Tensor(weight_name, variable.shape, variable.type, null, encoding, variable.data); weights.add(name, tensor); } } @@ -962,11 +962,11 @@ keras.Attribute = class { keras.Tensor = class { - constructor(name, shape, type, quantization, layout, data) { + constructor(name, shape, type, quantization, encoding, data) { this._name = name; this._type = new keras.TensorType(type, new keras.TensorShape(shape)); this._quantization = quantization; - this._layout = layout; + this._encoding = encoding; this._data = data; } @@ -978,8 +978,8 @@ keras.Tensor = class { return this._type; } - get layout() { - return this._layout; + get encoding() { + return this._encoding; } get quantization() { @@ -992,7 +992,7 @@ keras.Tensor = class { } get values() { - if (this._layout === '|') { + if (this._encoding === '|') { return this._data; } if (this._data === null) { diff --git a/source/mlir.js b/source/mlir.js index 9aa80e82828..e04b3ab431e 100644 --- a/source/mlir.js +++ b/source/mlir.js @@ -403,7 +403,7 @@ mlir.Tensor = class { return null; } - get layout() { + get encoding() { switch (this._type.dataType) { case 'float32': return '|'; default: return '<'; diff --git a/source/mnn.js b/source/mnn.js index 00136016f99..24a631aa49b 100644 --- a/source/mnn.js +++ b/source/mnn.js @@ -364,7 +364,7 @@ mnn.Tensor = class { return this._type; } - get layout() { + get encoding() { switch (this._type.dataType) { case 'int32': case 'float32': diff --git a/source/mslite.js b/source/mslite.js index dd99252748f..256b4f8ef87 100644 --- a/source/mslite.js +++ b/source/mslite.js @@ -321,7 +321,7 @@ mslite.Tensor = class { return this._type; } - get layout() { + get encoding() { switch (this._type.dataType) { case 'string': return '|'; default: return '<'; diff --git a/source/mxnet.js b/source/mxnet.js index 
ef70f39c6ec..1cc0570c898 100644 --- a/source/mxnet.js +++ b/source/mxnet.js @@ -750,7 +750,7 @@ mxnet.Tensor = class { return this._type; } - get layout() { + get encoding() { return '<'; } diff --git a/source/nnabla.js b/source/nnabla.js index ae32bd37b58..c44fa99ce49 100644 --- a/source/nnabla.js +++ b/source/nnabla.js @@ -326,7 +326,7 @@ nnabla.Tensor = class { return this._type; } - get layout() { + get encoding() { return '|'; } diff --git a/source/numpy.js b/source/numpy.js index 4f1766f5225..3088abf8854 100644 --- a/source/numpy.js +++ b/source/numpy.js @@ -233,7 +233,7 @@ numpy.Tensor = class { constructor(array) { this.type = new numpy.TensorType(array.dtype.__name__, new numpy.TensorShape(array.shape)); this.values = this.type.dataType == 'string' || this.type.dataType == 'object' ? array.flatten().tolist() : array.tobytes(); - this.layout = this.type.dataType == 'string' || this.type.dataType == 'object' ? '|' : array.dtype.byteorder; + this.encoding = this.type.dataType == 'string' || this.type.dataType == 'object' ? 
'|' : array.dtype.byteorder; } }; diff --git a/source/onnx.js b/source/onnx.js index c200eb57b3e..88f7224ec4f 100644 --- a/source/onnx.js +++ b/source/onnx.js @@ -618,7 +618,6 @@ onnx.Tensor = class { this._name = tensor.values.name || ''; this._type = context.createTensorType(tensor.values.data_type, tensor.dims.map((dim) => dim), 'sparse'); this._location = context.createLocation(tensor.values.data_location); - this._layout = 'sparse'; this._values = new onnx.Tensor(context, tensor.values); this._indices = new onnx.Tensor(context, tensor.indices); } else { @@ -633,11 +632,11 @@ onnx.Tensor = class { } case onnx.DataType.FLOAT: this._data = new Float32Array(tensor.float_data); - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.DOUBLE: this._data = new Float64Array(tensor.double_data); - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.BOOL: if (tensor.int32_data && tensor.int32_data.length > 0) { @@ -646,41 +645,41 @@ onnx.Tensor = class { for (let i = 0; i < this._data.length; i++) { this._data[i] = array[i] === 0 ? 
false : true; } - this._layout = '|'; + this._encoding = '|'; } break; case onnx.DataType.INT8: this._data = new Int8Array(tensor.int32_data); - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.UINT8: this._data = new Uint8Array(tensor.int32_data); - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.INT16: this._data = new Int32Array(tensor.int32_data); - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.UINT16: this._data = new Int32Array(tensor.int32_data); - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.INT32: this._data = new Int32Array(tensor.int32_data); - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.UINT32: case onnx.DataType.UINT64: this._data = tensor.uint64_data; - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.INT64: this._data = tensor.int64_data; - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.STRING: this._data = tensor.string_data; - this._layout = '|'; + this._encoding = '|'; break; case onnx.DataType.COMPLEX64: case onnx.DataType.COMPLEX128: @@ -695,7 +694,7 @@ onnx.Tensor = class { view.setUint16(i << 1, array[i], true); } this._data = buffer; - this._layout = '<'; + this._encoding = '<'; } break; case onnx.DataType.FLOAT8E4M3FN: @@ -704,7 +703,7 @@ onnx.Tensor = class { case onnx.DataType.FLOAT8E5M2FNUZ: if (tensor.int32_data && tensor.int32_data.length > 0) { this._data = new Uint8Array(Array.from(tensor.int32_data)); - this._layout = '<'; + this._encoding = '<'; } break; default: @@ -715,7 +714,7 @@ onnx.Tensor = class { } if (!this._data && tensor.raw_data && tensor.raw_data.length > 0) { this._data = tensor.raw_data; - this._layout = '<'; + this._encoding = '<'; } break; } @@ -730,7 +729,7 @@ onnx.Tensor = class { const length = parseInt(external_data.length, 10); if (Number.isInteger(offset) && Number.isInteger(length)) { this._data = context.location(external_data.location, offset, 
length); - this._layout = '<'; + this._encoding = '<'; } } } @@ -751,8 +750,8 @@ onnx.Tensor = class { return this._category; } - get layout() { - return this._layout; + get encoding() { + return this._encoding; } get type() { @@ -764,7 +763,7 @@ onnx.Tensor = class { } get values() { - switch (this._layout) { + switch (this.type.layout) { case 'sparse': { return this._values; } diff --git a/source/pytorch.js b/source/pytorch.js index 4463263986f..3d4d1cd9733 100644 --- a/source/pytorch.js +++ b/source/pytorch.js @@ -663,16 +663,16 @@ pytorch.Tensor = class { this._name = name || ''; const storage = tensor.storage(); const size = tensor.size(); - this._type = new pytorch.TensorType(storage.dtype.__reduce__(), new pytorch.TensorShape(size)); const layout = tensor.layout ? tensor.layout.__str__() : null; this._stride = tensor.stride(); if (layout && layout.startsWith('torch.sparse_')) { - this._layout = layout.split('.').pop().replace('_', '.'); + this._type = new pytorch.TensorType(storage.dtype.__reduce__(), new pytorch.TensorShape(size), layout.split('.').pop().replace('_', '.')); this._indices = new pytorch.Tensor('', tensor.indices); this._values = new pytorch.Tensor('', tensor.values); } else if (!layout || layout === 'torch.strided') { + this._type = new pytorch.TensorType(storage.dtype.__reduce__(), new pytorch.TensorShape(size)); this._data = storage.data; - this._layout = '<'; + this._encoding = '<'; this._indices = null; } else { throw new pytorch.Error("Unsupported tensor layout '" + layout + "'."); @@ -687,8 +687,8 @@ pytorch.Tensor = class { return this._type; } - get layout() { - return this._layout; + get encoding() { + return this._encoding; } get stride() { @@ -700,15 +700,16 @@ pytorch.Tensor = class { } get values() { - if (this._layout && this._layout.startsWith('sparse.')) { + const type = this._type.layout; + if (type && type.startsWith('sparse.')) { return this._values; } return this._data instanceof Uint8Array ? 
this._data : this._data.peek(); } decode() { - if (this._layout !== '<') { - throw new pytorch.Error("Tensor layout '" + this._layout + "' not implemented."); + if (this._encoding !== '<') { + throw new pytorch.Error("Tensor encoding '" + this._encoding + "' not implemented."); } const type = this._type; const data = this.values; @@ -740,9 +741,10 @@ pytorch.Tensor = class { pytorch.TensorType = class { - constructor(dataType, shape) { + constructor(dataType, shape, layout) { this._dataType = dataType; this._shape = shape; + this._layout = layout; } get dataType() { @@ -753,6 +755,10 @@ pytorch.TensorType = class { return this._shape; } + get layout() { + return this._layout; + } + toString() { return this._dataType + this._shape.toString(); } @@ -4210,7 +4216,7 @@ pytorch.nnapi.Tensor = class { return this._type; } - get layout() { + get encoding() { return '<'; } diff --git a/source/safetensors.js b/source/safetensors.js index d776b1e26d2..ee9fed09d88 100644 --- a/source/safetensors.js +++ b/source/safetensors.js @@ -138,7 +138,7 @@ safetensors.Tensor = class { constructor(obj, position, stream) { const shape = new safetensors.TensorShape(obj.shape); this.type = new safetensors.TensorType(obj.dtype, shape); - this.layout = '<'; + this.encoding = '<'; const size = obj.data_offsets[1] - obj.data_offsets[0]; position += obj.data_offsets[0]; stream.seek(position); diff --git a/source/sklearn.js b/source/sklearn.js index 3f00d7308b0..aa6be3bb20d 100644 --- a/source/sklearn.js +++ b/source/sklearn.js @@ -259,7 +259,7 @@ sklearn.Tensor = class { constructor(array) { this.type = new sklearn.TensorType(array.dtype.__name__, new sklearn.TensorShape(array.shape)); - this.layout = this.type.dataType == 'string' || this.type.dataType == 'object' ? '|' : array.dtype.byteorder; + this.encoding = this.type.dataType == 'string' || this.type.dataType == 'object' ? '|' : array.dtype.byteorder; this.values = this.type.dataType == 'string' || this.type.dataType == 'object' ? 
array.tolist() : array.tobytes(); } }; diff --git a/source/tf.js b/source/tf.js index 314f6073cd8..12a07b25977 100644 --- a/source/tf.js +++ b/source/tf.js @@ -1225,7 +1225,7 @@ tf.Tensor = class { this._tensor = tensor; if (Object.prototype.hasOwnProperty.call(tensor, 'tensor_content')) { this._values = tensor.tensor_content; - this._layout = '<'; + this._encoding = '<'; } else { const DataType = tf.proto.tensorflow.DataType; switch (tensor.dtype) { @@ -1239,7 +1239,7 @@ tf.Tensor = class { for (let i = 0; i < values.length; i++) { view.setUint32(i << 2, values[i] << 16, true); } - this._layout = '<'; + this._encoding = '<'; break; } case DataType.DT_HALF: { @@ -1249,17 +1249,17 @@ tf.Tensor = class { for (let i = 0; i < values.length; i++) { view.setUint16(i << 1, values[i], true); } - this._layout = '<'; + this._encoding = '<'; break; } case DataType.DT_FLOAT: { this._values = tensor.float_val || null; - this._layout = '|'; + this._encoding = '|'; break; } case DataType.DT_DOUBLE: { this._values = tensor.double_val || null; - this._layout = '|'; + this._encoding = '|'; break; } case DataType.DT_UINT8: @@ -1268,36 +1268,36 @@ tf.Tensor = class { case DataType.DT_INT16: case DataType.DT_INT32: { this._values = tensor.int_val || null; - this._layout = '|'; + this._encoding = '|'; break; } case DataType.DT_UINT32: { this._values = tensor.uint32_val || null; - this._layout = '|'; + this._encoding = '|'; break; } case DataType.DT_INT64: { this._values = tensor.int64_val || null; - this._layout = '|'; + this._encoding = '|'; break; } case DataType.DT_UINT64: { this._values = tensor.uint64_val || null; - this._layout = '|'; + this._encoding = '|'; break; } case DataType.DT_BOOL: { this._values = tensor.bool_val || null; - this._layout = '|'; + this._encoding = '|'; break; } case DataType.DT_STRING: { this._values = tensor.string_val || null; - this._layout = '|'; + this._encoding = '|'; break; } case DataType.DT_COMPLEX64: { - this._layout = '|'; + this._encoding = '|'; 
const values = tensor.scomplex_val || null; this._values = new Array(values.length >> 1); for (let i = 0; i < values.length; i += 2) { @@ -1306,7 +1306,7 @@ tf.Tensor = class { break; } case DataType.DT_COMPLEX128: { - this._layout = '|'; + this._encoding = '|'; const values = tensor.dcomplex_val || null; this._values = new Array(values.length >> 1); for (let i = 0; i < values.length; i += 2) { @@ -1337,13 +1337,13 @@ tf.Tensor = class { return this._category; } - get layout() { - return this._layout; + get encoding() { + return this._encoding; } get values() { let values = this._values; - if (this._layout === '|' && Array.isArray(values)) { + if (this._encoding === '|' && Array.isArray(values)) { if (this._type.dataType === 'string') { values = values.map((value) => tf.Utility.decodeText(value)); } diff --git a/source/tflite.js b/source/tflite.js index 0f9df6da539..75120993d62 100644 --- a/source/tflite.js +++ b/source/tflite.js @@ -569,7 +569,7 @@ tflite.Tensor = class { return this._type; } - get layout() { + get encoding() { switch (this._type.dataType) { case 'string': return '|'; default: return '<'; diff --git a/source/torch.js b/source/torch.js index 97d39870ea6..a443af2abd0 100644 --- a/source/torch.js +++ b/source/torch.js @@ -490,7 +490,7 @@ torch.Tensor = class { return this._type; } - get layout() { + get encoding() { return '|'; } diff --git a/source/view.js b/source/view.js index 11555439dd7..ca504d0d650 100644 --- a/source/view.js +++ b/source/view.js @@ -2675,13 +2675,7 @@ view.ValueView = class extends view.Control { if (location !== undefined) { this._bold('location', location); } - let layout = this._value.type ? this._value.type.layout : null; - if (initializer) { - if (layout && layout !== initializer.layout) { - throw new view.Error('Tensor type layout mismatch.'); - } - layout = layout || initializer.layout; - } + const layout = this._value.type ? 
this._value.type.layout : null; if (layout) { const layouts = new Map([ [ 'sparse', 'sparse' ], @@ -2729,7 +2723,9 @@ view.ValueView = class extends view.Control { if (Array.isArray(tensor.stride) && tensor.stride.length > 0) { this._code('stride', tensor.stride.join(',')); } - if (tensor.layout !== '<' && tensor.layout !== '>' && tensor.layout !== '|' && tensor.layout !== 'sparse' && tensor.layout !== 'sparse.coo') { + if (tensor.encoding !== '<' && tensor.encoding !== '>' && tensor.encoding !== '|') { + contentLine.innerHTML = "Tensor encoding '" + tensor.layout + "' is not implemented."; + } else if (tensor.layout && (tensor.layout !== 'sparse' && tensor.layout !== 'sparse.coo')) { contentLine.innerHTML = "Tensor layout '" + tensor.layout + "' is not implemented."; } else if (tensor.empty) { contentLine.innerHTML = 'Tensor data is empty.'; @@ -3237,40 +3233,40 @@ view.Tensor = class { this._tensor = tensor; this._type = tensor.type; this._stride = tensor.stride; - switch (tensor.layout) { + this._encoding = tensor.encoding; + this._layout = tensor.type.layout; + switch (this._encoding) { case undefined: case '': case '<': { this._data = this._tensor.values; - this._layout = '<'; + this._encoding = '<'; this._littleEndian = true; break; } case '>': { this._data = this._tensor.values; - this._layout = '>'; + this._encoding = '>'; this._littleEndian = false; break; } case '|': { this._values = this._tensor.values; - this._layout = '|'; + this._encoding = '|'; break; } - case 'sparse': { - this._indices = this._tensor.indices; - this._values = this._tensor.values; - this._layout = 'sparse'; - break; + default: { + throw new view.Error("Unsupported tensor encoding '" + this._encoding + "'."); } + } + switch (this._layout) { + case 'sparse': case 'sparse.coo': { this._indices = this._tensor.indices; this._values = this._tensor.values; - this._layout = 'sparse.coo'; break; } default: { - this._layout = tensor.layout; break; } } @@ -3291,6 +3287,10 @@ view.Tensor = 
class { return this._type; } + get encoding() { + return this._encoding; + } + get layout() { return this._layout; } @@ -3301,19 +3301,20 @@ view.Tensor = class { get empty() { switch (this._layout) { - case '<': - case '>': { - return !(Array.isArray(this._data) || this._data instanceof Uint8Array || this._data instanceof Int8Array) || this._data.length === 0; - } - case '|': { - return !(Array.isArray(this._values) || ArrayBuffer.isView(this._values)) || this._values.length === 0; - } case 'sparse': case 'sparse.coo': { return !this._values || this.indices || this._values.values.length === 0; } default: { - throw new Error("Unsupported tensor format '" + this._format + "'."); + switch (this._encoding) { + case '<': + case '>': + return !(Array.isArray(this._data) || this._data instanceof Uint8Array || this._data instanceof Int8Array) || this._data.length === 0; + case '|': + return !(Array.isArray(this._values) || ArrayBuffer.isView(this._values)) || this._values.length === 0; + default: + throw new Error("Unsupported tensor encoding '" + this._encoding + "'."); + } } } } @@ -3321,7 +3322,7 @@ view.Tensor = class { get value() { const context = this._context(); context.limit = Number.MAX_SAFE_INTEGER; - switch (context.layout) { + switch (context.encoding) { case '<': case '>': { return this._decodeData(context, 0); @@ -3330,7 +3331,7 @@ view.Tensor = class { return this._decodeValues(context, 0); } default: { - throw new Error("Unsupported tensor format '" + this._format + "'."); + throw new Error("Unsupported tensor encoding '" + context.encoding + "'."); } } } @@ -3338,7 +3339,7 @@ view.Tensor = class { toString() { const context = this._context(); context.limit = 10000; - switch (context.layout) { + switch (context.encoding) { case '<': case '>': { const value = this._decodeData(context, 0); @@ -3349,59 +3350,30 @@ view.Tensor = class { return view.Tensor._stringify(value, '', ' '); } default: { - throw new Error("Unsupported tensor format '" + this._format + 
"'."); + throw new Error("Unsupported tensor encoding '" + context.encoding + "'."); } } } _context() { - if (this._layout !== '<' && this._layout !== '>' && this._layout !== '|' && this._layout !== 'sparse' && this._layout !== 'sparse.coo') { + if (this._encoding !== '<' && this._encoding !== '>' && this._encoding !== '|') { + throw new Error("Tensor encoding '" + this._encoding + "' is not supported."); + } + if (this._layout && (this._layout !== 'sparse' && this._layout !== 'sparse.coo')) { throw new Error("Tensor layout '" + this._layout + "' is not supported."); } const dataType = this._type.dataType; const context = {}; - context.layout = this._layout; + context.encoding = this._encoding; context.dimensions = this._type.shape.dimensions.map((value) => !Number.isInteger(value) && value.toNumber ? value.toNumber() : value); context.dataType = dataType; const size = context.dimensions.reduce((a, b) => a * b, 1); switch (this._layout) { - case '<': - case '>': { - context.data = (this._data instanceof Uint8Array || this._data instanceof Int8Array) ? 
this._data : this._data.peek(); - context.view = new DataView(context.data.buffer, context.data.byteOffset, context.data.byteLength); - if (view.Tensor.dataTypes.has(dataType)) { - context.itemsize = view.Tensor.dataTypes.get(dataType); - if (context.data.length < (context.itemsize * size)) { - throw new Error('Invalid tensor data size.'); - } - } else if (dataType.startsWith('uint') && !isNaN(parseInt(dataType.substring(4), 10))) { - context.dataType = 'uint'; - context.bits = parseInt(dataType.substring(4), 10); - context.itemsize = 1; - } else if (dataType.startsWith('int') && !isNaN(parseInt(dataType.substring(3), 10))) { - context.dataType = 'int'; - context.bits = parseInt(dataType.substring(3), 10); - context.itemsize = 1; - } else { - throw new Error("Tensor data type '" + dataType + "' is not implemented."); - } - break; - } - case '|': { - context.data = this._values; - if (!view.Tensor.dataTypes.has(dataType) && dataType !== 'string' && dataType !== 'object') { - throw new Error("Tensor data type '" + dataType + "' is not implemented."); - } - if (size !== this._values.length) { - throw new Error('Invalid tensor data length.'); - } - break; - } case 'sparse': { const indices = new view.Tensor(this._indices).value; const values = new view.Tensor(this._values).value; context.data = this._decodeSparse(dataType, context.dimensions, indices, values); - context.layout = '|'; + context.encoding = '|'; break; } case 'sparse.coo': { @@ -3423,11 +3395,47 @@ view.Tensor = class { } } context.data = this._decodeSparse(dataType, context.dimensions, indices, values); - context.layout = '|'; + context.encoding = '|'; break; } default: { - throw new view.Tensor("Unsupported tensor layout '" + this._layout + "'."); + switch (this._encoding) { + case '<': + case '>': { + context.data = (this._data instanceof Uint8Array || this._data instanceof Int8Array) ? 
this._data : this._data.peek(); + context.view = new DataView(context.data.buffer, context.data.byteOffset, context.data.byteLength); + if (view.Tensor.dataTypes.has(dataType)) { + context.itemsize = view.Tensor.dataTypes.get(dataType); + if (context.data.length < (context.itemsize * size)) { + throw new Error('Invalid tensor data size.'); + } + } else if (dataType.startsWith('uint') && !isNaN(parseInt(dataType.substring(4), 10))) { + context.dataType = 'uint'; + context.bits = parseInt(dataType.substring(4), 10); + context.itemsize = 1; + } else if (dataType.startsWith('int') && !isNaN(parseInt(dataType.substring(3), 10))) { + context.dataType = 'int'; + context.bits = parseInt(dataType.substring(3), 10); + context.itemsize = 1; + } else { + throw new Error("Tensor data type '" + dataType + "' is not implemented."); + } + break; + } + case '|': { + context.data = this._values; + if (!view.Tensor.dataTypes.has(dataType) && dataType !== 'string' && dataType !== 'object') { + throw new Error("Tensor data type '" + dataType + "' is not implemented."); + } + if (size !== this._values.length) { + throw new Error('Invalid tensor data length.'); + } + break; + } + default: { + throw new view.Tensor("Unsupported tensor encoding '" + this._encoding + "'."); + } + } } } context.index = 0; diff --git a/source/xmodel.js b/source/xmodel.js index af13740bf7c..342d01d5a20 100644 --- a/source/xmodel.js +++ b/source/xmodel.js @@ -244,7 +244,7 @@ xmodel.Tensor = class { if (node.op_attr && node.op_attr.data) { const data = node.op_attr.data; if (data.bytes_value && data.bytes_value.value) { - this.layout = '<'; + this.encoding = '<'; this.values = data.bytes_value.value; } } diff --git a/test/models.js b/test/models.js index ecd11e42053..5c3e14cbba8 100755 --- a/test/models.js +++ b/test/models.js @@ -550,9 +550,9 @@ class Target { } if (this.assert) { for (const assert of this.assert) { - const parts = assert.split('=').map((item) => item.trim()); + const parts = 
assert.split('==').map((item) => item.trim()); const properties = parts[0].split('.'); - const value = parts[1]; + const value = JSON.parse(parts[1].replace(/\s*'|'\s*/g, '"')); let context = { model: this.model }; while (properties.length) { const property = properties.shift(); @@ -571,7 +571,7 @@ class Target { } throw new Error("Invalid property path: '" + parts[0]); } - if (context !== value.toString()) { + if (context !== value) { throw new Error("Invalid '" + context.toString() + "' != '" + assert + "'."); } } @@ -592,7 +592,10 @@ class Target { if (value.initializer) { value.initializer.type.toString(); const tensor = new view.Tensor(value.initializer); - if (tensor.layout !== '<' && tensor.layout !== '>' && tensor.layout !== '|' && tensor.layout !== 'sparse' && tensor.layout !== 'sparse.coo') { + if (tensor.encoding !== '<' && tensor.encoding !== '>' && tensor.encoding !== '|') { + throw new Error("Tensor encoding '" + tensor.encoding + "' is not implemented."); + } + if (tensor.layout && (tensor.layout !== 'sparse' && tensor.layout !== 'sparse.coo')) { throw new Error("Tensor layout '" + tensor.layout + "' is not implemented."); } if (!tensor.empty) { diff --git a/test/models.json b/test/models.json index ef85ec4c5b4..01f1360168b 100644 --- a/test/models.json +++ b/test/models.json @@ -3112,7 +3112,7 @@ "target": "centerface.param,centerface.bin", "source": "https://raw.githubusercontent.com/MirrorYuChen/ncnn_example/798f64b7d5f0b883e05cb994258d43658b0661b6/models/centerface.param,https://raw.githubusercontent.com/MirrorYuChen/ncnn_example/798f64b7d5f0b883e05cb994258d43658b0661b6/models/centerface.bin", "format": "ncnn", - "assert": [ "model.graphs[0].nodes[0].type.name=Convolution" ], + "assert": [ "model.graphs[0].nodes[0].type.name == 'Convolution'" ], "link": "https://github.com/MirrorYuChen/ncnn_example" }, { @@ -3176,7 +3176,7 @@ "target": "MobileNetSSD_deploy.param.bin,MobileNetSSD_deploy.bin", "source": 
"https://raw.githubusercontent.com/chehongshu/ncnnforandroid_objectiondetection_Mobilenetssd/master/MobileNetSSD_demo/app/src/main/assets/MobileNetSSD_deploy.param.bin,https://raw.githubusercontent.com/chehongshu/ncnnforandroid_objectiondetection_Mobilenetssd/master/MobileNetSSD_demo/app/src/main/assets/MobileNetSSD_deploy.bin", "format": "ncnn", - "assert": [ "model.graphs[0].nodes[1].type.name=Convolution" ], + "assert": [ "model.graphs[0].nodes[1].type.name == 'Convolution'" ], "link": "https://github.com/chehongshu/ncnnforandroid_objectiondetection_Mobilenetssd" }, { @@ -3518,7 +3518,7 @@ "target": "denotation_Add_ImageNet1920WithImageMetadataBgr8_SRGB_0_255.onnx", "source": "https://github.com/lutzroeder/netron/files/2587943/onnx_denotation_models.zip[denotation_Add_ImageNet1920WithImageMetadataBgr8_SRGB_0_255.onnx]", "format": "ONNX v3", - "assert": [ "model.graphs[0].nodes[0].outputs[0].value[0].type.denotation = Image(Bgr8,SRGB,NominalRange_0_255)" ], + "assert": [ "model.graphs[0].nodes[0].outputs[0].value[0].type.denotation == 'Image(Bgr8,SRGB,NominalRange_0_255)'" ], "link": "https://github.com/lutzroeder/netron/issues/183" }, { @@ -3880,7 +3880,7 @@ "target": "sparse_initializer_as_output.json", "source": "https://github.com/lutzroeder/netron/files/12444489/sparse_initializer_as_output.json.zip[sparse_initializer_as_output.json]", "format": "ONNX JSON v7", - "assert": [ "model.graphs[0].outputs[0].value[0].type.layout=sparse" ], + "assert": [ "model.graphs[0].outputs[0].value[0].type.layout == 'sparse'" ], "link": "https://github.com/lutzroeder/netron/issues/741" }, { @@ -3895,7 +3895,7 @@ "target": "sparse_to_dense_matmul.onnx", "source": "https://github.com/lutzroeder/netron/files/12444490/sparse_to_dense_matmul.onnx.zip[sparse_to_dense_matmul.onnx]", "format": "ONNX v7", - "assert": [ "model.graphs[0].nodes[0].inputs[0].value[0].type.layout=sparse" ], + "assert": [ "model.graphs[0].nodes[0].inputs[0].value[0].type.layout == 'sparse'" ], "link": 
"https://github.com/lutzroeder/netron/issues/741" }, { @@ -5194,6 +5194,7 @@ "target": "sparse_coo.pth", "source": "https://github.com/lutzroeder/netron/files/9541426/sparse_coo.pth.zip[sparse_coo.pth]", "format": "PyTorch v1.6", + "assert": [ "model.graphs[0].nodes[0].inputs[0].value[0].type.layout == 'sparse.coo'" ], "link": "https://github.com/lutzroeder/netron/issues/720" }, { @@ -5972,7 +5973,7 @@ "source": "https://github.com/lutzroeder/netron/files/5613448/events.out.tfevents.1606692323.b5c8f88cc7ee.58.2.zip[events.out.tfevents.1606692323.b5c8f88cc7ee.58.2]", "format": "TensorFlow Event File v2", "producer": "PyTorch", - "assert": [ "model.graphs[0].nodes[0].type.name=aten::_convolution" ], + "assert": [ "model.graphs[0].nodes[0].type.name == 'aten::_convolution'" ], "link": "https://github.com/lutzroeder/netron/issues/638" }, { @@ -6452,7 +6453,7 @@ "target": "densenet.tflite", "source": "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/densenet_2018_04_27.tgz[densenet/densenet.tflite]", "format": "TensorFlow Lite v3", - "assert": [ "model.graphs[0].nodes[0].type.name=Conv2D" ], + "assert": [ "model.graphs[0].nodes[0].type.name == 'Conv2D'" ], "link": "https://www.tensorflow.org/lite/guide/hosted_models" }, { From 6ed267f4049d10a2db524e28b745a5b43b04d123 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sat, 26 Aug 2023 09:31:50 -0700 Subject: [PATCH 17/36] Add NNC test file (#1144) --- source/nnc.js | 32 ++++++++++++++++++++++++++++++++ source/view.js | 4 ++-- test/models.json | 7 +++++++ 3 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 source/nnc.js diff --git a/source/nnc.js b/source/nnc.js new file mode 100644 index 00000000000..da37e7ead24 --- /dev/null +++ b/source/nnc.js @@ -0,0 +1,32 @@ + +var nnc = {}; + +nnc.ModelFactory = class { + + match(context) { + const stream = context.stream; + const signature = [ 0xC0, 0x0F, 0x00, 0x00, 0x45, 0x4E, 0x4E, 0x43 ]; + if (stream && 
signature.length <= stream.length && stream.peek(signature.length).every((value, index) => value === signature[index])) { + return 'nnc'; + } + return ''; + } + + async open(/* context, target */) { + throw new nnc.Error('File contains undocumented NNC data.'); + } +}; + +nnc.Error = class extends Error { + + constructor(message) { + super(message); + this.name = 'Error loading NNC model.'; + } +}; + +if (typeof module !== 'undefined' && typeof module.exports === 'object') { + module.exports.ModelFactory = nnc.ModelFactory; +} + + diff --git a/source/view.js b/source/view.js index ca504d0d650..93b32a5c836 100644 --- a/source/view.js +++ b/source/view.js @@ -5094,6 +5094,7 @@ view.ModelFactoryService = class { this.register('./mlir', [ '.mlir']); this.register('./sentencepiece', [ '.model' ]); this.register('./hailo', [ '.hn', '.har' ]); + this.register('./nnc', [ '.nnc' ]); this.register('./safetensors', [ '.safetensors' ]); } @@ -5286,8 +5287,7 @@ view.ModelFactoryService = class { const file_identifier = tags.get('file_identifier'); const formats = [ { name: 'onnxruntime.experimental.fbs.InferenceSession data', identifier: 'ORTM' }, - { name: 'tflite.Model data', identifier: 'TFL3' }, - { name: 'FlatBuffers ENNC data', identifier: 'ENNC' }, + { name: 'tflite.Model data', identifier: 'TFL3' } ]; for (const format of formats) { if (file_identifier === format.identifier) { diff --git a/test/models.json b/test/models.json index 01f1360168b..0ebb12aa476 100644 --- a/test/models.json +++ b/test/models.json @@ -3320,6 +3320,13 @@ "error": "NNEF v1.0 support not implemented.", "link": "https://github.com/lutzroeder/netron/issues/992" }, + { + "type": "nnc", + "target": "sm_uint8_fence.nnc", + "source": "https://github.com/lutzroeder/netron/files/12446059/sm_uint8_fence.nnc.zip[sm_uint8_fence.nnc]", + "error": "File contains undocumented NNC data.", + "link": "https://github.com/lutzroeder/netron/issues/1144" + }, { "type": "numpy", "target": "cifar10-1w-1a.npz", From 
836c6da1dd340efb0161445283d58beafa2a0614 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sat, 26 Aug 2023 15:11:09 -0700 Subject: [PATCH 18/36] Update pytorch.js (#1142) --- source/pytorch.js | 16 +++++++++++++++- test/models.json | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/source/pytorch.js b/source/pytorch.js index 3d4d1cd9733..68981f906c1 100644 --- a/source/pytorch.js +++ b/source/pytorch.js @@ -2682,6 +2682,20 @@ pytorch.jit.Execution = class extends pytorch.Execution { tensor.resize_(Array.isArray(tensor.shape) && tensor.shape.length > size ? tensor.shape.slice(-size) : Array(size).fill(NaN)); } } + // if torch.gt(torch.dim(x), 1): + // xxxx + // ops.prim.RaiseException(...) + if (statement.type === 'if' && + pytorch.Utility.isCall(statement.condition, 'torch.gt', 2) && + pytorch.Utility.isCall(statement.condition.args[0], 'torch.dim', 1) && + statement.then.statements.length > 0 && + pytorch.Utility.isCall(statement.then.statements.slice(-1).pop(), 'ops.prim.RaiseException')) { + const tensor = this.expression(statement.condition.args[0].args[0], context); + const size = this.expression(statement.condition.args[1], context); + if (pytorch.Utility.isTensor(tensor) && Number.isInteger(size) && size < 10) { + tensor.resize_(Array.isArray(tensor.shape) && tensor.shape.length > size ? 
tensor.shape.slice(-size) : Array(size).fill(NaN)); + } + } // if bool(...): // ops.prim.RaiseException(torch.format(_1, dtype)) // else: @@ -3365,7 +3379,7 @@ pytorch.Utility = class { static isCall(expression, name, size) { if (expression.type === 'call' && - expression.args.length === size && + (size === undefined || size === expression.args.length) && pytorch.Utility.target(expression.target) === name) { return true; } diff --git a/test/models.json b/test/models.json index 0ebb12aa476..28c086697a3 100644 --- a/test/models.json +++ b/test/models.json @@ -5132,7 +5132,7 @@ "type": "pytorch", "target": "silero_vad.jit", "source": "https://github.com/lutzroeder/netron/files/12439374/silero_vad.zip[silero_vad.jit]", - "error": "Too many dimensions for input audio chunk ?", + "error": "Supported sampling rates: [8000, 16000] (or multiply of 16000)", "link": "https://github.com/lutzroeder/netron/issues/1142" }, { From e7e15f6ebb8980850e5132d2fc46eedbb23d1436 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sun, 27 Aug 2023 10:48:40 -0700 Subject: [PATCH 19/36] Update python.js --- source/python.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/source/python.js b/source/python.js index ac5f05046e0..b3acc718b01 100644 --- a/source/python.js +++ b/source/python.js @@ -2178,9 +2178,12 @@ python.Execution = class { }); this.registerFunction('megengine.functional.nn.conv2d', function() {}); this.registerFunction('megengine.functional.nn.relu', function() {}); + this.registerFunction('megengine.functional.nn.sigmoid', function() {}); this.registerFunction('megengine.module.qat.module.QATModule._apply_fakequant_with_observer', function() {}); this.registerFunction('megengine.functional.tensor.concat', function() {}); this.registerFunction('megengine.functional.tensor.flatten', function() {}); + this.registerFunction('megengine.functional.tensor.split', function() {}); + this.registerFunction('megengine.functional.tensor.reshape', function() {}); 
this.registerType('megengine.core._imperative_rt.common.CompNode', class {}); this.registerType('megengine.core._imperative_rt.ops.FakeQuant', class {}); this.registerType('megengine.core._imperative_rt.ops.GetVarShape', class {}); From cfd58ca607a42d807bbca8117813a1503dcda988 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sun, 27 Aug 2023 16:33:14 -0700 Subject: [PATCH 20/36] Update index.html --- source/index.html | 6 +++--- source/view.js | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/index.html b/source/index.html index 0231db2a74f..40b3265c870 100644 --- a/source/index.html +++ b/source/index.html @@ -92,9 +92,9 @@ .titlebar-button { display: flex; justify-content: center; align-items: center; width: 46px; height: 32px; user-select: none; -webkit-app-region: no-drag; } .titlebar-button:hover { color: #000000; background-color: rgba(0, 0, 0, 0.15); } .titlebar-button-close:hover { color: #ffffff; background-color: #b43029; } -.menu-button { display: flex; justify-content: center; align-items: center; color: #aaaaaa; font-size: 16px; height: 32px; width: 32px; position: fixed; top: 0; left: 0; right: 0; bottom: 0; z-index: 2; -webkit-app-region: no-drag; -webkit-app-region: no-drag; user-select: none; } +.menu-button { display: flex; justify-content: center; align-items: center; color: #aaaaaa; font-size: 20px; height: 32px; width: 32px; position: fixed; top: 0; left: 0; right: 0; bottom: 0; z-index: 2; -webkit-app-region: no-drag; -webkit-app-region: no-drag; user-select: none; } .menu-button:hover { color: #000000; } -.menu { display: block; position: absolute; left: -16em; width: 16em; top: 0; height: 100%; z-index: 2; background-color: #ececec; border-right: 1px solid rgba(255, 255, 255, 0.5); padding-top: 40px; padding-bottom: 2px; margin-left: 0; margin-top: 0; overflow: hidden; transition: 0.1s; } +.menu { display: block; position: absolute; left: -17em; width: 17em; top: 0; height: 100%; z-index: 2; background-color: 
#ececec; border-right: 1px solid rgba(255, 255, 255, 0.5); padding-top: 40px; padding-bottom: 2px; margin-left: 0; margin-top: 0; overflow: hidden; transition: 0.1s; } .menu .menu-group { margin-bottom: 12px; } .menu .menu-group .menu-group-header { display: block; border: none; border-radius: 0; color: black; width: 100%; text-align: left; margin: 4px 12px 5px 12px; white-space: no-wrap; font-size: 11px; font-weight: bold; color: #bbbbbb; white-space: nowrap; } .menu .menu-group .menu-command { display: block; border: none; border-radius: 0; background-color: transparent; color: black; width: 100%; text-align: left; padding: 4px 12px 5px 12px; font-size: 12px; } @@ -496,6 +496,6 @@

- + \ No newline at end of file diff --git a/source/view.js b/source/view.js index 93b32a5c836..0bd700b9af0 100644 --- a/source/view.js +++ b/source/view.js @@ -1226,7 +1226,7 @@ view.Menu = class { this.unregister(this._pop); this.unregister(this._push); this._element.style.opacity = 0; - this._element.style.left = '-16em'; + this._element.style.left = '-17em'; const button = this._element.ownerDocument.activeElement; if (this._buttons.indexOf(button) > 0) { button.blur(); From 3ceee636e7d393caedc573e3f61ff68743f557a4 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sun, 27 Aug 2023 20:52:46 -0700 Subject: [PATCH 21/36] Update Keras test files --- test/models.json | 70 ++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/test/models.json b/test/models.json index 28c086697a3..92624485dfe 100644 --- a/test/models.json +++ b/test/models.json @@ -2135,6 +2135,41 @@ "format": "Keras v2.4.0", "link": "https://github.com/lutzroeder/netron/issues/57" }, + { + "type": "keras", + "target": "issue326.json", + "source": "https://github.com/lutzroeder/netron/files/12449721/issue326.json.zip[issue326.json]", + "format": "Keras v2.2.4-tf", + "link": "https://github.com/lutzroeder/netron/issues/326" + }, + { + "type": "keras", + "target": "issue428.h5", + "source": "https://github.com/lutzroeder/netron/files/12449731/issue428.h5.zip[issue428.h5]", + "format": "Keras v2.2.4-tf", + "link": "https://github.com/lutzroeder/netron/issues/428" + }, + { + "type": "keras", + "target": "issue435.h5", + "source": "https://github.com/lutzroeder/netron/files/12449734/issue435.h5.zip[issue435.h5]", + "format": "Keras v2.2.4", + "link": "https://github.com/lutzroeder/netron/issues/435" + }, + { + "type": "keras", + "target": "issue553.h5", + "source": "https://github.com/lutzroeder/netron/files/12449758/issue553.h5.zip[issue553.h5]", + "format": "Keras v2.2.4-tf", + "link": "https://github.com/lutzroeder/netron/issues/553" + }, + { + 
"type": "keras", + "target": "issue855.h5", + "source": "https://github.com/lutzroeder/netron/files/12449745/issue855.h5.zip[issue855.h5]", + "format": "Keras v2.7.0", + "link": "https://github.com/lutzroeder/netron/issues/855" + }, { "type": "keras", "target": "issue1142.1.keras", @@ -2267,41 +2302,6 @@ "format": "Keras Weights v2.3.1", "link": "https://github.com/lutzroeder/netron/issues/428" }, - { - "type": "keras", - "target": "netron_issue_326.json", - "source": "https://github.com/lutzroeder/netron/files/3563087/netron_issue_326.zip[netron_issue_326.json]", - "format": "Keras v2.2.4-tf", - "link": "https://github.com/lutzroeder/netron/issues/326" - }, - { - "type": "keras", - "target": "netron_issue_428.h5", - "source": "https://github.com/lutzroeder/netron/files/4221535/netron_issue_428.zip[netron_issue_428.h5]", - "format": "Keras v2.2.4-tf", - "link": "https://github.com/lutzroeder/netron/issues/428" - }, - { - "type": "keras", - "target": "netron_issue_435.h5", - "source": "https://github.com/lutzroeder/netron/files/4269953/netron_issue_435.zip[netron_issue_435.h5]", - "format": "Keras v2.2.4", - "link": "https://github.com/lutzroeder/netron/issues/435" - }, - { - "type": "keras", - "target": "netron_issue_553.h5", - "source": "https://github.com/lutzroeder/netron/files/4979486/netron_issue_553.zip[netron_issue_553.h5]", - "format": "Keras v2.2.4-tf", - "link": "https://github.com/lutzroeder/netron/issues/553" - }, - { - "type": "keras", - "target": "netron_issue_855.h5", - "source": "https://github.com/lutzroeder/netron/files/7729736/netron_issue_855.h5.zip[netron_issue_855.h5]", - "format": "Keras v2.7.0", - "link": "https://github.com/lutzroeder/netron/issues/855" - }, { "type": "keras", "target": "nietzsche.h5", From 9164c153dd004d48c6c1d413175d57eb52e01c01 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Mon, 28 Aug 2023 18:59:48 -0700 Subject: [PATCH 22/36] Update tflite-schema.js --- source/tflite-metadata.json | 12 ++++++++++++ 
source/tflite-schema.js | 32 +++++++++++++++++++++++++++++++- 2 files changed, 43 insertions(+), 1 deletion(-) diff --git a/source/tflite-metadata.json b/source/tflite-metadata.json index 226cde29812..94014e8a94d 100644 --- a/source/tflite-metadata.json +++ b/source/tflite-metadata.json @@ -879,6 +879,18 @@ { "name": "body_subgraph_index", "type": "int32", "default": 0 } ] }, + { + "name": "StablehloScatter", + "attributes": [ + { "name": "indices_are_sorted", "type": "boolean", "default": false }, + { "name": "update_window_dims", "type": "int64[]", "default": 0 }, + { "name": "inserted_window_dims", "type": "int64[]", "default": 0 }, + { "name": "scatter_dims_to_operand_dims", "type": "int64[]", "default": 0 }, + { "name": "index_vector_dim", "type": "int64", "default": 0 }, + { "name": "unique_indices", "type": "boolean", "default": false }, + { "name": "update_computation_subgraph_index", "type": "int32", "default": 0 } + ] + }, { "name": "StablehloSlice", "attributes": [ diff --git a/source/tflite-schema.js b/source/tflite-schema.js index b2d98ff106a..a55cea54869 100644 --- a/source/tflite-schema.js +++ b/source/tflite-schema.js @@ -433,7 +433,8 @@ $root.tflite.BuiltinOperator = { STABLEHLO_RSQRT: 186, STABLEHLO_SELECT: 187, STABLEHLO_SUBTRACT: 188, - STABLEHLO_TANH: 189 + STABLEHLO_TANH: 189, + STABLEHLO_SCATTER: 190 }; $root.tflite.BuiltinOptions = class { @@ -713,6 +714,7 @@ $root.tflite.BuiltinOptions2 = class { case 4: return $root.tflite.StablehloConvolutionOptions.decode(reader, position); case 5: return $root.tflite.StablehloCustomCallOptions.decode(reader, position); case 6: return $root.tflite.StablehloReduceOptions.decode(reader, position); + case 7: return $root.tflite.StablehloScatterOptions.decode(reader, position); default: return undefined; } } @@ -725,6 +727,7 @@ $root.tflite.BuiltinOptions2 = class { case 'StablehloConvolutionOptions': return $root.tflite.StablehloConvolutionOptions.decodeText(reader, json); case 'StablehloCustomCallOptions': 
return $root.tflite.StablehloCustomCallOptions.decodeText(reader, json); case 'StablehloReduceOptions': return $root.tflite.StablehloReduceOptions.decodeText(reader, json); + case 'StablehloScatterOptions': return $root.tflite.StablehloScatterOptions.decodeText(reader, json); default: return undefined; } } @@ -874,6 +877,33 @@ $root.tflite.StablehloConvolutionOptions = class StablehloConvolutionOptions { } }; +$root.tflite.StablehloScatterOptions = class StablehloScatterOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloScatterOptions(); + $.indices_are_sorted = reader.bool_(position, 4, false); + $.update_window_dims = reader.int64s_(position, 6); + $.inserted_window_dims = reader.int64s_(position, 8); + $.scatter_dims_to_operand_dims = reader.int64s_(position, 10); + $.index_vector_dim = reader.int64_(position, 12, 0); + $.unique_indices = reader.bool_(position, 14, false); + $.update_computation_subgraph_index = reader.int32_(position, 16, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloScatterOptions(); + $.indices_are_sorted = reader.value(json.indices_are_sorted, false); + $.update_window_dims = reader.array(json.update_window_dims); + $.inserted_window_dims = reader.array(json.inserted_window_dims); + $.scatter_dims_to_operand_dims = reader.array(json.scatter_dims_to_operand_dims); + $.index_vector_dim = reader.value(json.index_vector_dim, 0); + $.unique_indices = reader.value(json.unique_indices, false); + $.update_computation_subgraph_index = reader.value(json.update_computation_subgraph_index, 0); + return $; + } +}; + $root.tflite.Padding = { SAME: 0, VALID: 1 From c553af9c277b7a7df5ffd575b6a6d7c364317813 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Mon, 28 Aug 2023 19:14:58 -0700 Subject: [PATCH 23/36] Update tf-proto.js --- source/tf-proto.js | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/source/tf-proto.js b/source/tf-proto.js index 
37dfcc85f27..019ad095695 100644 --- a/source/tf-proto.js +++ b/source/tf-proto.js @@ -6372,6 +6372,9 @@ $root.tensorflow.GPUOptions.Experimental = class Experimental { case 1: message.virtual_devices.push($root.tensorflow.GPUOptions.Experimental.VirtualDevices.decode(reader, reader.uint32())); break; + case 15: + message.num_virtual_devices_per_gpu = reader.int32(); + break; case 2: message.use_unified_memory = reader.bool(); break; @@ -6425,6 +6428,9 @@ $root.tensorflow.GPUOptions.Experimental = class Experimental { case "virtual_devices": message.virtual_devices.push($root.tensorflow.GPUOptions.Experimental.VirtualDevices.decodeText(reader)); break; + case "num_virtual_devices_per_gpu": + message.num_virtual_devices_per_gpu = reader.int32(); + break; case "use_unified_memory": message.use_unified_memory = reader.bool(); break; @@ -6470,6 +6476,7 @@ $root.tensorflow.GPUOptions.Experimental = class Experimental { } }; +$root.tensorflow.GPUOptions.Experimental.prototype.num_virtual_devices_per_gpu = 0; $root.tensorflow.GPUOptions.Experimental.prototype.use_unified_memory = false; $root.tensorflow.GPUOptions.Experimental.prototype.num_dev_to_dev_copy_streams = 0; $root.tensorflow.GPUOptions.Experimental.prototype.collective_ring_order = ""; @@ -9636,6 +9643,9 @@ $root.tensorflow.CoordinationServiceConfig = class CoordinationServiceConfig { case 11: message.allow_new_incarnation_to_reconnect = reader.bool(); break; + case 12: + message.force_disable = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -9680,6 +9690,9 @@ $root.tensorflow.CoordinationServiceConfig = class CoordinationServiceConfig { case "allow_new_incarnation_to_reconnect": message.allow_new_incarnation_to_reconnect = reader.bool(); break; + case "force_disable": + message.force_disable = reader.bool(); + break; default: reader.field(tag, message); break; @@ -9697,6 +9710,7 @@ $root.tensorflow.CoordinationServiceConfig.prototype.heartbeat_timeout_in_ms = p 
$root.tensorflow.CoordinationServiceConfig.prototype.shutdown_barrier_timeout_in_ms = protobuf.Int64.create(0); $root.tensorflow.CoordinationServiceConfig.prototype.agent_destruction_without_shutdown = false; $root.tensorflow.CoordinationServiceConfig.prototype.allow_new_incarnation_to_reconnect = false; +$root.tensorflow.CoordinationServiceConfig.prototype.force_disable = false; $root.tensorflow.MemmappedFileSystemDirectoryElement = class MemmappedFileSystemDirectoryElement { From 0e71c3702d8a7b3ff9d6f90d9b62c806e9016b2d Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Mon, 28 Aug 2023 19:48:29 -0700 Subject: [PATCH 24/36] Update coreml.js (#1145) --- source/coreml.js | 4 ++-- test/models.json | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/source/coreml.js b/source/coreml.js index d1a3150b2ff..37af5d71185 100644 --- a/source/coreml.js +++ b/source/coreml.js @@ -687,7 +687,7 @@ coreml.Graph = class { } } }; - const loadModel = (model, values, group, weights) => { + const loadModel = (model, values, group) => { this._groups = this._groups | (group.length > 0 ? true : false); const description = model && model.description && model.description.metadata && model.description.metadata.shortDescription ? 
model.description.metadata.shortDescription : ''; switch (model.Type) { @@ -1000,7 +1000,7 @@ coreml.Graph = class { return new coreml.Argument(input.name, true, [ value ]); }); } - this._type = loadModel(model, values, '', weights); + this._type = loadModel(model, values, ''); if (this._description) { this._outputs = this._description.output.map((output) => { const value = values.input(output.name); diff --git a/test/models.json b/test/models.json index 92624485dfe..92881651bbd 100644 --- a/test/models.json +++ b/test/models.json @@ -1622,6 +1622,13 @@ "format": "Core ML v3", "link": "https://developer.apple.com/machine-learning/models" }, + { + "type": "coreml", + "target": "yolov8n.mlpackage,yolov8n.mlpackage/Manifest.json,yolov8n.mlpackage/Data/com.apple.CoreML/model.mlmodel,yolov8n.mlpackage/Data/com.apple.CoreML/weights/weight.bin", + "source": "https://github.com/lutzroeder/netron/files/12459452/yolov8n.mlpackage.zip[.,yolov8n.mlpackage/Manifest.json,yolov8n.mlpackage/Data/com.apple.CoreML/model.mlmodel,yolov8n.mlpackage/Data/com.apple.CoreML/weights/weight.bin]", + "format": "Core ML Package v5", + "link": "https://github.com/lutzroeder/netron/issues/1145" + }, { "type": "darknet", "target": "alexnet.cfg", From f8c77a3e470b75400af8b6fef2443163015b00f3 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Mon, 28 Aug 2023 19:48:37 -0700 Subject: [PATCH 25/36] Update onnx-metadata.json --- source/onnx-metadata.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/onnx-metadata.json b/source/onnx-metadata.json index c64bacf28b3..022423a4191 100644 --- a/source/onnx-metadata.json +++ b/source/onnx-metadata.json @@ -9151,7 +9151,7 @@ "module": "ai.onnx", "version": 12, "support_level": "common", - "description": "An einsum of the form `term1, term2 -> output-term` produces an output tensor using the following equation\n\n```\noutput[output-term] = reduce-sum( input1[term1] * input2[term] )\n```\n\nwhere the reduce-sum performs a summation over 
all the indices occurring in the input terms (term1, term2)\nthat do not occur in the output-term.\n\nThe Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation\nconvention. The equation string contains a comma-separated sequence of lower case letters. Each term corresponds to\nan operand tensor, and the characters within the terms correspond to operands dimensions.\n\nThis sequence may be followed by \"->\" to separate the left and right hand side of the equation.\nIf the equation contains \"->\" followed by the right-hand side, the explicit (not classical) form of the Einstein\nsummation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases,\noutput indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the\nequation.\n\nWhen a dimension character is repeated in the left-hand side, it represents summation along the dimension.\n\nThe equation may contain ellipsis (\"...\") to enable broadcasting. Ellipsis must indicate a fixed number of dimensions.\nSpecifically, every occurrence of ellipsis in the equation must represent the same number of dimensions.\nThe right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the\nbeginning of the output. The equation string may contain space (U+0020) character.\n", + "description": "An einsum of the form `term1, term2 -> output-term` produces an output tensor using the following equation\n\n```\noutput[output-term] = reduce-sum( input1[term1] * input2[term2] )\n```\n\nwhere the reduce-sum performs a summation over all the indices occurring in the input terms (term1, term2)\nthat do not occur in the output-term.\n\nThe Einsum operator evaluates algebraic tensor operations on a sequence of tensors, using the Einstein summation\nconvention. The equation string contains a comma-separated sequence of lower case letters. 
Each term corresponds to\nan operand tensor, and the characters within the terms correspond to operands dimensions.\n\nThis sequence may be followed by \"->\" to separate the left and right hand side of the equation.\nIf the equation contains \"->\" followed by the right-hand side, the explicit (not classical) form of the Einstein\nsummation is performed, and the right-hand side indices indicate output tensor dimensions. In other cases,\noutput indices are (implicitly) set to the alphabetically sorted sequence of indices appearing exactly once in the\nequation.\n\nWhen a dimension character is repeated in the left-hand side, it represents summation along the dimension.\n\nThe equation may contain ellipsis (\"...\") to enable broadcasting. Ellipsis must indicate a fixed number of dimensions.\nSpecifically, every occurrence of ellipsis in the equation must represent the same number of dimensions.\nThe right-hand side may contain exactly one ellipsis. In implicit mode, the ellipsis dimensions are set to the\nbeginning of the output. 
The equation string may contain space (U+0020) character.\n", "attributes": [ { "name": "equation", From 43b10b1f019ef6b82eb6e754b1e3d40e50216813 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Tue, 29 Aug 2023 18:03:24 -0700 Subject: [PATCH 26/36] Update tflite-schema.js --- source/tflite-metadata.json | 63 ++++++++++++ source/tflite-schema.js | 195 +++++++++++++++++++++++++++++++++++- 2 files changed, 257 insertions(+), 1 deletion(-) diff --git a/source/tflite-metadata.json b/source/tflite-metadata.json index 94014e8a94d..28e1db60845 100644 --- a/source/tflite-metadata.json +++ b/source/tflite-metadata.json @@ -833,6 +833,13 @@ { "name": "broadcast_dimensions", "type": "int64[]", "default": 0 } ] }, + { + "name": "StablehloCompare", + "attributes": [ + { "name": "comparison_direction", "type": "StablehloComparisonDirection", "default": "STABLEHLO_COMPARISON_DIRECTION_EQ" }, + { "name": "compare_type", "type": "StablehloComparisonType", "default": "STABLEHLO_COMPARISON_TYPE_NOTYPE" } + ] + }, { "name": "StablehloConcatenate", "attributes": [ @@ -872,6 +879,36 @@ { "name": "custom_attributes", "type": "uint8[]", "default": 0 } ] }, + { + "name": "StablehloDotGeneral", + "attributes": [ + { "name": "lhs_batching_dimensions", "type": "int64[]", "default": 0 }, + { "name": "rhs_batching_dimensions", "type": "int64[]", "default": 0 }, + { "name": "lhs_contracting_dimensions", "type": "int64[]", "default": 0 }, + { "name": "rhs_contracting_dimensions", "type": "int64[]", "default": 0 }, + { "name": "precision_config", "type": "StablehloPrecisionConfig[]", "default": "DEFAULT" } + ] + }, + { + "name": "StablehloDynamicSlice", + "attributes": [ + { "name": "slice_sizes", "type": "int64[]", "default": 0 } + ] + }, + { + "name": "StablehloIota", + "attributes": [ + { "name": "iota_dimension", "type": "int64", "default": 0 } + ] + }, + { + "name": "StablehloPad", + "attributes": [ + { "name": "edge_padding_low", "type": "int64[]", "default": 0 }, + { "name": 
"edge_padding_high", "type": "int64[]", "default": 0 }, + { "name": "interior_padding", "type": "int64[]", "default": 0 } + ] + }, { "name": "StablehloReduce", "attributes": [ @@ -879,6 +916,17 @@ { "name": "body_subgraph_index", "type": "int32", "default": 0 } ] }, + { + "name": "StablehloReduceWindow", + "attributes": [ + { "name": "window_dimensions", "type": "int64[]", "default": 0 }, + { "name": "window_strides", "type": "int64[]", "default": 0 }, + { "name": "base_dilations", "type": "int64[]", "default": 0 }, + { "name": "window_dilations", "type": "int64[]", "default": 0 }, + { "name": "padding", "type": "int64[]", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + }, { "name": "StablehloScatter", "attributes": [ @@ -899,6 +947,21 @@ { "name": "strides", "type": "int64[]", "default": 0 } ] }, + { + "name": "StablehloSort", + "attributes": [ + { "name": "dimension", "type": "int64", "default": 0 }, + { "name": "is_stable", "type": "boolean", "default": false }, + { "name": "comparator_subgraph_index", "type": "int32", "default": 0 } + ] + }, + { + "name": "StablehloWhile", + "attributes": [ + { "name": "cond_subgraph_index", "type": "int32", "default": 0 }, + { "name": "body_subgraph_index", "type": "int32", "default": 0 } + ] + }, { "name": "StridedSlice", "category": "Tensor", diff --git a/source/tflite-schema.js b/source/tflite-schema.js index a55cea54869..4c061146186 100644 --- a/source/tflite-schema.js +++ b/source/tflite-schema.js @@ -434,7 +434,17 @@ $root.tflite.BuiltinOperator = { STABLEHLO_SELECT: 187, STABLEHLO_SUBTRACT: 188, STABLEHLO_TANH: 189, - STABLEHLO_SCATTER: 190 + STABLEHLO_SCATTER: 190, + STABLEHLO_COMPARE: 191, + STABLEHLO_CONVERT: 192, + STABLEHLO_DYNAMIC_SLICE: 193, + STABLEHLO_DYNAMIC_UPDATE_SLICE: 194, + STABLEHLO_PAD: 195, + STABLEHLO_IOTA: 196, + STABLEHLO_DOT_GENERAL: 197, + STABLEHLO_REDUCE_WINDOW: 198, + STABLEHLO_SORT: 199, + STABLEHLO_WHILE: 200 }; $root.tflite.BuiltinOptions = class { @@ 
-715,6 +725,14 @@ $root.tflite.BuiltinOptions2 = class { case 5: return $root.tflite.StablehloCustomCallOptions.decode(reader, position); case 6: return $root.tflite.StablehloReduceOptions.decode(reader, position); case 7: return $root.tflite.StablehloScatterOptions.decode(reader, position); + case 8: return $root.tflite.StablehloCompareOptions.decode(reader, position); + case 9: return $root.tflite.StablehloDynamicSliceOptions.decode(reader, position); + case 10: return $root.tflite.StablehloPadOptions.decode(reader, position); + case 11: return $root.tflite.StablehloIotaOptions.decode(reader, position); + case 12: return $root.tflite.StablehloDotGeneralOptions.decode(reader, position); + case 13: return $root.tflite.StablehloReduceWindowOptions.decode(reader, position); + case 14: return $root.tflite.StablehloSortOptions.decode(reader, position); + case 15: return $root.tflite.StablehloWhileOptions.decode(reader, position); default: return undefined; } } @@ -728,6 +746,14 @@ $root.tflite.BuiltinOptions2 = class { case 'StablehloCustomCallOptions': return $root.tflite.StablehloCustomCallOptions.decodeText(reader, json); case 'StablehloReduceOptions': return $root.tflite.StablehloReduceOptions.decodeText(reader, json); case 'StablehloScatterOptions': return $root.tflite.StablehloScatterOptions.decodeText(reader, json); + case 'StablehloCompareOptions': return $root.tflite.StablehloCompareOptions.decodeText(reader, json); + case 'StablehloDynamicSliceOptions': return $root.tflite.StablehloDynamicSliceOptions.decodeText(reader, json); + case 'StablehloPadOptions': return $root.tflite.StablehloPadOptions.decodeText(reader, json); + case 'StablehloIotaOptions': return $root.tflite.StablehloIotaOptions.decodeText(reader, json); + case 'StablehloDotGeneralOptions': return $root.tflite.StablehloDotGeneralOptions.decodeText(reader, json); + case 'StablehloReduceWindowOptions': return $root.tflite.StablehloReduceWindowOptions.decodeText(reader, json); + case 
'StablehloSortOptions': return $root.tflite.StablehloSortOptions.decodeText(reader, json); + case 'StablehloWhileOptions': return $root.tflite.StablehloWhileOptions.decodeText(reader, json); default: return undefined; } } @@ -739,6 +765,90 @@ $root.tflite.StablehloPrecisionConfig = { HIGHEST: 2 }; +$root.tflite.StablehloDotGeneralOptions = class StablehloDotGeneralOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloDotGeneralOptions(); + $.lhs_batching_dimensions = reader.int64s_(position, 4); + $.rhs_batching_dimensions = reader.int64s_(position, 6); + $.lhs_contracting_dimensions = reader.int64s_(position, 8); + $.rhs_contracting_dimensions = reader.int64s_(position, 10); + $.precision_config = reader.typedArray(position, 12, Uint32Array); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloDotGeneralOptions(); + $.lhs_batching_dimensions = reader.array(json.lhs_batching_dimensions); + $.rhs_batching_dimensions = reader.array(json.rhs_batching_dimensions); + $.lhs_contracting_dimensions = reader.array(json.lhs_contracting_dimensions); + $.rhs_contracting_dimensions = reader.array(json.rhs_contracting_dimensions); + $.precision_config = reader.objectArray(json.precision_config, $root.tflite.StablehloPrecisionConfig.decodeText); + return $; + } +}; + +$root.tflite.StablehloReduceWindowOptions = class StablehloReduceWindowOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloReduceWindowOptions(); + $.window_dimensions = reader.int64s_(position, 4); + $.window_strides = reader.int64s_(position, 6); + $.base_dilations = reader.int64s_(position, 8); + $.window_dilations = reader.int64s_(position, 10); + $.padding = reader.int64s_(position, 12); + $.body_subgraph_index = reader.int32_(position, 14, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloReduceWindowOptions(); + $.window_dimensions = 
reader.array(json.window_dimensions); + $.window_strides = reader.array(json.window_strides); + $.base_dilations = reader.array(json.base_dilations); + $.window_dilations = reader.array(json.window_dilations); + $.padding = reader.array(json.padding); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.tflite.StablehloWhileOptions = class StablehloWhileOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloWhileOptions(); + $.cond_subgraph_index = reader.int32_(position, 4, 0); + $.body_subgraph_index = reader.int32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloWhileOptions(); + $.cond_subgraph_index = reader.value(json.cond_subgraph_index, 0); + $.body_subgraph_index = reader.value(json.body_subgraph_index, 0); + return $; + } +}; + +$root.tflite.StablehloSortOptions = class StablehloSortOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloSortOptions(); + $.dimension = reader.int64_(position, 4, 0); + $.is_stable = reader.bool_(position, 6, false); + $.comparator_subgraph_index = reader.int32_(position, 8, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloSortOptions(); + $.dimension = reader.value(json.dimension, 0); + $.is_stable = reader.value(json.is_stable, false); + $.comparator_subgraph_index = reader.value(json.comparator_subgraph_index, 0); + return $; + } +}; + $root.tflite.StablehloConcatenateOptions = class StablehloConcatenateOptions { static decode(reader, position) { @@ -769,6 +879,89 @@ $root.tflite.StablehloBroadcastInDimOptions = class StablehloBroadcastInDimOptio } }; +$root.tflite.StablehloComparisonDirection = { + STABLEHLO_COMPARISON_DIRECTION_EQ: 0, + STABLEHLO_COMPARISON_DIRECTION_NE: 1, + STABLEHLO_COMPARISON_DIRECTION_GE: 2, + STABLEHLO_COMPARISON_DIRECTION_GT: 3, + STABLEHLO_COMPARISON_DIRECTION_LE: 4, + 
STABLEHLO_COMPARISON_DIRECTION_LT: 5 +}; + +$root.tflite.StablehloComparisonType = { + STABLEHLO_COMPARISON_TYPE_NOTYPE: 0, + STABLEHLO_COMPARISON_TYPE_FLOAT: 1, + STABLEHLO_COMPARISON_TYPE_FLOAT_TOTAL_ORDER: 2, + STABLEHLO_COMPARISON_TYPE_SIGNED: 3, + STABLEHLO_COMPARISON_TYPE_UNSIGNED: 4 +}; + +$root.tflite.StablehloCompareOptions = class StablehloCompareOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloCompareOptions(); + $.comparison_direction = reader.uint32_(position, 4, 0); + $.compare_type = reader.uint32_(position, 6, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloCompareOptions(); + $.comparison_direction = $root.tflite.StablehloComparisonDirection[json.comparison_direction]; + $.compare_type = $root.tflite.StablehloComparisonType[json.compare_type]; + return $; + } +}; + +$root.tflite.StablehloDynamicSliceOptions = class StablehloDynamicSliceOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloDynamicSliceOptions(); + $.slice_sizes = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloDynamicSliceOptions(); + $.slice_sizes = reader.array(json.slice_sizes); + return $; + } +}; + +$root.tflite.StablehloPadOptions = class StablehloPadOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloPadOptions(); + $.edge_padding_low = reader.int64s_(position, 4); + $.edge_padding_high = reader.int64s_(position, 6); + $.interior_padding = reader.int64s_(position, 8); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloPadOptions(); + $.edge_padding_low = reader.array(json.edge_padding_low); + $.edge_padding_high = reader.array(json.edge_padding_high); + $.interior_padding = reader.array(json.interior_padding); + return $; + } +}; + +$root.tflite.StablehloIotaOptions = class StablehloIotaOptions { + + static 
decode(reader, position) { + const $ = new $root.tflite.StablehloIotaOptions(); + $.iota_dimension = reader.int64_(position, 4, 0); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloIotaOptions(); + $.iota_dimension = reader.value(json.iota_dimension, 0); + return $; + } +}; + $root.tflite.StablehloCustomCallOptions = class StablehloCustomCallOptions { static decode(reader, position) { From 1afb38502c6815121d06a32e45c759331ccc4534 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Tue, 29 Aug 2023 21:22:21 -0700 Subject: [PATCH 27/36] Update view.js (#1122) --- source/grapher.js | 11 ++++++++--- source/view.js | 8 +++++--- 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/source/grapher.js b/source/grapher.js index d2b9893ad8e..ffb4cec2c0e 100644 --- a/source/grapher.js +++ b/source/grapher.js @@ -253,12 +253,17 @@ grapher.Node = class { } select() { - this.element.classList.add('select'); - return [ this.element ]; + if (this.element) { + this.element.classList.add('select'); + return [ this.element ]; + } + return []; } deselect() { - this.element.classList.remove('select'); + if (this.element) { + this.element.classList.remove('select'); + } } static roundedRect(x, y, width, height, r1, r2, r3, r4) { diff --git a/source/view.js b/source/view.js index 0bd700b9af0..85b99529f31 100644 --- a/source/view.js +++ b/source/view.js @@ -1594,7 +1594,6 @@ view.Graph = class extends grapher.Graph { createNode(node) { const value = new view.Node(this, node); value.name = (this._nodeKey++).toString(); - this.setNode(value); this._table.set(node, value); return value; } @@ -1602,7 +1601,6 @@ view.Graph = class extends grapher.Graph { createInput(input) { const value = new view.Input(this, input); value.name = (this._nodeKey++).toString(); - this.setNode(value); this._table.set(input, value); return value; } @@ -1610,7 +1608,6 @@ view.Graph = class extends grapher.Graph { createOutput(output) { const value = new 
view.Output(this, output); value.name = (this._nodeKey++).toString(); - this.setNode(value); this._table.set(output, value); return value; } @@ -1647,12 +1644,14 @@ view.Graph = class extends grapher.Graph { } for (const input of graph.inputs) { const viewInput = this.createInput(input); + this.setNode(viewInput); for (const value of input.value) { this.createValue(value).from = viewInput; } } for (const node of graph.nodes) { const viewNode = this.createNode(node); + this.setNode(viewNode); const inputs = node.inputs; for (const input of inputs) { for (const value of input.value) { @@ -1720,6 +1719,7 @@ view.Graph = class extends grapher.Graph { } for (const output of graph.outputs) { const viewOutput = this.createOutput(output); + this.setNode(viewOutput); for (const value of output.value) { this.createValue(value).to.push(viewOutput); } @@ -1910,10 +1910,12 @@ view.Node = class extends grapher.Node { } if (Array.isArray(node.chain) && node.chain.length > 0) { for (const innerNode of node.chain) { + this.context.createNode(innerNode); this._add(innerNode); } } if (node.inner) { + this.context.createNode(node.inner); this._add(node.inner); } if (node.nodes) { From a1344cc32300adf047cf8c96643d7d66c34312b1 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Tue, 29 Aug 2023 21:22:58 -0700 Subject: [PATCH 28/36] Update keras.js (#1141) --- source/keras.js | 116 +++++++++++++++++++++++++----------------------- 1 file changed, 60 insertions(+), 56 deletions(-) diff --git a/source/keras.js b/source/keras.js index 97cc87615fc..ac1eaf377bc 100644 --- a/source/keras.js +++ b/source/keras.js @@ -336,23 +336,23 @@ keras.Graph = class { this._outputs = []; this._nodes = []; group = group || ''; - const args = new Map(); - const arg = (name, type, tensor) => { + const values = new Map(); + const value = (name, type, tensor) => { if (tensor) { return new keras.Value(name, type || null, tensor); } - if (!args.has(name)) { - args.set(name, new keras.Value(name, type || null, tensor 
|| null)); + if (!values.has(name)) { + values.set(name, new keras.Value(name, type || null, tensor || null)); } else if (type || tensor) { throw new keras.Error("Duplicate value '" + name + "'."); } - return args.get(name); + return values.get(name); }; const loadNode = (layer, inputs, outputs, weights, group) => { layer = Object.assign({}, layer); layer.inputs = inputs; layer.outputs = outputs; - return new keras.Node(this._metadata, layer, group, weights, arg); + return new keras.Node(this._metadata, layer, group, weights, value); }; const getInputType = (layer) => { if (layer && layer.config) { @@ -386,32 +386,34 @@ keras.Graph = class { const outputs = null; const inputName = 'input'; let inputType = null; - let value = inputName; + let name = inputName; let index = 0; const layers = config.layers ? config.layers : config; for (const layer of layers) { - let name = index.toString(); - const nodeInputs = [ { name: value } ]; + let current = index.toString(); + const nodeInputs = [ { name: name } ]; if (index == 0) { inputType = getInputType(layer); - this._inputs.push(new keras.Argument(inputName, true, [ arg(inputName, inputType) ])); + const argument = new keras.Argument(inputName, true, [ value(inputName, inputType) ]); + this._inputs.push(argument); } index++; if (layer.config && layer.config.name) { - name = layer.config.name; + current = layer.config.name; } - value = name; - let nodeOutputs = [ value ]; + name = current; + let nodeOutputs = [ name ]; if (index == layers.length) { if (outputs && outputs.length > 0) { nodeOutputs = [ outputs[0] ]; - value = null; + name = null; } } this.nodes.push(loadNode(layer, nodeInputs, nodeOutputs, weights, group)); } - if (value) { - this._outputs.push(new keras.Argument(value, true, [ arg(value) ])); + if (name) { + const argument = new keras.Argument(name, true, [ value(name) ]); + this._outputs.push(argument); } break; } @@ -523,7 +525,7 @@ keras.Graph = class { type = getInputType(node); nodes.delete(name); } - 
const argument = new keras.Argument(name, true, [ arg(name, type) ]); + const argument = new keras.Argument(name, true, [ value(name, type) ]); this._inputs.push(argument); } } @@ -531,19 +533,19 @@ keras.Graph = class { if (output_layers) { for (let j = 0; j < output_layers.length; j++) { const output_layer = output_layers[j]; - let outputName = output_layer[0]; - const outputNode = nodes.get(outputName); + let name = output_layer[0]; + const outputNode = nodes.get(name); if (outputNode) { const outputIndex = output_layer[2]; if (outputIndex != 0) { - outputName += ':' + outputIndex.toString(); + name += ':' + outputIndex.toString(); } while (outputIndex >= outputNode.outputs.length) { outputNode.outputs.push(''); } - outputNode.outputs[outputIndex] = outputName; + outputNode.outputs[outputIndex] = name; } - const argument = new keras.Argument(outputName, true, [ arg(outputName) ]); + const argument = new keras.Argument(name, true, [ value(name) ]); this._outputs.push(argument); } } @@ -563,7 +565,7 @@ keras.Graph = class { for (const name of weights.keys()) { if (weights.get('', name).length <= 6) { const layer = { class_name: 'Weights', config: { name: name } }; - const node = new keras.Node(metadata, layer, '', weights, arg); + const node = new keras.Node(metadata, layer, '', weights, value); this._nodes.push(node); } } @@ -644,7 +646,7 @@ keras.Value = class { keras.Node = class { - constructor(metadata, layer, group, weights, arg) { + constructor(metadata, layer, group, weights, value) { const config = layer.config || {}; const args = layer.args || {}; let inputs = layer.inputs || []; @@ -671,18 +673,19 @@ keras.Node = class { delete config.input_layers; delete config.output_layers; } - this._inputs = [ new keras.Argument('inputs', true, inputs.map((input) => arg(input.name))) ]; - this._outputs = [ new keras.Argument('outputs', true, outputs.map((name) => arg(name))) ]; + this._inputs = [ new keras.Argument('inputs', true, inputs.map((input) => 
value(input.name))) ]; + this._outputs = [ new keras.Argument('outputs', true, outputs.map((name) => value(name))) ]; inputs = []; outputs = []; break; } + case 'Wrapper': case 'Bidirectional': case 'TimeDistributed': { if (config && config.layer) { const inner = config.layer; delete config.layer; - this._inner = new keras.Node(metadata, inner, null, null, arg); + this._inner = new keras.Node(metadata, inner, null, null, value); if (type == 'Bidirectional' && inner.config.name) { names = [ name + '/forward_' + inner.config.name, name + '/backward_' + inner.config.name ]; if (!group) { @@ -734,13 +737,13 @@ keras.Node = class { if (typeof value === 'string') { const set = new Map([ [ 'elu', 'ELU' ], [ 'exponential', 'Exponential' ], [ 'hard_sigmoid', 'HardSigmoid' ], [ 'linear', 'Linear' ], [ 'relu', 'ReLU' ], [ 'selu', 'SELU' ], [ 'softmax', 'Softmax'], [ 'sigmoid', 'Sigmoid' ], [ 'softplus', 'SoftPlus' ], [ 'softsign', 'SoftSign' ], [ 'tanh', 'TanH' ] ]); const type = set.has(value) ? set.get(value) : value; - this.chain.push(new keras.Node(metadata, { class_name: type }, null, null, arg)); + this.chain.push(new keras.Node(metadata, { class_name: type }, null, null, value)); } else if (value && typeof value.class_name === 'string' && value.config) { const type = value.class_name; if (!metadata.type(type)) { metadata.add(type, { name: type, category: 'Activation' }); } - this.chain.push(new keras.Node(metadata, value, null, null, arg)); + this.chain.push(new keras.Node(metadata, value, null, null, value)); } } if (name !== 'name') { @@ -751,17 +754,17 @@ keras.Node = class { } const innerType = this.inner ? this.inner.type : null; - const innerSchema = innerType ? metadata.type(innerType) : null; + const innerMetadata = innerType ? 
metadata.type(innerType) : null; let inputIndex = 0; while (inputs.length > 0) { let list = false; - let inputName = null; + let name = null; let visible = true; - if (!innerSchema || inputIndex == 0) { + if (!innerMetadata || inputIndex == 0) { if (this._type && this._type.inputs && inputIndex < this._type.inputs.length) { const input = this._type.inputs[inputIndex]; - inputName = input.name; - if (type === 'BatchNormalization' && inputName === 'gamma' && config.scale === false) { + name = input.name; + if (type === 'BatchNormalization' && name === 'gamma' && config.scale === false) { inputIndex++; continue; } @@ -774,13 +777,13 @@ keras.Node = class { switch (type) { case 'Bidirectional': { let innerIndex = inputIndex; - if (innerSchema && innerSchema.inputs) { - if (innerIndex < innerSchema.inputs.length) { - inputName = 'forward_' + innerSchema.inputs[innerIndex].name; + if (innerMetadata && innerMetadata.inputs) { + if (innerIndex < innerMetadata.inputs.length) { + name = 'forward_' + innerMetadata.inputs[innerIndex].name; } else { - innerIndex = innerIndex - innerSchema.inputs.length + 1; - if (innerIndex < innerSchema.inputs.length) { - inputName = 'backward_' + innerSchema.inputs[innerIndex].name; + innerIndex = innerIndex - innerMetadata.inputs.length + 1; + if (innerIndex < innerMetadata.inputs.length) { + name = 'backward_' + innerMetadata.inputs[innerIndex].name; } } } @@ -788,8 +791,8 @@ keras.Node = class { break; } case 'TimeDistributed': - if (innerSchema && innerSchema.inputs && inputIndex < innerSchema.inputs.length) { - inputName = innerSchema.inputs[inputIndex].name; + if (innerMetadata && innerMetadata.inputs && inputIndex < innerMetadata.inputs.length) { + name = innerMetadata.inputs[inputIndex].name; } break; default: @@ -799,33 +802,34 @@ keras.Node = class { const input = !list ? 
[ inputs.shift() ] : inputs.splice(0, inputs.length); const inputArguments = input.map((input) => { if (input.name) { - return arg(input.name, null, initializers[input.name]); + return value(input.name, null, initializers[input.name]); } if (input.value !== undefined) { const tensor = new keras.Tensor('', input.shape, config.dtype || '?', null, '|', input.value); - return arg('', null, tensor); + return value('', null, tensor); } throw new keras.Error("Invalid argument '" + JSON.stringify(input.name) + "'."); }); - if (!inputName && inputArguments.length == 1 && inputArguments[0].initializer && inputArguments[0].initializer.name) { + if (!name && inputArguments.length == 1 && inputArguments[0].initializer && inputArguments[0].initializer.name) { if (names.length === 1 && names[0] === '') { - inputName = inputArguments[0].initializer.name; + name = inputArguments[0].initializer.name; } else { const parts = inputArguments[0].initializer.name.split('/').pop().split(':').shift().split('_'); const inputName1 = parts.pop(); const inputName2 = parts.length > 0 ? [ parts.pop(), inputName1 ].join('_') : ''; const inputNames = new Set([ 'recurrent_kernel', 'running_mean', 'running_std', 'moving_mean', 'moving_variance', 'depthwise_filter', 'pointwise_filter' ]); - inputName = inputNames.has(inputName2) ? inputName2 : inputName1; + name = inputNames.has(inputName2) ? inputName2 : inputName1; } } - this._inputs.push(new keras.Argument(inputName || inputIndex.toString(), visible, inputArguments)); + const argument = new keras.Argument(name || inputIndex.toString(), visible, inputArguments); + this._inputs.push(argument); inputIndex++; } for (let i = 0; i < outputs.length; i++) { const output = outputs[i]; const outputName = (this._type && this._type.outputs && i < this._type.outputs.length && this._type.outputs[i] && this._type.outputs[i].name) ? this._type.outputs[i].name : i.toString(); - const args = output.length === 0 ? 
[] : [ arg(output) ]; + const args = output.length === 0 ? [] : [ value(output) ]; const argument = new keras.Argument(outputName, true, args); this._outputs.push(argument); } @@ -833,19 +837,19 @@ keras.Node = class { const inputTypes = new Map((this._type.inputs || []).map((input) => [ input.name, input.type ])); for (const entry of Object.entries(args)) { const name = entry[0]; - const value = entry[1]; + const arg = entry[1]; if (name !== 'name') { - if (value.name || (inputTypes.has(name) && inputTypes.get(name) === 'Tensor' && value)) { - if (value.name) { - const argument = new keras.Argument(name, true, [ arg(value.name) ]); + if (arg.name || (inputTypes.has(name) && inputTypes.get(name) === 'Tensor' && arg)) { + if (arg.name) { + const argument = new keras.Argument(name, true, [ value(arg.name) ]); this._inputs.push(argument); } else { - const tensor = new keras.Tensor('', value.shape, config.dtype || '?', null, '|', value.value); - const argument = new keras.Argument(name, true, [ arg('', null, tensor) ]); + const tensor = new keras.Tensor('', arg.shape, config.dtype || '?', null, '|', arg.value); + const argument = new keras.Argument(name, true, [ value('', null, tensor) ]); this._inputs.push(argument); } } else { - const attribute = new keras.Attribute(metadata.attribute(type, name), name, value); + const attribute = new keras.Attribute(metadata.attribute(type, name), name, arg); this._attributes.push(attribute); } } From 254b5da097aa0fd458252fd7242bb5bd73950779 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Wed, 30 Aug 2023 19:24:20 -0700 Subject: [PATCH 29/36] Update onnx-metadata.json --- source/onnx-metadata.json | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/source/onnx-metadata.json b/source/onnx-metadata.json index 022423a4191..949d3bd10c1 100644 --- a/source/onnx-metadata.json +++ b/source/onnx-metadata.json @@ -7342,7 +7342,7 @@ { "name": "input", "type": "T1", - "description": "For real 
input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][1]. For complex input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. The first dimension is the batch dimension. The following N dimentions correspond to the signal's dimensions. The final dimension represents the real and imaginary parts of the value in that order." + "description": "For real input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][1]. For complex input, the following shape is expected: [batch_idx][signal_dim1][signal_dim2]...[signal_dimN][2]. The first dimension is the batch dimension. The following N dimensions correspond to the signal's dimensions. The final dimension represents the real and imaginary parts of the value in that order." }, { "name": "dft_length", @@ -9094,7 +9094,7 @@ "module": "ai.onnx", "version": 11, "support_level": "common", - "description": "A Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion of FP32 Input data.\nOutputs Scale, ZeroPoint and Quantized Input for a given FP32 Input.\nScale is calculated as:\n```\ny_scale = (max(x) - min(x))/(qmax - qmin)\n```\n\n* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n* data range is adjusted to include 0.\n\nZero point is calculated as:\n```\nintermediate_zero_point = qmin - min(x)/y_scale\ny_zero_point = cast(round(saturate(itermediate_zero_point)))\n```\n\n* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n\nData quantization formula is:\n```\ny = saturate (round (x / y_scale) + y_zero_point)\n```\n\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. 
Right now only uint8 is supported.\n* rounding to nearest ties to even.\n", + "description": "A Function to fuse calculation for Scale, Zero Point and FP32->8Bit conversion of FP32 Input data.\nOutputs Scale, ZeroPoint and Quantized Input for a given FP32 Input.\nScale is calculated as:\n```\ny_scale = (max(x) - min(x))/(qmax - qmin)\n```\n\n* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n* data range is adjusted to include 0.\n\nZero point is calculated as:\n```\nintermediate_zero_point = qmin - min(x)/y_scale\ny_zero_point = cast(round(saturate(itermediate_zero_point)))\n```\n\n* where qmax and qmin are max and min values for quantization range .i.e [0, 255] in case of uint8\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n\nData quantization formula is:\n```\ny = saturate (round (x / y_scale) + y_zero_point)\n```\n\n* for saturation, it saturates to [0, 255] if it's uint8, or [-127, 127] if it's int8. Right now only uint8 is supported.\n* rounding to nearest ties to even.\n", "inputs": [ { "name": "x", @@ -10173,7 +10173,7 @@ "module": "ai.onnx.ml", "version": 1, "support_level": "common", - "description": "Concatenates input tensors into one continuous output.
\n All input shapes are 2-D and are concatenated along the second dimention. 1-D tensors are treated as [1,C].\n Inputs are copied to the output maintaining the order of the input arguments.
\n All inputs must be integers or floats, while the output will be all floating point values.\n", + "description": "Concatenates input tensors into one continuous output.
\n All input shapes are 2-D and are concatenated along the second dimension. 1-D tensors are treated as [1,C].\n Inputs are copied to the output maintaining the order of the input arguments.
\n All inputs must be integers or floats, while the output will be all floating point values.\n", "attributes": [ { "name": "inputdimensions", @@ -20143,7 +20143,7 @@ "module": "ai.onnx", "version": 9, "support_level": "common", - "description": "MaxUnpool essentially computes the partial inverse of the MaxPool op.\n The input information to this op is typically the output information from a MaxPool op. The first\n input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)\n from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding\n to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op.\n The third (optional) input is a tensor that specifies the output size of the unpooling operation.\n\nMaxUnpool is intended to do 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal\n values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling\n the result of an unpooling operation should give back the original input to the unpooling op.\n\nMaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous.\n The third input argument, output_size, is meant to disambiguate the op and produce output tensor of\n known/predictable size.\n\nIn addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,\n which define the exact unpooling op. The attributes typically have the same values as the corrsponding\n pooling op that the unpooling op is trying to invert.\n", + "description": "MaxUnpool essentially computes the partial inverse of the MaxPool op.\n The input information to this op is typically the output information from a MaxPool op. The first\n input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)\n from MaxPool. 
The second input tensor, I, contains the indices to the (locally maximal) elements corresponding\n to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op.\n The third (optional) input is a tensor that specifies the output size of the unpooling operation.\n\nMaxUnpool is intended to do 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal\n values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling\n the result of an unpooling operation should give back the original input to the unpooling op.\n\nMaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous.\n The third input argument, output_size, is meant to disambiguate the op and produce output tensor of\n known/predictable size.\n\nIn addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,\n which define the exact unpooling op. The attributes typically have the same values as the corresponding\n pooling op that the unpooling op is trying to invert.\n", "attributes": [ { "name": "kernel_shape", @@ -20228,7 +20228,7 @@ "module": "ai.onnx", "version": 11, "support_level": "common", - "description": "MaxUnpool essentially computes the partial inverse of the MaxPool op.\n The input information to this op is typically the output information from a MaxPool op. The first\n input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)\n from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corrsponding\n to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op.\n The third (optional) input is a tensor that specifies the output size of the unpooling operation.\n\nMaxUnpool is intended to do 'partial' inverse of the MaxPool op. 
'Partial' because all the non-maximal\n values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. Pooling\n the result of an unpooling operation should give back the original input to the unpooling op.\n\nMaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous.\n The third input argument, output_size, is meant to disambiguate the op and produce output tensor of\n known/predictable size.\n\nIn addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,\n which define the exact unpooling op. The attributes typically have the same values as the corrsponding\n pooling op that the unpooling op is trying to invert.\n", + "description": "MaxUnpool essentially computes the partial inverse of the MaxPool op.\n The input information to this op is typically the output information from a MaxPool op. The first\n input tensor X is the tensor that needs to be unpooled, which is typically the pooled tensor (first output)\n from MaxPool. The second input tensor, I, contains the indices to the (locally maximal) elements corresponding\n to the elements in the first input tensor X. Input tensor I is typically the second output of the MaxPool op.\n The third (optional) input is a tensor that specifies the output size of the unpooling operation.\n\nMaxUnpool is intended to do 'partial' inverse of the MaxPool op. 'Partial' because all the non-maximal\n values from the original input to MaxPool are set to zero in the output of the MaxUnpool op. 
Pooling\n the result of an unpooling operation should give back the original input to the unpooling op.\n\nMaxUnpool can produce the same output size for several input sizes, which makes unpooling op ambiguous.\n The third input argument, output_size, is meant to disambiguate the op and produce output tensor of\n known/predictable size.\n\nIn addition to the inputs, MaxUnpool takes three attributes, namely kernel_shape, strides, and pads,\n which define the exact unpooling op. The attributes typically have the same values as the corresponding\n pooling op that the unpooling op is trying to invert.\n", "attributes": [ { "name": "kernel_shape", @@ -20504,7 +20504,7 @@ "name": "axes", "type": "int64[]", "required": false, - "description": "A list of integers, along which to reduce. The default is to caculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance." + "description": "A list of integers, along which to reduce. The default is to calculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance." } ], "inputs": [ @@ -20554,7 +20554,7 @@ "name": "axes", "type": "int64[]", "required": false, - "description": "A list of integers, along which to reduce. The default is to caculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance." + "description": "A list of integers, along which to reduce. The default is to calculate along axes [0,2,3] for calculating mean and variance along each channel. Two variables with the same C-coordinate are associated with the same mean and variance." 
} ], "inputs": [ @@ -31384,7 +31384,7 @@ "module": "ai.onnx", "version": 11, "support_level": "common", - "description": "Round takes one input Tensor and rounds the values, element-wise, meaning\nit finds the nearest integer for each value.\nIn case of halfs, the rule is to round them to the nearest even integer.\nIf input x is integral, +0, -0, NaN, or infinite, x itself is returned.\nThe output tensor has the same shape and type as the input.\n\nExamples:\n```\nround([0.9]) = [1.0]\nround([2.5]) = [2.0]\nround([2.3]) = [2.0]\nround([1.5]) = [2.0]\nround([-4.5]) = [-4.0]\n```\n", + "description": "Round takes one input Tensor and rounds the values, element-wise, meaning\nit finds the nearest integer for each value.\nIn case of halves, the rule is to round them to the nearest even integer.\nIf input x is integral, +0, -0, NaN, or infinite, x itself is returned.\nThe output tensor has the same shape and type as the input.\n\nExamples:\n```\nround([0.9]) = [1.0]\nround([2.5]) = [2.0]\nround([2.3]) = [2.0]\nround([1.5]) = [2.0]\nround([-4.5]) = [-4.0]\n```\n", "inputs": [ { "name": "X", @@ -35056,7 +35056,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Produces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding\n\nSlice uses the `starts`, `ends`, `axes` and `steps` inputs to select a sub-tensor\nof its input `data` tensor.\n\nAn effective `starts[i]`, `ends[i]`, and `steps[i]` must be computed for each `i`\nin `[0, ... 
r-1]` where `r = rank(input)` as follows:\n\nIf `axes` are omitted, they are set to `[0, ..., r-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`\n\nThe effective values are initialized as `start[i] = 0`, `ends[i] = dims[i]` where\n`dims` are the dimensions of `input` and `steps[i] = `1.\n\nAll negative elements of `axes` are made non-negatve by adding `r` to them, where\n`r =rank(input)`.\n\nAll negative values in `starts[i]` and `ends[i]` have `dims[axes[i]]` added to them,\nwhere `dims` are the dimensions of `input`. Then `start[axes[i]]` is the adjusted\n`starts[i]` is clamped into the range `[0, dims[axes[i]]]` for positive stepping\nand `[0, dims[axes[i]]-1]` for negative stepping.\n\nThe clamping for the adjusted `ends[i]` depends on the sign of `steps[i]` and must\naccommodate copying 0 through `dims[axes[i]]` elements, so for positive stepping\n`ends[axes[i]]` is clamped to `[0, dims[axes[i]]]`, while for negative stepping it\nis clamped to `[-1, dims[axes[i]]-1]`.\n\nFinally, `steps[axes[i]] = steps[i]`.\n\nFor slicing to the end of a dimension with unknown size, it is recommended to pass\nin `INT_MAX` when slicing forward and 'INT_MIN' when slicing backward.\n\nExample 1:\n\n```\ndata = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]\naxes = [0, 1]\nstarts = [1, 0]\nends = [2, 3]\nsteps = [1, 2]\nresult = [\n [5, 7],\n]\n```\n\nExample 2:\n\n```\ndata = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]\nstarts = [0, 1]\nends = [-1, 1000]\nresult = [\n [2, 3, 4],\n]\n```\n", + "description": "Produces a slice of the input tensor along multiple axes. Similar to numpy:\nhttps://numpy.org/doc/stable/user/basics.indexing.html?highlight=slice#slicing-and-striding\n\nSlice uses the `starts`, `ends`, `axes` and `steps` inputs to select a sub-tensor\nof its input `data` tensor.\n\nAn effective `starts[i]`, `ends[i]`, and `steps[i]` must be computed for each `i`\nin `[0, ... 
r-1]` where `r = rank(input)` as follows:\n\nIf `axes` are omitted, they are set to `[0, ..., r-1]`.\nIf `steps` are omitted, they are set to `[1, ..., 1]` of length `len(starts)`\n\nThe effective values are initialized as `start[i] = 0`, `ends[i] = dims[i]` where\n`dims` are the dimensions of `input` and `steps[i] = 1`.\n\nAll negative elements of `axes` are made non-negative by adding `r` to them, where\n`r =rank(input)`.\n\nAll negative values in `starts[i]` and `ends[i]` have `dims[axes[i]]` added to them,\nwhere `dims` are the dimensions of `input`. Then `start[axes[i]]` is the adjusted\n`starts[i]` is clamped into the range `[0, dims[axes[i]]]` for positive stepping\nand `[0, dims[axes[i]]-1]` for negative stepping.\n\nThe clamping for the adjusted `ends[i]` depends on the sign of `steps[i]` and must\naccommodate copying 0 through `dims[axes[i]]` elements, so for positive stepping\n`ends[axes[i]]` is clamped to `[0, dims[axes[i]]]`, while for negative stepping it\nis clamped to `[-1, dims[axes[i]]-1]`.\n\nFinally, `steps[axes[i]] = steps[i]`.\n\nFor slicing to the end of a dimension with unknown size, it is recommended to pass\nin `INT_MAX` when slicing forward and 'INT_MIN' when slicing backward.\n\nExample 1:\n\n```\ndata = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]\naxes = [0, 1]\nstarts = [1, 0]\nends = [2, 3]\nsteps = [1, 2]\nresult = [\n [5, 7],\n]\n```\n\nExample 2:\n\n```\ndata = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n]\nstarts = [0, 1]\nends = [-1, 1000]\nresult = [\n [2, 3, 4],\n]\n```\n", "inputs": [ { "name": "data", @@ -35340,7 +35340,7 @@ "module": "ai.onnx", "version": 12, "support_level": "common", - "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is N-D tensor with shape (N, C, D1, D2, ..., 
Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally do a reduction operator.\n\nshape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\nshape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can caculated as follows:\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.\nor\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n\nloss is zero for the case when label-value equals ignore_index.\n l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n\nwhere:\n p = Softmax(scores)\n y = Log(p)\n c = labels[i][d1][d2]...[dk]\n\nFinally, L is optionally reduced:\nIf reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\nIf reduction = 'sum', the output is scalar: Sum(L).\nIf reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W),\nwhere tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]].\n", + "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is N-D tensor with shape (N, C, D1, D2, ..., Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally do a reduction operator.\n\nshape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional 
loss.\nshape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can be calculated as follows:\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.\nor\n l[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n\nloss is zero for the case when label-value equals ignore_index.\n l[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n\nwhere:\n p = Softmax(scores)\n y = Log(p)\n c = labels[i][d1][d2]...[dk]\n\nFinally, L is optionally reduced:\nIf reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\nIf reduction = 'sum', the output is scalar: Sum(L).\nIf reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: ReduceSum(L) / ReduceSum(W),\nwhere tensor W is of shape (N, D1, D2, ..., Dk) and W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]].\n", "attributes": [ { "name": "ignore_index", @@ -35556,7 +35556,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is N-D tensor with shape (N, C, D1, D2, ..., Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally do a reduction operator.\n\n* shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n* shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can caculated as 
follows:\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.\n```\nor\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n```\n\nloss is zero for the case when label-value equals ignore_index.\n```\nl[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n```\n\nwhere:\n```\np = Softmax(scores)\ny = Log(p)\nc = labels[i][d1][d2]...[dk]\n```\n\nFinally, L is optionally reduced:\n\n* If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\n* If reduction = 'sum', the output is scalar: Sum(L).\n* If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: `ReduceSum(L) / ReduceSum(W)`,\n where tensor W is of shape `(N, D1, D2, ..., Dk)` and `W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]`.\n", + "description": "Loss function that measures the softmax cross entropy\nbetween 'scores' and 'labels'.\nThis operator first computes a loss tensor whose shape is identical to the labels input.\nIf the input is 2-D with shape (N, C), the loss tensor may be a N-element vector L = (l_1, l_2, ..., l_N).\nIf the input is N-D tensor with shape (N, C, D1, D2, ..., Dk),\nthe loss tensor L may have (N, D1, D2, ..., Dk) as its shape and L[i,][j_1][j_2]...[j_k] denotes a scalar element in L.\nAfter L is available, this operator can optionally do a reduction operator.\n\n* shape(scores): (N, C) where C is the number of classes, or (N, C, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n* shape(labels): (N) where each value is 0 <= labels[i] <= C-1, or (N, D1, D2,..., Dk),\n with K >= 1 in case of K-dimensional loss.\n\nThe loss for one sample, l_i, can be calculated as follows:\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk], where i is the index of classes.\n```\nor\n```\nl[i][d1][d2]...[dk] = -y[i][c][d1][d2]..[dk] * weights[c], if 'weights' is provided.\n```\n\nloss is zero for the case when label-value equals 
ignore_index.\n```\nl[i][d1][d2]...[dk] = 0, when labels[n][d1][d2]...[dk] = ignore_index\n```\n\nwhere:\n```\np = Softmax(scores)\ny = Log(p)\nc = labels[i][d1][d2]...[dk]\n```\n\nFinally, L is optionally reduced:\n\n* If reduction = 'none', the output is L with shape (N, D1, D2, ..., Dk).\n* If reduction = 'sum', the output is scalar: Sum(L).\n* If reduction = 'mean', the output is scalar: ReduceMean(L), or if weight is provided: `ReduceSum(L) / ReduceSum(W)`,\n where tensor W is of shape `(N, D1, D2, ..., Dk)` and `W[n][d1][d2]...[dk] = weights[labels[i][d1][d2]...[dk]]`.\n", "attributes": [ { "name": "ignore_index", @@ -38713,7 +38713,7 @@ "name": "nodes_missing_value_tracks_true", "type": "int64[]", "required": false, - "description": "For each node, define what to do in the presence of a missing value: if a value is missing (NaN), use the 'true' or 'false' branch based on the value in this array.
This attribute may be left undefined, and the defalt value is false (0) for all nodes." + "description": "For each node, define what to do in the presence of a missing value: if a value is missing (NaN), use the 'true' or 'false' branch based on the value in this array.
This attribute may be left undefined, and the default value is false (0) for all nodes." }, { "name": "nodes_modes", @@ -38886,7 +38886,7 @@ "name": "nodes_missing_value_tracks_true", "type": "int64[]", "required": false, - "description": "For each node, define what to do in the presence of a missing value: if a value is missing (NaN), use the 'true' or 'false' branch based on the value in this array.
This attribute may be left undefined, and the defalt value is false (0) for all nodes." + "description": "For each node, define what to do in the presence of a missing value: if a value is missing (NaN), use the 'true' or 'false' branch based on the value in this array.
This attribute may be left undefined, and the default value is false (0) for all nodes." }, { "name": "nodes_modes", @@ -39024,7 +39024,7 @@ "name": "nodes_missing_value_tracks_true", "type": "int64[]", "required": false, - "description": "For each node, define what to do in the presence of a NaN: use the 'true' (if the attribute value is 1) or 'false' (if the attribute value is 0) branch based on the value in this array.
This attribute may be left undefined and the defalt value is false (0) for all nodes." + "description": "For each node, define what to do in the presence of a NaN: use the 'true' (if the attribute value is 1) or 'false' (if the attribute value is 0) branch based on the value in this array.
This attribute may be left undefined and the default value is false (0) for all nodes." }, { "name": "nodes_modes", @@ -39179,7 +39179,7 @@ "name": "nodes_missing_value_tracks_true", "type": "int64[]", "required": false, - "description": "For each node, define what to do in the presence of a NaN: use the 'true' (if the attribute value is 1) or 'false' (if the attribute value is 0) branch based on the value in this array.
This attribute may be left undefined and the defalt value is false (0) for all nodes." + "description": "For each node, define what to do in the presence of a NaN: use the 'true' (if the attribute value is 1) or 'false' (if the attribute value is 0) branch based on the value in this array.
This attribute may be left undefined and the default value is false (0) for all nodes." }, { "name": "nodes_modes", @@ -39430,7 +39430,7 @@ "module": "ai.onnx", "version": 11, "support_level": "common", - "description": "Find the unique elements of a tensor. When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned.\nOtherwise the input tensor is flattened and unique values of the flattened tensor are returned.\n\nThis operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs.\nThe first output tensor 'Y' contains all unique values or subtensors of the input.\nThe second optional output tensor 'indices' contains indices of 'Y' elements' first occurance in 'X'..\nThe third optional output tensor 'inverse_indices' contains, for elements of 'X', its corresponding indices in 'Y'. \".\nThe fourth optional output tensor 'counts' contains the count of each element of 'Y' in the input.\n\nOutputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.\n\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n\nExample 1:\n```\ninput_X = [2, 1, 1, 3, 4, 3]\nattribute_sorted = 0\nattribute_axis = None\noutput_Y = [2, 1, 3, 4]\noutput_indices = [0, 1, 3, 4]\noutput_inverse_indices = [0, 1, 1, 2, 3, 2]\noutput_counts = [1, 2, 2, 1]\n```\n\nExample 2:\n```\ninput_X = [[1, 3], [2, 3]]\nattribute_sorted = 1\nattribute_axis = None\noutput_Y = [1, 2, 3]\noutput_indices = [0, 2, 1]\noutput_inverse_indices = [0, 2, 1, 2]\noutput_counts = [1, 1, 2]\n```\n\nExample 3:\n```\ninput_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]\nattribute_sorted = 1\nattribute_axis = 0\noutput_Y = [[1, 0, 0], [2, 3, 4]]\noutput_indices = [0, 2]\noutput_inverse_indices = [0, 0, 1]\noutput_counts = [2, 1]\n```\n\nExample 4:\n```\ninput_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],\n [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]\nattribute_sorted = 
1\nattribute_axis = 1\n```\n\nintermediate data are presented below for better understanding:\nthere are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):\n```\nA: [[1, 1], [1, 1]],\n [[0, 1], [0, 1]],\n [[2, 1], [2, 1]],\n [[0, 1], [0, 1]].\n```\n\nthere are 3 unique subtensors:\n```\n[[1, 1], [1, 1]],\n[[0, 1], [0, 1]],\n[[2, 1], [2, 1]].\n```\n\nsorted unique subtensors:\n```\nB: [[0, 1], [0, 1]],\n [[1, 1], [1, 1]],\n [[2, 1], [2, 1]].\n```\n\noutput_Y is constructed from B:\n```\n[[[0. 1.], [1. 1.], [2. 1.]],\n [[0. 1.], [1. 1.], [2. 1.]]]\n```\n\noutput_indices is to map from B to A:\n```\n[1, 0, 2]\n```\n\noutput_inverse_indices is to map from A to B:\n```\n[1, 0, 2, 0]\n```\n\noutput_counts:\n```\n[2, 1, 1]\n```\n", + "description": "Find the unique elements of a tensor. When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned.\nOtherwise the input tensor is flattened and unique values of the flattened tensor are returned.\n\nThis operator returns the unique values or sliced unique subtensors of the input tensor and three optional outputs.\nThe first output tensor 'Y' contains all unique values or subtensors of the input.\nThe second optional output tensor 'indices' contains indices of 'Y' elements' first occurrence in 'X'.\nThe third optional output tensor 'inverse_indices' contains, for elements of 'X', its corresponding indices in 'Y'.\nThe fourth optional output tensor 'counts' contains the count of each element of 'Y' in the input.\n\nOutputs are either sorted in ascending order or optionally in the order of the first occurrence of the values in the input.\n\nhttps://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html\n\nExample 1:\n```\ninput_X = [2, 1, 1, 3, 4, 3]\nattribute_sorted = 0\nattribute_axis = None\noutput_Y = [2, 1, 3, 4]\noutput_indices = [0, 1, 3, 4]\noutput_inverse_indices = [0, 1, 1, 2, 3, 2]\noutput_counts = [1, 2, 2, 1]\n```\n\nExample 2:\n```\ninput_X = [[1, 3], 
[2, 3]]\nattribute_sorted = 1\nattribute_axis = None\noutput_Y = [1, 2, 3]\noutput_indices = [0, 2, 1]\noutput_inverse_indices = [0, 2, 1, 2]\noutput_counts = [1, 1, 2]\n```\n\nExample 3:\n```\ninput_X = [[1, 0, 0], [1, 0, 0], [2, 3, 4]]\nattribute_sorted = 1\nattribute_axis = 0\noutput_Y = [[1, 0, 0], [2, 3, 4]]\noutput_indices = [0, 2]\noutput_inverse_indices = [0, 0, 1]\noutput_counts = [2, 1]\n```\n\nExample 4:\n```\ninput_x = [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],\n [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]]\nattribute_sorted = 1\nattribute_axis = 1\n```\n\nintermediate data are presented below for better understanding:\nthere are 4 subtensors sliced along axis 1 of input_x (shape = (2, 4, 2)):\n```\nA: [[1, 1], [1, 1]],\n [[0, 1], [0, 1]],\n [[2, 1], [2, 1]],\n [[0, 1], [0, 1]].\n```\n\nthere are 3 unique subtensors:\n```\n[[1, 1], [1, 1]],\n[[0, 1], [0, 1]],\n[[2, 1], [2, 1]].\n```\n\nsorted unique subtensors:\n```\nB: [[0, 1], [0, 1]],\n [[1, 1], [1, 1]],\n [[2, 1], [2, 1]].\n```\n\noutput_Y is constructed from B:\n```\n[[[0. 1.], [1. 1.], [2. 1.]],\n [[0. 1.], [1. 1.], [2. 1.]]]\n```\n\noutput_indices is to map from B to A:\n```\n[1, 0, 2]\n```\n\noutput_inverse_indices is to map from A to B:\n```\n[1, 0, 2, 0]\n```\n\noutput_counts:\n```\n[2, 1, 1]\n```\n", "attributes": [ { "name": "axis", @@ -39465,7 +39465,7 @@ "name": "indices", "type": "tensor(int64)", "option": "optional", - "description": "A 1-D INT64 tensor containing indices of 'Y' elements' first occurance in 'X'. When 'axis' is provided, it contains indices to subtensors in input 'X' on the 'axis'. When 'axis' is not provided, it contains indices to values in the flattened input tensor. " + "description": "A 1-D INT64 tensor containing indices of 'Y' elements' first occurrence in 'X'. When 'axis' is provided, it contains indices to subtensors in input 'X' on the 'axis'. When 'axis' is not provided, it contains indices to values in the flattened input tensor. 
" }, { "name": "inverse_indices", From ec8001888e185fa796c53b1ec5da72fff31397c6 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Wed, 30 Aug 2023 19:24:27 -0700 Subject: [PATCH 30/36] Update tf-proto.js --- source/tf-proto.js | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/source/tf-proto.js b/source/tf-proto.js index 019ad095695..d0c3e70f071 100644 --- a/source/tf-proto.js +++ b/source/tf-proto.js @@ -7065,6 +7065,9 @@ $root.tensorflow.ConfigProto.Experimental = class Experimental { case 24: message.disable_optimize_for_static_graph = reader.bool(); break; + case 26: + message.disable_eager_executor_streaming_enqueue = reader.bool(); + break; default: reader.skipType(tag & 7); break; @@ -7142,6 +7145,9 @@ $root.tensorflow.ConfigProto.Experimental = class Experimental { case "disable_optimize_for_static_graph": message.disable_optimize_for_static_graph = reader.bool(); break; + case "disable_eager_executor_streaming_enqueue": + message.disable_eager_executor_streaming_enqueue = reader.bool(); + break; default: reader.field(tag, message); break; @@ -7172,6 +7178,7 @@ $root.tensorflow.ConfigProto.Experimental.prototype.disable_functional_ops_lower $root.tensorflow.ConfigProto.Experimental.prototype.xla_prefer_single_graph_cluster = false; $root.tensorflow.ConfigProto.Experimental.prototype.coordination_config = null; $root.tensorflow.ConfigProto.Experimental.prototype.disable_optimize_for_static_graph = false; +$root.tensorflow.ConfigProto.Experimental.prototype.disable_eager_executor_streaming_enqueue = false; $root.tensorflow.ConfigProto.Experimental.MlirBridgeRollout = { "MLIR_BRIDGE_ROLLOUT_UNSPECIFIED": 0, From 89104391b1f2dfea9738d064339069ad1db3bd34 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Wed, 30 Aug 2023 19:30:13 -0700 Subject: [PATCH 31/36] Update circle-schema.js --- source/circle-metadata.json | 6 +- source/circle-schema.js | 114 ++++++++++++++++++++++++++++++++++-- 2 files changed, 114 insertions(+), 6 deletions(-) diff --git 
a/source/circle-metadata.json b/source/circle-metadata.json index d4e0efb4092..cfcf0ad24e6 100644 --- a/source/circle-metadata.json +++ b/source/circle-metadata.json @@ -394,7 +394,8 @@ "attributes": [ { "name": "padding", "type": "Padding", "default": "SAME" }, { "name": "stride_w", "type": "int32", "default": 0 }, - { "name": "stride_h", "type": "int32", "default": 0 } + { "name": "stride_h", "type": "int32", "default": 0 }, + { "name": "fused_activation_function", "type": "ActivationFunctionType", "default": "NONE" } ] }, { @@ -404,7 +405,8 @@ { "name": "cell_clip", "type": "float32", "default": 0 }, { "name": "proj_clip", "type": "float32", "default": 0 }, { "name": "time_major", "type": "boolean", "default": false }, - { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false } + { "name": "asymmetric_quantize_inputs", "type": "boolean", "default": false }, + { "name": "diagonal_recurrent_tensors", "type": "boolean", "default": false } ] }, { diff --git a/source/circle-schema.js b/source/circle-schema.js index 3b4f767c3ea..aff88ffed8d 100644 --- a/source/circle-schema.js +++ b/source/circle-schema.js @@ -19,7 +19,8 @@ $root.circle.TensorType = { RESOURCE: 13, VARIANT: 14, UINT32: 15, - UINT16: 16 + UINT16: 16, + INT4: 17 }; $root.circle.CustomQuantization = class CustomQuantization { @@ -190,6 +191,25 @@ $root.circle.SparsityParameters = class SparsityParameters { } }; +$root.circle.VariantSubType = class VariantSubType { + + static decode(reader, position) { + const $ = new $root.circle.VariantSubType(); + $.shape = reader.typedArray(position, 4, Int32Array); + $.type = reader.int8_(position, 6, 0); + $.has_rank = reader.bool_(position, 8, false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.circle.VariantSubType(); + $.shape = reader.typedArray(json.shape, Int32Array); + $.type = $root.circle.TensorType[json.type]; + $.has_rank = reader.value(json.has_rank, false); + return $; + } +}; + $root.circle.Tensor = 
class Tensor { static decode(reader, position) { @@ -203,6 +223,7 @@ $root.circle.Tensor = class Tensor { $.sparsity = reader.table(position, 16, $root.circle.SparsityParameters.decode); $.shape_signature = reader.typedArray(position, 18, Int32Array); $.has_rank = reader.bool_(position, 20, false); + $.variant_tensors = reader.tableArray(position, 22, $root.circle.VariantSubType.decode); return $; } @@ -217,6 +238,7 @@ $root.circle.Tensor = class Tensor { $.sparsity = reader.object(json.sparsity, $root.circle.SparsityParameters.decodeText); $.shape_signature = reader.typedArray(json.shape_signature, Int32Array); $.has_rank = reader.value(json.has_rank, false); + $.variant_tensors = reader.objectArray(json.variant_tensors, $root.circle.VariantSubType.decodeText); return $; } }; @@ -381,7 +403,12 @@ $root.circle.BuiltinOperator = { UNSORTED_SEGMENT_PROD: 153, UNSORTED_SEGMENT_MAX: 154, UNSORTED_SEGMENT_SUM: 155, - ATAN2: 156 + ATAN2: 156, + UNSORTED_SEGMENT_MIN: 157, + SIGN: 158, + BITCAST: 159, + BITWISE_XOR: 160, + RIGHT_SHIFT: 161 }; $root.circle.BuiltinOptions = class { @@ -507,8 +534,13 @@ $root.circle.BuiltinOptions = class { case 117: return $root.circle.DynamicUpdateSliceOptions.decode(reader, position); case 118: return $root.circle.UnsortedSegmentProdOptions.decode(reader, position); case 119: return $root.circle.UnsortedSegmentMaxOptions.decode(reader, position); - case 120: return $root.circle.UnsortedSegmentSumOptions.decode(reader, position); - case 121: return $root.circle.ATan2Options.decode(reader, position); + case 120: return $root.circle.UnsortedSegmentMinOptions.decode(reader, position); + case 121: return $root.circle.UnsortedSegmentSumOptions.decode(reader, position); + case 122: return $root.circle.ATan2Options.decode(reader, position); + case 123: return $root.circle.SignOptions.decode(reader, position); + case 124: return $root.circle.BitcastOptions.decode(reader, position); + case 125: return $root.circle.BitwiseXorOptions.decode(reader, 
position); + case 126: return $root.circle.RightShiftOptions.decode(reader, position); case 252: return $root.circle.BCQGatherOptions.decode(reader, position); case 253: return $root.circle.BCQFullyConnectedOptions.decode(reader, position); case 254: return $root.circle.InstanceNormOptions.decode(reader, position); @@ -637,8 +669,13 @@ $root.circle.BuiltinOptions = class { case 'DynamicUpdateSliceOptions': return $root.circle.DynamicUpdateSliceOptions.decodeText(reader, json); case 'UnsortedSegmentProdOptions': return $root.circle.UnsortedSegmentProdOptions.decodeText(reader, json); case 'UnsortedSegmentMaxOptions': return $root.circle.UnsortedSegmentMaxOptions.decodeText(reader, json); + case 'UnsortedSegmentMinOptions': return $root.circle.UnsortedSegmentMinOptions.decodeText(reader, json); case 'UnsortedSegmentSumOptions': return $root.circle.UnsortedSegmentSumOptions.decodeText(reader, json); case 'ATan2Options': return $root.circle.ATan2Options.decodeText(reader, json); + case 'SignOptions': return $root.circle.SignOptions.decodeText(reader, json); + case 'BitcastOptions': return $root.circle.BitcastOptions.decodeText(reader, json); + case 'BitwiseXorOptions': return $root.circle.BitwiseXorOptions.decodeText(reader, json); + case 'RightShiftOptions': return $root.circle.RightShiftOptions.decodeText(reader, json); case 'BCQGatherOptions': return $root.circle.BCQGatherOptions.decodeText(reader, json); case 'BCQFullyConnectedOptions': return $root.circle.BCQFullyConnectedOptions.decodeText(reader, json); case 'InstanceNormOptions': return $root.circle.InstanceNormOptions.decodeText(reader, json); @@ -1047,6 +1084,7 @@ $root.circle.UnidirectionalSequenceLSTMOptions = class UnidirectionalSequenceLST $.proj_clip = reader.float32_(position, 8, 0); $.time_major = reader.bool_(position, 10, false); $.asymmetric_quantize_inputs = reader.bool_(position, 12, false); + $.diagonal_recurrent_tensors = reader.bool_(position, 14, false); return $; } @@ -1057,6 +1095,7 @@ 
$root.circle.UnidirectionalSequenceLSTMOptions = class UnidirectionalSequenceLST $.proj_clip = reader.value(json.proj_clip, 0); $.time_major = reader.value(json.time_major, false); $.asymmetric_quantize_inputs = reader.value(json.asymmetric_quantize_inputs, false); + $.diagonal_recurrent_tensors = reader.value(json.diagonal_recurrent_tensors, false); return $; } }; @@ -1657,6 +1696,7 @@ $root.circle.TransposeConvOptions = class TransposeConvOptions { $.padding = reader.int8_(position, 4, 0); $.stride_w = reader.int32_(position, 6, 0); $.stride_h = reader.int32_(position, 8, 0); + $.fused_activation_function = reader.int8_(position, 10, 0); return $; } @@ -1665,6 +1705,7 @@ $root.circle.TransposeConvOptions = class TransposeConvOptions { $.padding = $root.circle.Padding[json.padding]; $.stride_w = reader.value(json.stride_w, 0); $.stride_h = reader.value(json.stride_h, 0); + $.fused_activation_function = $root.circle.ActivationFunctionType[json.fused_activation_function]; return $; } }; @@ -2550,6 +2591,71 @@ $root.circle.ATan2Options = class ATan2Options { } }; +$root.circle.UnsortedSegmentMinOptions = class UnsortedSegmentMinOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.UnsortedSegmentMinOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.UnsortedSegmentMinOptions(); + return $; + } +}; + +$root.circle.SignOptions = class SignOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.SignOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.SignOptions(); + return $; + } +}; + +$root.circle.BitcastOptions = class BitcastOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.BitcastOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.BitcastOptions(); + return $; + } +}; + +$root.circle.BitwiseXorOptions = class BitwiseXorOptions { + + 
static decode(/* reader, position */) { + const $ = new $root.circle.BitwiseXorOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.BitwiseXorOptions(); + return $; + } +}; + +$root.circle.RightShiftOptions = class RightShiftOptions { + + static decode(/* reader, position */) { + const $ = new $root.circle.RightShiftOptions(); + return $; + } + + static decodeText(/* reader, json */) { + const $ = new $root.circle.RightShiftOptions(); + return $; + } +}; + $root.circle.BCQGatherOptions = class BCQGatherOptions { static decode(reader, position) { From 152a067fa58c10e07d12d36ef9281af2c5f84ff3 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Thu, 31 Aug 2023 21:58:27 -0700 Subject: [PATCH 32/36] Update tflite-schema.js --- source/tflite-metadata.json | 17 +++++++++++++ source/tflite-schema.js | 48 ++++++++++++++++++++++++++++++++++++- 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/source/tflite-metadata.json b/source/tflite-metadata.json index 28e1db60845..8a6cee7935c 100644 --- a/source/tflite-metadata.json +++ b/source/tflite-metadata.json @@ -895,6 +895,17 @@ { "name": "slice_sizes", "type": "int64[]", "default": 0 } ] }, + { + "name": "StablehloGather", + "attributes": [ + { "name": "offset_dims", "type": "int64[]", "default": 0 }, + { "name": "collapsed_slice_dims", "type": "int64[]", "default": 0 }, + { "name": "start_index_map", "type": "int64[]", "default": 0 }, + { "name": "index_vector_dim", "type": "int64", "default": 0 }, + { "name": "slice_sizes", "type": "int64[]", "default": 0 }, + { "name": "indices_are_sorted", "type": "boolean", "default": false } + ] + }, { "name": "StablehloIota", "attributes": [ @@ -955,6 +966,12 @@ { "name": "comparator_subgraph_index", "type": "int32", "default": 0 } ] }, + { + "name": "StablehloTranspose", + "attributes": [ + { "name": "permutation", "type": "int64[]", "default": 0 } + ] + }, { "name": "StablehloWhile", "attributes": [ diff --git 
a/source/tflite-schema.js b/source/tflite-schema.js index 4c061146186..72bb28681de 100644 --- a/source/tflite-schema.js +++ b/source/tflite-schema.js @@ -444,7 +444,9 @@ $root.tflite.BuiltinOperator = { STABLEHLO_DOT_GENERAL: 197, STABLEHLO_REDUCE_WINDOW: 198, STABLEHLO_SORT: 199, - STABLEHLO_WHILE: 200 + STABLEHLO_WHILE: 200, + STABLEHLO_GATHER: 201, + STABLEHLO_TRANSPOSE: 202 }; $root.tflite.BuiltinOptions = class { @@ -733,6 +735,8 @@ $root.tflite.BuiltinOptions2 = class { case 13: return $root.tflite.StablehloReduceWindowOptions.decode(reader, position); case 14: return $root.tflite.StablehloSortOptions.decode(reader, position); case 15: return $root.tflite.StablehloWhileOptions.decode(reader, position); + case 16: return $root.tflite.StablehloGatherOptions.decode(reader, position); + case 17: return $root.tflite.StablehloTransposeOptions.decode(reader, position); default: return undefined; } } @@ -754,11 +758,53 @@ $root.tflite.BuiltinOptions2 = class { case 'StablehloReduceWindowOptions': return $root.tflite.StablehloReduceWindowOptions.decodeText(reader, json); case 'StablehloSortOptions': return $root.tflite.StablehloSortOptions.decodeText(reader, json); case 'StablehloWhileOptions': return $root.tflite.StablehloWhileOptions.decodeText(reader, json); + case 'StablehloGatherOptions': return $root.tflite.StablehloGatherOptions.decodeText(reader, json); + case 'StablehloTransposeOptions': return $root.tflite.StablehloTransposeOptions.decodeText(reader, json); default: return undefined; } } }; +$root.tflite.StablehloGatherOptions = class StablehloGatherOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloGatherOptions(); + $.offset_dims = reader.int64s_(position, 4); + $.collapsed_slice_dims = reader.int64s_(position, 6); + $.start_index_map = reader.int64s_(position, 8); + $.index_vector_dim = reader.int64_(position, 10, 0); + $.slice_sizes = reader.int64s_(position, 12); + $.indices_are_sorted = reader.bool_(position, 14, 
false); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloGatherOptions(); + $.offset_dims = reader.array(json.offset_dims); + $.collapsed_slice_dims = reader.array(json.collapsed_slice_dims); + $.start_index_map = reader.array(json.start_index_map); + $.index_vector_dim = reader.value(json.index_vector_dim, 0); + $.slice_sizes = reader.array(json.slice_sizes); + $.indices_are_sorted = reader.value(json.indices_are_sorted, false); + return $; + } +}; + +$root.tflite.StablehloTransposeOptions = class StablehloTransposeOptions { + + static decode(reader, position) { + const $ = new $root.tflite.StablehloTransposeOptions(); + $.permutation = reader.int64s_(position, 4); + return $; + } + + static decodeText(reader, json) { + const $ = new $root.tflite.StablehloTransposeOptions(); + $.permutation = reader.array(json.permutation); + return $; + } +}; + $root.tflite.StablehloPrecisionConfig = { DEFAULT: 0, HIGH: 1, From ad475612c01523bdf8a9953e3f771f3465eb6ee4 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Fri, 1 Sep 2023 17:09:40 -0700 Subject: [PATCH 33/36] Update onnx-metadata.json --- source/onnx-metadata.json | 372 ++++++++++++++++++++++++++++++-------- 1 file changed, 294 insertions(+), 78 deletions(-) diff --git a/source/onnx-metadata.json b/source/onnx-metadata.json index 949d3bd10c1..8ba1d8d5b2e 100644 --- a/source/onnx-metadata.json +++ b/source/onnx-metadata.json @@ -26197,7 +26197,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the L1 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the L1 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -26233,7 +26233,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -26271,7 +26271,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the L1 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the L1 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -26314,7 +26314,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -26498,7 +26498,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the L2 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the L2 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -26534,7 +26534,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -26572,7 +26572,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the L2 norm of the input tensor's elements along the provided axes. 
The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the L2 norm of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -26615,7 +26615,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -26791,7 +26791,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the log sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the log sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -26827,7 +26827,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -26861,7 +26861,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the log sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the log sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -26904,7 +26904,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -27084,7 +27084,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the log sum exponent of the input tensor's elements along the provided axes. 
The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -27120,7 +27120,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -27158,7 +27158,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the log sum exponent of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -27201,7 +27201,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -27289,21 +27289,25 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", 
inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), 
keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 
18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -27362,21 +27366,25 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n 
dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, 
shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n 
inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -27437,21 +27445,25 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n 
name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, 
shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ 
-27460,7 +27472,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -27496,7 +27508,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision and 8 bit numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -27513,21 +27525,25 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", 
inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n 
outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = 
np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_max_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -27536,7 +27552,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -27579,7 +27595,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision and 8 bit numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -27596,21 +27612,113 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 
18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + 
"summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMax", + "module": "ai.onnx", + "version": 20, + "support_level": "common", + "description": "Computes the max of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nIf the input data type is Boolean, the comparison should consider `False < True`.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. 
Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric and Boolean tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(int8)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[True],\n# [True],\n# [True],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", 
inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\nnode = onnx.helper.make_node(\n \"ReduceMax\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdim_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_max_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n 
outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[20., 2.]\n# [40., 2.]\n# [60., 2.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = 
np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_max_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMax\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[20., 2.]]\n# [[40., 2.]]\n# [[60., 2.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.maximum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_max_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -27765,7 +27873,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the mean of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the mean of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -27801,7 +27909,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -27839,7 +27947,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the mean of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the mean of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -27882,7 +27990,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -27970,21 +28078,25 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = 
onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n 
dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n 
node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n 
opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -28043,21 +28155,25 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = 
np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 
1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n 
opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, 
shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -28118,21 +28234,25 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = 
np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, 
shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n 
inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -28141,7 +28261,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -28177,7 +28297,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision and 8 bit numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -28194,21 +28314,25 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_min_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], 
[20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# 
print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n 
name=\"test_reduce_min_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -28217,7 +28341,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. 
If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -28260,7 +28384,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision and 8 bit numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -28277,21 +28401,113 @@ } ], "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, { "summary": "default_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n 
name=\"test_reduce_min_default_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "do_not_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n 
outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = 
np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" }, { "summary": "negative_axes_keepdims", - "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n)" + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 
1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + } + ] + }, + { + "name": "ReduceMin", + "module": "ai.onnx", + "version": 20, + "support_level": "common", + "description": "Computes the min of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nIf the input data type is Boolean, the comparison should consider `False < True`.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", + "attributes": [ + { + "name": "keepdims", + "type": "int64", + "required": false, + "default": 1, + "description": "Keep the reduced dimension or not, default 1 means keep reduced dimension." + }, + { + "name": "noop_with_empty_axes", + "type": "int64", + "required": false, + "description": "Defines behavior if 'axes' is empty. Default behavior with 'false' is to reduce all axes. When axes is empty and this attribute is set to true, input tensor will not be reduced,and the output tensor would be equivalent to input tensor." + } + ], + "inputs": [ + { + "name": "data", + "type": "T", + "description": "An input tensor." + }, + { + "name": "axes", + "type": "tensor(int64)", + "option": "optional", + "description": "Optional input list of integers, along which to reduce. 
The default is to reduce over all the dimensions of the input tensor if 'noop_with_empty_axes' is false, else act as an Identity op when 'noop_with_empty_axes' is true. Accepted range is [-r, r-1] where r = rank(data)." + } + ], + "min_input": 1, + "max_input": 2, + "outputs": [ + { + "name": "reduced", + "type": "T", + "description": "Reduced output tensor." + } + ], + "min_output": 1, + "max_output": 1, + "inputs_range": "1 - 2", + "type_constraints": [ + { + "description": "Constrain input and output types to numeric and Boolean tensors.", + "type_param_str": "T", + "allowed_type_strs": [ + "tensor(uint32)", + "tensor(uint64)", + "tensor(int32)", + "tensor(int64)", + "tensor(float16)", + "tensor(float)", + "tensor(double)", + "tensor(bfloat16)", + "tensor(uint8)", + "tensor(int8)", + "tensor(bool)" + ] + } + ], + "examples": [ + { + "summary": "bool_inputs", + "code": "axes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[True, True], [True, False], [False, True], [False, False]],\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=bool(keepdims))\n# print(reduced)\n# [[ True],\n# [False],\n# [False],\n# [False]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_bool_inputs\",\n)" + }, + { + "summary": "default_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = None\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\", inputs=[\"data\"], outputs=[\"reduced\"], keepdims=keepdims\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n# print(reduced)\n# [[[1.]]]\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 
18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=axes, keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data],\n outputs=[reduced],\n name=\"test_reduce_min_default_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "do_not_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 0\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[5., 1.]\n# [30., 1.]\n# [55., 1.]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_do_not_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([1], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 
18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" + }, + { + "summary": "negative_axes_keepdims", + "code": "shape = [3, 2, 2]\naxes = np.array([-2], dtype=np.int64)\nkeepdims = 1\n\nnode = onnx.helper.make_node(\n \"ReduceMin\",\n inputs=[\"data\", \"axes\"],\n outputs=[\"reduced\"],\n keepdims=keepdims,\n)\n\ndata = np.array(\n [[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]],\n dtype=np.float32,\n)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n# print(reduced)\n# [[[5., 1.]]\n# [[30., 1.]]\n# [[55., 1.]]]\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_example\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)\n\nnp.random.seed(0)\ndata = np.random.uniform(-10, 10, shape).astype(np.float32)\nreduced = np.minimum.reduce(data, axis=tuple(axes), keepdims=keepdims == 1)\n\nexpect(\n node,\n inputs=[data, axes],\n outputs=[reduced],\n name=\"test_reduce_min_negative_axes_keepdims_random\",\n opset_imports=[onnx.helper.make_opsetid(\"\", 18)],\n)" } ] }, @@ -28446,7 +28662,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the product of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the product of the input tensor's elements along the provided axes. 
The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -28482,7 +28698,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -28520,7 +28736,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the product of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the product of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -28563,7 +28779,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -28755,7 +28971,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the sum of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -28798,7 +29014,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -28986,7 +29202,7 @@ "module": "ai.onnx", "version": 13, "support_level": "common", - "description": "Computes the sum square of the input tensor's elements along the provided axes. 
The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the sum square of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "axes", @@ -29022,7 +29238,7 @@ "max_output": 1, "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", @@ -29060,7 +29276,7 @@ "module": "ai.onnx", "version": 18, "support_level": "common", - "description": "Computes the sum square of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if keepdims equals 1. If keepdims equals 0, then\nthe resulting tensor has the reduced dimension pruned. Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults keepdims to\nFalse instead of True.", + "description": "Computes the sum square of the input tensor's elements along the provided axes. The resulting\ntensor has the same rank as the input if `keepdims` equals 1. If `keepdims` equals 0, then\nthe resulting tensor has the reduced dimension pruned. 
Input tensors of rank zero are\nvalid.\n\nThe above behavior is similar to numpy, with the exception that numpy defaults `keepdims`\nto `False` instead of `True`.", "attributes": [ { "name": "keepdims", @@ -29103,7 +29319,7 @@ "inputs_range": "1 - 2", "type_constraints": [ { - "description": "Constrain input and output types to high-precision numeric tensors.", + "description": "Constrain input and output types to numeric tensors.", "type_param_str": "T", "allowed_type_strs": [ "tensor(uint32)", From 69bfc4f06578f55320efe57639b9e6561507ed87 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Fri, 1 Sep 2023 17:10:22 -0700 Subject: [PATCH 34/36] Add RKNN test file (#639) (#1147) --- source/rknn.js | 6 +++--- test/models.json | 7 +++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/source/rknn.js b/source/rknn.js index 1e86623ea26..ee32998dad1 100644 --- a/source/rknn.js +++ b/source/rknn.js @@ -548,14 +548,14 @@ rknn.Container = class extends Map { case 0x0002: case 0x1002: case 0x1003: + case 0x0003: case 0x0004: + case 0x0005: + case 0x0006: if (data_size > 0) { stream.skip(40); } break; - case 0x0003: - case 0x0005: - case 0x0006: default: throw new rknn.Error("Unsupported RKNN container version '" + version + "'."); } diff --git a/test/models.json b/test/models.json index 92881651bbd..e15fb0cbfed 100644 --- a/test/models.json +++ b/test/models.json @@ -5488,6 +5488,13 @@ "format": "TorchScript v1.5", "link": "https://github.com/lutzroeder/netron/issues/842" }, + { + "type": "rknn", + "target": "100_epoch.rknn", + "source": "https://github.com/lutzroeder/netron/files/12502077/100_epoch.rknn.zip[100_epoch.rknn]", + "format": "RKNN v1.5.2+b642f30c", + "link": "https://github.com/lutzroeder/netron/issues/639" + }, { "type": "rknn", "target": "autopilot.rknn", From 595a0140e3b4ba6100e5f63d141bcda82b62deba Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sat, 2 Sep 2023 09:04:30 -0700 Subject: [PATCH 35/36] Update to 7.1.6 --- package.json | 4 ++-- 1 
file changed, 2 insertions(+), 2 deletions(-) diff --git a/package.json b/package.json index 7bddb91f252..9763aa727a3 100755 --- a/package.json +++ b/package.json @@ -6,8 +6,8 @@ "email": "lutzroeder@users.noreply.github.com", "url": "https://www.lutzroeder.com" }, - "version": "7.1.5", - "date": "2023-08-25 13:12:12", + "version": "7.1.6", + "date": "2023-09-02 16:04:30", "description": "Visualizer for neural network, deep learning, and machine learning models", "license": "MIT", "repository": "lutzroeder/netron", From b70307a626f1930a3c99d0a2dc10f02e2e94a1e7 Mon Sep 17 00:00:00 2001 From: Lutz Roeder Date: Sat, 2 Sep 2023 13:32:35 -0700 Subject: [PATCH 36/36] Update view.js --- source/view.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/view.js b/source/view.js index 85b99529f31..6862d5fb262 100644 --- a/source/view.js +++ b/source/view.js @@ -5060,7 +5060,7 @@ view.ModelFactoryService = class { this.register('./lasagne', [ '.pkl', '.pickle', '.joblib', '.model', '.pkl.z', '.joblib.z' ]); this.register('./lightgbm', [ '.txt', '.pkl', '.model' ]); this.register('./keras', [ '.h5', '.hd5', '.hdf5', '.keras', '.json', '.cfg', '.model', '.pb', '.pth', '.weights', '.pkl', '.lite', '.tflite', '.ckpt' ], [ '.zip' ]); - this.register('./sklearn', [ '.pkl', '.pickle', '.joblib', '.model', '.meta', '.pb', '.pt', '.h5', '.pkl.z', '.joblib.z' ]); + this.register('./sklearn', [ '.pkl', '.pickle', '.joblib', '.model', '.meta', '.pb', '.pt', '.h5', '.pkl.z', '.joblib.z', '.pickle.dat' ]); this.register('./megengine', [ '.tm', '.mge' ]); this.register('./pickle', [ '.pkl', '.pickle', '.joblib', '.model', '.meta', '.pb', '.pt', '.h5', '.pkl.z', '.joblib.z', '.pdstates', '.mge' ]); this.register('./cntk', [ '.model', '.cntk', '.cmf', '.dnn' ]);