diff --git a/docs/BuildONNX.md b/docs/BuildONNX.md index 2e932df876..4c6aee449b 100644 --- a/docs/BuildONNX.md +++ b/docs/BuildONNX.md @@ -2,7 +2,7 @@ # Installing `third_party ONNX` for Backend Tests or Rebuilding ONNX Operations -Backend tests are triggered by `make check-onnx-backend` in the build directory and require a few preliminary steps to run successfully. Similarly, rebuilding the ONNX operations in ONNX-MLIR from their ONNX descriptions is triggered by `make OMONNXOpsIncTranslation`. +Backend tests are triggered by `make check-onnx-backend` in the build directory and require a few preliminary steps to run successfully. Similarly, rebuilding the ONNX operations in ONNX-MLIR from their ONNX descriptions is triggered by `make OMONNXOpsTableGenIncGen`. You will need to install python 3.x if its not default in your environment, and possibly set the cmake `PYTHON_EXECUTABLE` variable in your top cmake file. @@ -24,7 +24,7 @@ Here are the steps taken to upgrade the ONNX version: 6. Build onnx in the `build/` directory using: set CMAKE_ARGS=-DONNX_USE_LITE_PROTO=ON -7. Run in the `build/` directory : make OMONNXOpsIncTranslation +7. Run in the `build/` directory : make OMONNXOpsTableGenIncGen 8. Run in `build/` directory : make onnx-mlir-docs diff --git a/docs/ImportONNXDefs.md b/docs/ImportONNXDefs.md index caf12beb8b..d9d98c51cf 100644 --- a/docs/ImportONNXDefs.md +++ b/docs/ImportONNXDefs.md @@ -104,7 +104,7 @@ the code in the dictionary `custom_definition_misc`. The key is the operation na In order to run gen_onnx_mlir.py, ONNX has to be installed. Refer to Readme. In your build directory, execute the following command. ``` - make OMONNXOpsIncTranslation + make OMONNXOpsTableGenIncGen ``` This command will generate those two files (src/Dialect/ONNX/ONNXOps.td.inc and OpBuilderTable.inc), and copy them to the right place in src directory. 
diff --git a/src/Builder/CMakeLists.txt b/src/Builder/CMakeLists.txt index 589849e431..6e0d9d229e 100644 --- a/src/Builder/CMakeLists.txt +++ b/src/Builder/CMakeLists.txt @@ -18,8 +18,3 @@ add_onnx_mlir_library(OMBuilder MLIRFuncDialect onnx ) - -configure_file(OpBuildTable.inc.dc.in - ${CMAKE_CURRENT_BINARY_DIR}/OpBuildTable.inc.dc - @ONLY - ) diff --git a/src/Builder/FrontendDialectTransformer.cpp b/src/Builder/FrontendDialectTransformer.cpp index eddf91b07d..d1ed97122a 100644 --- a/src/Builder/FrontendDialectTransformer.cpp +++ b/src/Builder/FrontendDialectTransformer.cpp @@ -47,10 +47,12 @@ SUPPRESS_WARNINGS_POP #include +#include #include #include -#include +#include #include +#include #include #define DEBUG_TYPE "frontend_dialect_transformer" @@ -62,7 +64,8 @@ static constexpr int32_t MINIMUM_SUPPORTED_OPSET = 6; using namespace mlir; namespace onnx_mlir { -namespace detail { + +namespace { using ValueSymbolMapping = SymbolMapping; using SymbolToOnnxTypeMapping = SymbolMapping; @@ -99,21 +102,25 @@ class FrontendGenImpl { ModuleOp module_; OpBuilder builder_; - // onnxop: list of versions for dialect - std::map> op_dialect_version_map_; - // onnxop: the top version in third_part/onnx - std::map op_dialect_top_version_map_; + using ImportHandlerType = void (FrontendGenImpl::*)(const onnx::NodeProto &); + + struct VersionedHandler { + int version; + ImportHandlerType handler; + }; + + using ONNXOpVersions = SmallVector; + + // Maps NodeProto::op_type() to sorted vector of (version, handler) pairs. + // TODO: Key by (domain, op_type) pair so we don't rely on names being unique + // across all domains. + std::unordered_map onnx_ops_map_; // mapping between string name and symbol ValueSymbolMapping frontend_symbols_; ModelInputShaper modelInputShaper_; - using ImportHandlerType = void (onnx_mlir::detail::FrontendGenImpl::*)( - const onnx::NodeProto &); - - std::map import_handler_map_; - // The total number of elements in all initializers. 
This value is a rough // counter of the number of parameters in a model. int64_t num_of_parameters_ = 0; @@ -145,7 +152,7 @@ class FrontendGenImpl { // opset_map_ is the internal (map) representation of ModelProto::opset_import // It maps each domain (e.g., "ai.onnx") to the specific version of that opset // used by this model. - std::map opset_map_; + std::unordered_map opset_map_; void SetOpSetImport(const onnx::ModelProto &model) { opset_map_.clear(); for (auto &binding : model.opset_import()) { @@ -153,6 +160,13 @@ class FrontendGenImpl { } } + int64_t GetDomainVersion(const std::string &domain) { + auto it = opset_map_.find(domain); + if (it == opset_map_.end()) + return 0; + return it->second; + } + void BindOnnxName(const std::string &onnx_name, Value symbol) { frontend_symbols_.AddMapping(onnx_name, symbol); } @@ -433,32 +447,6 @@ class FrontendGenImpl { return builder_.getFunctionType(argTypes, retTys); } - void ImportNodeGeneric(const onnx::NodeProto &node) { - std::vector inputs; - for (const auto &item : node.input()) { - if (const Value *valuePtr = frontend_symbols_.GetByOnnxName(item)) { - inputs.push_back(*valuePtr); - } - } - OperationState result(UnknownLoc(), "frontend." + node.op_type()); - for (auto item : node.output()) { - result.addTypes(UnrankedTensorType::get(builder_.getF32Type())); - } - result.addOperands(inputs); - result.addAttributes(ImportNodeAttributes(node)); - // Create corresponding regions for graph attributes. - for (const auto &attr : node.attribute()) - // Ignore subgraph attributes, as they will be imported as regions. 
- if (attr.type() == onnx::AttributeProto_AttributeType_GRAPH) - result.addRegion(); - - auto op = builder_.create(result); - for (int i = 0; i < node.output().size(); i++) { - auto r = op->getResult(i); - frontend_symbols_.AddMapping(node.output()[i], r); - } - } - static constexpr int MAX_TYPE = 20; // Get these indices from TensorProto in @@ -683,7 +671,7 @@ class FrontendGenImpl { } template - void buildOperation(const onnx::NodeProto &node) { + void doBuildOperation(const onnx::NodeProto &node) { std::vector inputs; int expectedNumOperands = T::getNumberOfOperands(); int expectedNumResults = T::getNumberOfResults(); @@ -693,317 +681,12 @@ class FrontendGenImpl { node, inputs, expectedNumOperands, expectedNumResults, attributes); } - // The output type of CategoryMapper needs special handling - // If the input is I64, the output is string. - // If the input is string, the output is I64. - void ImportCategoryMapper(const onnx::NodeProto &node) { - std::vector inputs; - int expectedNumOperands = ONNXCategoryMapperOp::getNumberOfOperands(); - int expectedNumResults = ONNXCategoryMapperOp::getNumberOfResults(); - getNodeInputs(node, inputs); - auto attributes = ImportNodeAttributes(node); - std::vector outputTypes; - auto inputType = inputs[0].getType().cast(); - if (inputType.getElementType().isInteger(64)) { - outputTypes.emplace_back( - mlir::ONNXStringType::get(builder_.getContext())); - } else { - outputTypes.emplace_back(builder_.getIntegerType(64)); - } - buildOutputAndOperation(node, inputs, - expectedNumOperands, expectedNumResults, attributes, outputTypes); - } - - // The output type of Scan needs special handling - // The final_stete_and_scan_outputs of Scan shows final values of loop's - // N state variables followed by K scan_outputs. 
- void ImportScan(const onnx::NodeProto &node) { - int expectedNumOperands = ONNXScanOp::getNumberOfOperands(); - int expectedNumResults = ONNXScanOp::getNumberOfResults(); - std::vector inputs; - getNodeInputs(node, inputs); - auto attributes = ImportNodeAttributes(node); - int num_scan_inputs = -1; - int i; - for (i = 0; i < node.attribute_size(); ++i) { - auto attr = node.attribute(i); - if (attr.name() == "num_scan_inputs") { - num_scan_inputs = attr.i(); - break; - } - } - assert((i < node.attribute_size()) && - "mandatory num_scan_inputs attr not in onnx.Scan"); - buildOutputAndOperation(node, inputs, expectedNumOperands, - expectedNumResults, attributes, - /*givenOutputTypes=*/std::vector(), - /*num_use_inference_outputs=*/num_scan_inputs); - return; - } - - std::vector ImportCastAttributes( - const onnx::NodeProto &node) { - std::vector attributes; - for (int i = 0; i < node.attribute_size(); ++i) { - auto attr = node.attribute(i); - auto mlir_type = convertONNXTypeToMLIRType( - builder_, static_cast(attr.i())); - Attribute mlirAttr = TypeAttr::get(mlir_type); - attributes.push_back(builder_.getNamedAttr(attr.name(), mlirAttr)); - } - - // If the node has a name, then import it. - if (node.has_name()) { - attributes.push_back(builder_.getNamedAttr( - "onnx_node_name", builder_.getStringAttr(node.name()))); - } - return attributes; - } - - /*! - * Special handle for Cast operations. - */ - void ImportNodeCast(const onnx::NodeProto &node) { - std::vector inputs; - int expectedNumOperands = ONNXCastOp::getNumberOfOperands(); - int expectedNumResults = ONNXCastOp::getNumberOfResults(); - for (const auto &item : node.input()) - if (item.empty()) { - // Optional inputs using empty string will be imported as NoneType. 
- inputs.emplace_back(createNoneValue()); - } else { - if (const Value *valuePtr = frontend_symbols_.GetByOnnxName(item)) { - inputs.push_back(*valuePtr); - } - } - auto attributes = ImportCastAttributes(node); - buildOutputAndOperation( - node, inputs, expectedNumOperands, expectedNumResults, attributes); - } - - /*! - * Special handle for MaxPool operations. - */ - void ImportNodeMaxPool(const onnx::NodeProto &node) { - int nOuts = node.output().size(); - if (nOuts == 1) { - buildOperation(node); - } else { - buildOperation(node); - } - } - - /*! - * Special handle for BatchNormalization operations. - */ - void ImportNodeBatchNormalization(const onnx::NodeProto &node) { - int nOuts = node.output().size(); - if (nOuts == 1) { - // Inference mode with one output. - buildOperation(node); - } else { - // Training mode with four trailing optional outputs. Not handled yet. - buildOperation(node); - } - } - - /*! - * Special handle for Dropout operations. - */ - void ImportNodeDropout(const onnx::NodeProto &node) { - int nOps = node.input().size(); - int nIn = ONNXDropoutOp::getNumberOfOperands(); - if (nOps == nIn) { - // All inputs are specified - buildOperation(node); - return; - } - - // Add the default value for optional input - // Copy the provided inputs first - std::vector inputs; - for (const auto &item : node.input()) { - if (const Value *valuePtr = frontend_symbols_.GetByOnnxName(item)) { - inputs.push_back(*valuePtr); - } - } - - // If ratio is not specified, the default value is 0.5 - if (nOps < 2) { - llvm::SmallVector dims; - dims.push_back(1); - llvm::SmallVector values; - values.push_back(0.5); - Type elementType = builder_.getF32Type(); - llvm::ArrayRef tensorDims(dims.data(), dims.size()); - auto tensorType = RankedTensorType::get(tensorDims, elementType); - auto constantDenseAttribute = - DenseElementsAttr::get(tensorType, llvm::ArrayRef(values)); - auto constantOp = builder_.create( - UnknownLoc(), Attribute(), constantDenseAttribute); - Value 
constantResult = *(constantOp.getODSResults(0).begin()); - inputs.push_back(constantResult); - } - - // If training_mode is not specified, the default value is false - if (nOps < 3) { - llvm::SmallVector dims; - dims.push_back(1); - llvm::SmallVector values; - values.push_back(false); - Type elementType = builder_.getIntegerType(1); - llvm::ArrayRef tensorDims(dims.data(), dims.size()); - auto tensorType = RankedTensorType::get(tensorDims, elementType); - auto constantDenseAttribute = - DenseElementsAttr::get(tensorType, llvm::ArrayRef(values)); - auto constantOp = builder_.create( - UnknownLoc(), Attribute(), constantDenseAttribute); - Value constantResult = *(constantOp.getODSResults(0).begin()); - inputs.push_back(constantResult); - } - int nOut = ONNXDropoutOp::getNumberOfResults(); - auto attributes = ImportNodeAttributes(node); - buildOutputAndOperation(node, inputs, nIn, nOut, attributes); - } - - /*! - * Special handle for Pad operations. - */ - void ImportNodePad(const onnx::NodeProto &node) { - int nOps = node.input().size(); - if (nOps == 2) { - llvm::SmallVector dims; - dims.push_back(1); - - std::vector inputs; - getNodeInputs(node, inputs); - Type elementType = - inputs[0].getType().cast().getElementType(); - - llvm::SmallVector values( - 1, builder_.getZeroAttr(elementType)); - - llvm::ArrayRef tensorDims(dims.data(), dims.size()); - auto tensorType = RankedTensorType::get(tensorDims, elementType); - auto constantDenseAttribute = - DenseElementsAttr::get(tensorType, llvm::ArrayRef(values)); - - // Use the special builder defined in ONNXOp.td.inc. 
- auto constantOp = builder_.create( - UnknownLoc(), Attribute(), constantDenseAttribute); - Value constantResult = *(constantOp.getODSResults(0).begin()); - inputs.push_back(constantResult); - - int nIn = ONNXPadOp::getNumberOfOperands(); - int nOut = ONNXPadOp::getNumberOfResults(); - auto attributes = ImportNodeAttributes(node); - buildOutputAndOperation(node, inputs, nIn, nOut, attributes); - } else { - buildOperation(node); - } - } - - void ImportNodeSlice(const onnx::NodeProto &node) { - std::array inVals = { - nullptr, - }; - - for (const auto &item : llvm::enumerate(node.input())) { - if (const Value *valuePtr = - frontend_symbols_.GetByOnnxName(item.value())) { - inVals[item.index()] = *valuePtr; - } else { - assert(false && "Unknown input"); - } - } - - // Data input is imported but starts, ends, axes, and steps may come from - // attributes, and need to be created as constant ops. - const Type elementType = builder_.getIntegerType(64); - const auto attributes = ImportNodeAttributes(node); - for (auto attr : attributes) { - if (auto arrayAttr = attr.getValue().dyn_cast()) { - const auto tensorType = - RankedTensorType::get({(int64_t)arrayAttr.size()}, elementType); - auto constantDenseAttribute = - DenseElementsAttr::get(tensorType, arrayAttr.getValue()); - auto constantOp = builder_.create( - UnknownLoc(), Attribute(), constantDenseAttribute); - Value constantValue = constantOp.getOutput(); - - // Map from ONNX attributes to indices, which are - // matched with ONNXSliceOp::build ordering. 
- auto inputIdx = llvm::StringSwitch(attr.getName()) - .Case("starts", 1) - .Case("ends", 2) - .Case("axes", 3) - .Case("steps", 4) - .Default(-1); - if (inputIdx < 0) - continue; - assert(inVals[inputIdx] == nullptr && - "This input has already been filled in"); - inVals[inputIdx] = constantValue; - } - } - - assert(inVals[1] != nullptr && "Slice requires a starts attribute"); - assert(inVals[2] != nullptr && "Slice requires an ends attribute"); - inVals[3] = inVals[3] == nullptr ? createNoneValue() : inVals[3]; - inVals[4] = inVals[4] == nullptr ? createNoneValue() : inVals[4]; - - int nIn = ONNXSliceOp::getNumberOfOperands(); - int nOut = ONNXSliceOp::getNumberOfResults(); - const auto in = std::vector(inVals.begin(), inVals.end()); - - buildOutputAndOperation(node, in, nIn, nOut, attributes); - } - const onnx::OpSchema *GetOpSchema(const onnx::NodeProto &node) { - auto &domain = node.domain(); - auto version_it = opset_map_.find(domain); - if (version_it == opset_map_.end()) + int64_t version = GetDomainVersion(node.domain()); + if (version == 0) return nullptr; - auto version = version_it->second; - return onnx::OpSchemaRegistry::Schema(node.op_type(), version, domain); - } - - std::string GetImportVersionOfNode(const onnx::NodeProto &node) { - auto current_opset = opset_map_.find(node.domain())->second; - - LLVM_DEBUG(llvm::dbgs() << DEBUG_TYPE << ": Importing ONNX" - << node.op_type() << " (" << node.name() << ")" - << ", Opset: " << current_opset << "\n"); - - auto opset_list_it = op_dialect_version_map_.find(node.op_type()); - - // Custom ops may not be present in op_dialect_version_map_. If no version - // info is found, treat as unversioned (no renaming). - if (opset_list_it == op_dialect_version_map_.end()) - return ""; - - auto opset_list = opset_list_it->second; - - // A new opset is added to onnx-mlir when it becomes imcompactible. - // But the lowest opset in op_dialect_version_map_ is an exception. 
- // It is the current opset when onnx-mlir project is started. - // All opset lower than the last opset should use the last opset(version) - if (node.domain().compare("ai.onnx.ml") != 0 && - current_opset < opset_list.back() && - current_opset < MINIMUM_SUPPORTED_OPSET) - llvm::outs() << "Warning: ONNX " << node.op_type() - << " in your model is using Opset " << current_opset - << ", which is quite old. Please consider regenerating your " - "model with a newer Opset.\n"; - - for (int i = opset_list.size() - 1; i > 0; i--) { - if (current_opset < opset_list[i - 1]) { - LLVM_DEBUG(llvm::dbgs() << DEBUG_TYPE << ": - use Opset " - << opset_list[i] << "\n"); - return "V" + std::to_string(opset_list[i]); - } - } - return std::string(""); + return onnx::OpSchemaRegistry::Schema( + node.op_type(), version, node.domain()); } func::FuncOp CreateFuncOp( @@ -1197,22 +880,78 @@ class FrontendGenImpl { } } + bool TryImportONNXNode(const onnx::NodeProto &node) { + int64_t version = GetDomainVersion(node.domain()); + if (version == 0) { + // Unknown domain. + return false; + } + + LLVM_DEBUG(llvm::dbgs() << DEBUG_TYPE << ": Importing ONNX" + << node.op_type() << " (" << node.name() << ")" + << ", Opset: " << version << "\n"); + + auto versions_it = onnx_ops_map_.find(node.op_type()); + if (versions_it == onnx_ops_map_.end()) { + // Unknown op_type. + llvm::outs() << "Warning: ONNX " << node.op_type() << " from domain '" + << node.domain() << "'," + << " in your model is unsupported.\n"; + return false; + } + + const ONNXOpVersions &opVersions = versions_it->second; + + // A new opset is added to onnx-mlir when it becomes incompatible. + // But the lowest opset in onnx_ops_map_ is an exception. + // It is the current opset when onnx-mlir project is started.
+ // All opsets lower than the last opset should use the last opset(version) + if (node.domain().compare("ai.onnx.ml") != 0 && + version < opVersions.back().version && + version < MINIMUM_SUPPORTED_OPSET) + llvm::outs() << "Warning: ONNX " << node.op_type() + << " in your model is using Opset " << version + << ", which is quite old. Please consider regenerating your " + "model with a newer Opset.\n"; + + ImportHandlerType handler = opVersions.front().handler; + for (int i = opVersions.size() - 1; i > 0; --i) { + if (version < opVersions[i - 1].version) { + LLVM_DEBUG(llvm::dbgs() << DEBUG_TYPE << ": - use Opset " + << opVersions[i].version << "\n"); + handler = opVersions[i].handler; break; + } + } + (this->*handler)(node); + return true; + } + void ImportNode(const onnx::NodeProto &node) { - std::string versionStr = GetImportVersionOfNode(node); - - // look up handler for the opName. If not found, create a node - // for a custom op, and issue a warning. - auto handler = - import_handler_map_.find(node.op_type() + versionStr.c_str()); - if (handler != import_handler_map_.end()) { - (this->*(handler->second))(node); - } else { + bool imported = TryImportONNXNode(node); + if (!imported) { + LLVM_DEBUG(llvm::dbgs() << DEBUG_TYPE << ": Importing Custom op " + << node.op_type() << " (" << node.name() << ")" + << ", domain: '" << node.domain() << "'\n"); ImportCustomNode(node); } } void InitHandlerMap() { -#include "src/Builder/OpBuildTable.inc" + foreachONNXOp([this](auto nullOp) { + using T = decltype(nullOp); + if constexpr (std::is_base_of_v, T>) { + StringRef name = T::getONNXName(); + int version = T::getONNXSinceVersion(); + ImportHandlerType handler = &FrontendGenImpl::buildOperation; + ONNXOpVersions &opVersions = onnx_ops_map_[name.str()]; + // Insert in descending version order: + auto it = opVersions.begin(); + while (it != opVersions.end() && it->version > version) { + ++it; // Skip past larger versions.
+ } + opVersions.insert(it, {version, handler}); + } + }); } /*! @@ -1265,10 +1004,270 @@ class FrontendGenImpl { return mainFunc; } + + template + void buildOperation(const onnx::NodeProto &node); }; // class FrontendGenImpl -} // namespace detail -} // namespace onnx_mlir -namespace onnx_mlir { + +template +void FrontendGenImpl::buildOperation(const onnx::NodeProto &node) { + doBuildOperation(node); +} + +template <> +void FrontendGenImpl::buildOperation( + const onnx::NodeProto &node) { + int nOuts = node.output().size(); + if (nOuts == 1) { + // Inference mode with one output. + doBuildOperation(node); + } else { + // Training mode with four trailing optional outputs. Not handled yet. + doBuildOperation(node); + } +} + +template <> +void FrontendGenImpl::buildOperation(const onnx::NodeProto &node) { + std::vector inputs; + int expectedNumOperands = ONNXCastOp::getNumberOfOperands(); + int expectedNumResults = ONNXCastOp::getNumberOfResults(); + for (const auto &item : node.input()) { + if (item.empty()) { + // Optional inputs using empty string will be imported as NoneType. + inputs.emplace_back(createNoneValue()); + } else { + if (const Value *valuePtr = frontend_symbols_.GetByOnnxName(item)) { + inputs.push_back(*valuePtr); + } + } + } + + std::vector attributes; + for (int i = 0; i < node.attribute_size(); ++i) { + auto attr = node.attribute(i); + auto mlir_type = convertONNXTypeToMLIRType( + builder_, static_cast(attr.i())); + Attribute mlirAttr = TypeAttr::get(mlir_type); + attributes.push_back(builder_.getNamedAttr(attr.name(), mlirAttr)); + } + // If the node has a name, then import it. 
+ if (node.has_name()) { + attributes.push_back(builder_.getNamedAttr( + "onnx_node_name", builder_.getStringAttr(node.name()))); + } + + buildOutputAndOperation( + node, inputs, expectedNumOperands, expectedNumResults, attributes); +} + +template <> +void FrontendGenImpl::buildOperation( + const onnx::NodeProto &node) { + int nOps = node.input().size(); + int nIn = ONNXDropoutOp::getNumberOfOperands(); + if (nOps == nIn) { + // All inputs are specified + doBuildOperation(node); + return; + } + + // Add the default value for optional input + // Copy the provided inputs first + std::vector inputs; + for (const auto &item : node.input()) { + if (const Value *valuePtr = frontend_symbols_.GetByOnnxName(item)) { + inputs.push_back(*valuePtr); + } + } + + // If ratio is not specified, the default value is 0.5 + if (nOps < 2) { + llvm::SmallVector dims; + dims.push_back(1); + llvm::SmallVector values; + values.push_back(0.5); + Type elementType = builder_.getF32Type(); + llvm::ArrayRef tensorDims(dims.data(), dims.size()); + auto tensorType = RankedTensorType::get(tensorDims, elementType); + auto constantDenseAttribute = + DenseElementsAttr::get(tensorType, llvm::ArrayRef(values)); + auto constantOp = builder_.create( + UnknownLoc(), Attribute(), constantDenseAttribute); + Value constantResult = *(constantOp.getODSResults(0).begin()); + inputs.push_back(constantResult); + } + + // If training_mode is not specified, the default value is false + if (nOps < 3) { + llvm::SmallVector dims; + dims.push_back(1); + llvm::SmallVector values; + values.push_back(false); + Type elementType = builder_.getIntegerType(1); + llvm::ArrayRef tensorDims(dims.data(), dims.size()); + auto tensorType = RankedTensorType::get(tensorDims, elementType); + auto constantDenseAttribute = + DenseElementsAttr::get(tensorType, llvm::ArrayRef(values)); + auto constantOp = builder_.create( + UnknownLoc(), Attribute(), constantDenseAttribute); + Value constantResult = 
*(constantOp.getODSResults(0).begin()); + inputs.push_back(constantResult); + } + int nOut = ONNXDropoutOp::getNumberOfResults(); + auto attributes = ImportNodeAttributes(node); + buildOutputAndOperation(node, inputs, nIn, nOut, attributes); +} + +template <> +void FrontendGenImpl::buildOperation( + const onnx::NodeProto &node) { + int nOuts = node.output().size(); + if (nOuts == 1) { + doBuildOperation(node); + } else { + doBuildOperation(node); + } +} + +template <> +void FrontendGenImpl::buildOperation(const onnx::NodeProto &node) { + int nOps = node.input().size(); + if (nOps == 2) { + llvm::SmallVector dims; + dims.push_back(1); + + std::vector inputs; + getNodeInputs(node, inputs); + Type elementType = inputs[0].getType().cast().getElementType(); + + llvm::SmallVector values( + 1, builder_.getZeroAttr(elementType)); + + llvm::ArrayRef tensorDims(dims.data(), dims.size()); + auto tensorType = RankedTensorType::get(tensorDims, elementType); + auto constantDenseAttribute = + DenseElementsAttr::get(tensorType, llvm::ArrayRef(values)); + + // Use the special builder defined in ONNXOp.td.inc. + auto constantOp = builder_.create( + UnknownLoc(), Attribute(), constantDenseAttribute); + Value constantResult = *(constantOp.getODSResults(0).begin()); + inputs.push_back(constantResult); + + int nIn = ONNXPadOp::getNumberOfOperands(); + int nOut = ONNXPadOp::getNumberOfResults(); + auto attributes = ImportNodeAttributes(node); + buildOutputAndOperation(node, inputs, nIn, nOut, attributes); + } else { + doBuildOperation(node); + } +} + +template <> +void FrontendGenImpl::buildOperation(const onnx::NodeProto &node) { + // The final_state_and_scan_outputs of Scan shows final values of loop's + // N state variables followed by K scan_outputs.
+ int expectedNumOperands = ONNXScanOp::getNumberOfOperands(); + int expectedNumResults = ONNXScanOp::getNumberOfResults(); + std::vector inputs; + getNodeInputs(node, inputs); + auto attributes = ImportNodeAttributes(node); + int num_scan_inputs = -1; + int i; + for (i = 0; i < node.attribute_size(); ++i) { + auto attr = node.attribute(i); + if (attr.name() == "num_scan_inputs") { + num_scan_inputs = attr.i(); + break; + } + } + assert((i < node.attribute_size()) && + "mandatory num_scan_inputs attr not in onnx.Scan"); + buildOutputAndOperation(node, inputs, expectedNumOperands, + expectedNumResults, attributes, + /*givenOutputTypes=*/std::vector(), + /*num_use_inference_outputs=*/num_scan_inputs); +} + +template <> +void FrontendGenImpl::buildOperation(const onnx::NodeProto &node) { + std::array inVals = { + nullptr, + }; + + for (const auto &item : llvm::enumerate(node.input())) { + if (const Value *valuePtr = frontend_symbols_.GetByOnnxName(item.value())) { + inVals[item.index()] = *valuePtr; + } else { + assert(false && "Unknown input"); + } + } + + // Data input is imported but starts, ends, axes, and steps may come from + // attributes, and need to be created as constant ops. + const Type elementType = builder_.getIntegerType(64); + const auto attributes = ImportNodeAttributes(node); + for (auto attr : attributes) { + if (auto arrayAttr = attr.getValue().dyn_cast()) { + const auto tensorType = + RankedTensorType::get({(int64_t)arrayAttr.size()}, elementType); + auto constantDenseAttribute = + DenseElementsAttr::get(tensorType, arrayAttr.getValue()); + auto constantOp = builder_.create( + UnknownLoc(), Attribute(), constantDenseAttribute); + Value constantValue = constantOp.getOutput(); + + // Map from ONNX attributes to indices, which are + // matched with ONNXSliceOp::build ordering. 
+ auto inputIdx = llvm::StringSwitch(attr.getName()) + .Case("starts", 1) + .Case("ends", 2) + .Case("axes", 3) + .Case("steps", 4) + .Default(-1); + if (inputIdx < 0) + continue; + assert(inVals[inputIdx] == nullptr && + "This input has already been filled in"); + inVals[inputIdx] = constantValue; + } + } + + assert(inVals[1] != nullptr && "Slice requires a starts attribute"); + assert(inVals[2] != nullptr && "Slice requires an ends attribute"); + inVals[3] = inVals[3] == nullptr ? createNoneValue() : inVals[3]; + inVals[4] = inVals[4] == nullptr ? createNoneValue() : inVals[4]; + + int nIn = ONNXSliceOp::getNumberOfOperands(); + int nOut = ONNXSliceOp::getNumberOfResults(); + const auto in = std::vector(inVals.begin(), inVals.end()); + + buildOutputAndOperation(node, in, nIn, nOut, attributes); +} + +template <> +void FrontendGenImpl::buildOperation( + const onnx::NodeProto &node) { + // If the input is I64, the output is string. + // If the input is string, the output is I64. + std::vector inputs; + int expectedNumOperands = ONNXCategoryMapperOp::getNumberOfOperands(); + int expectedNumResults = ONNXCategoryMapperOp::getNumberOfResults(); + getNodeInputs(node, inputs); + auto attributes = ImportNodeAttributes(node); + std::vector outputTypes; + auto inputType = inputs[0].getType().cast(); + if (inputType.getElementType().isInteger(64)) { + outputTypes.emplace_back(mlir::ONNXStringType::get(builder_.getContext())); + } else { + outputTypes.emplace_back(builder_.getIntegerType(64)); + } + buildOutputAndOperation(node, inputs, + expectedNumOperands, expectedNumResults, attributes, outputTypes); +} + +} // namespace bool ImportFrontendModelInternal(onnx::ModelProto &model, MLIRContext &context, OwningOpRef &module, ImportOptions options) { @@ -1378,8 +1377,7 @@ int ImportFrontendModelFile(StringRef model_fname, MLIRContext &context, void ImportFrontendModel(const onnx::ModelProto &model, MLIRContext &context, OwningOpRef &module, ImportOptions options) { - - 
detail::FrontendGenImpl myONNXGen(context); + FrontendGenImpl myONNXGen(context); module = myONNXGen.ImportONNXModel(model, options); } diff --git a/src/Builder/OpBuildTable.inc b/src/Builder/OpBuildTable.inc deleted file mode 100644 index f528af0345..0000000000 --- a/src/Builder/OpBuildTable.inc +++ /dev/null @@ -1,680 +0,0 @@ -//******************************************************** -// Do not modify this file directly. -// This file is automatically generated via script. -// Details can be found in docs/ImportONNXDefs.md . -//******************************************************** - -op_dialect_version_map_["Abs"] = {13}; -op_dialect_version_map_["Acos"] = {7}; -op_dialect_version_map_["Acosh"] = {9}; -op_dialect_version_map_["Adagrad"] = {1}; -op_dialect_version_map_["Adam"] = {1}; -op_dialect_version_map_["Add"] = {14}; -op_dialect_version_map_["And"] = {7}; -op_dialect_version_map_["ArgMax"] = {13}; -op_dialect_version_map_["ArgMin"] = {13}; -op_dialect_version_map_["ArrayFeatureExtractor"] = {1}; -op_dialect_version_map_["Asin"] = {7}; -op_dialect_version_map_["Asinh"] = {9}; -op_dialect_version_map_["Atan"] = {7}; -op_dialect_version_map_["Atanh"] = {9}; -op_dialect_version_map_["AveragePool"] = {11}; -op_dialect_version_map_["BatchNormalization"] = {15}; -op_dialect_version_map_["Bernoulli"] = {15}; -op_dialect_version_map_["Binarizer"] = {1}; -op_dialect_version_map_["BitShift"] = {11}; -op_dialect_version_map_["BitwiseAnd"] = {18}; -op_dialect_version_map_["BitwiseNot"] = {18}; -op_dialect_version_map_["BitwiseOr"] = {18}; -op_dialect_version_map_["BitwiseXor"] = {18}; -op_dialect_version_map_["BlackmanWindow"] = {17}; -op_dialect_version_map_["Cast"] = {13}; -op_dialect_version_map_["CastLike"] = {15}; -op_dialect_version_map_["CastMap"] = {1}; -op_dialect_version_map_["CategoryMapper"] = {1}; -op_dialect_version_map_["Ceil"] = {13}; -op_dialect_version_map_["Celu"] = {12}; -op_dialect_version_map_["CenterCropPad"] = {18}; 
-op_dialect_version_map_["Clip"] = {13, 12, 11, 6}; -op_dialect_version_map_["Compress"] = {11}; -op_dialect_version_map_["Concat"] = {13}; -op_dialect_version_map_["ConcatFromSequence"] = {11}; -op_dialect_version_map_["Constant"] = {13}; -op_dialect_version_map_["ConstantOfShape"] = {9}; -op_dialect_version_map_["Conv"] = {11}; -op_dialect_version_map_["ConvInteger"] = {10}; -op_dialect_version_map_["ConvTranspose"] = {11}; -op_dialect_version_map_["Cos"] = {7}; -op_dialect_version_map_["Cosh"] = {9}; -op_dialect_version_map_["Col2Im"] = {18}; -op_dialect_version_map_["CumSum"] = {14}; -op_dialect_version_map_["DepthToSpace"] = {13}; -op_dialect_version_map_["DequantizeLinear"] = {13}; -op_dialect_version_map_["Det"] = {11}; -op_dialect_version_map_["DFT"] = {17}; -op_dialect_version_map_["DictVectorizer"] = {1}; -op_dialect_version_map_["Div"] = {14}; -op_dialect_version_map_["Dropout"] = {13}; -op_dialect_version_map_["DynamicQuantizeLinear"] = {11}; -op_dialect_version_map_["Einsum"] = {12}; -op_dialect_version_map_["Elu"] = {6}; -op_dialect_version_map_["Equal"] = {13}; -op_dialect_version_map_["Erf"] = {13}; -op_dialect_version_map_["Exp"] = {13}; -op_dialect_version_map_["Expand"] = {13}; -op_dialect_version_map_["EyeLike"] = {9}; -op_dialect_version_map_["FeatureVectorizer"] = {1}; -op_dialect_version_map_["Flatten"] = {13}; -op_dialect_version_map_["Floor"] = {13}; -op_dialect_version_map_["GRU"] = {14}; -op_dialect_version_map_["Gather"] = {13}; -op_dialect_version_map_["GatherElements"] = {13}; -op_dialect_version_map_["GatherND"] = {13}; -op_dialect_version_map_["Gemm"] = {13}; -op_dialect_version_map_["GlobalAveragePool"] = {1}; -op_dialect_version_map_["GlobalLpPool"] = {2}; -op_dialect_version_map_["GlobalMaxPool"] = {1}; -op_dialect_version_map_["Gradient"] = {1}; -op_dialect_version_map_["Greater"] = {13}; -op_dialect_version_map_["GreaterOrEqual"] = {16}; -op_dialect_version_map_["GridSample"] = {16}; 
-op_dialect_version_map_["GroupNormalization"] = {18}; -op_dialect_version_map_["HammingWindow"] = {17}; -op_dialect_version_map_["HannWindow"] = {17}; -op_dialect_version_map_["HardSigmoid"] = {6}; -op_dialect_version_map_["Hardmax"] = {13}; -op_dialect_version_map_["HardSwish"] = {14}; -op_dialect_version_map_["Identity"] = {16}; -op_dialect_version_map_["If"] = {16}; -op_dialect_version_map_["Imputer"] = {1}; -op_dialect_version_map_["InstanceNormalization"] = {6}; -op_dialect_version_map_["IsInf"] = {10}; -op_dialect_version_map_["IsNaN"] = {13}; -op_dialect_version_map_["LayerNormalization"] = {17}; -op_dialect_version_map_["LRN"] = {13}; -op_dialect_version_map_["LSTM"] = {14}; -op_dialect_version_map_["LabelEncoder"] = {2}; -op_dialect_version_map_["LeakyRelu"] = {16}; -op_dialect_version_map_["Less"] = {13}; -op_dialect_version_map_["LessOrEqual"] = {16}; -op_dialect_version_map_["LinearClassifier"] = {1}; -op_dialect_version_map_["LinearRegressor"] = {1}; -op_dialect_version_map_["Log"] = {13}; -op_dialect_version_map_["LogSoftmax"] = {13}; -op_dialect_version_map_["Loop"] = {16}; -op_dialect_version_map_["LpNormalization"] = {1}; -op_dialect_version_map_["LpPool"] = {18}; -op_dialect_version_map_["MatMul"] = {13}; -op_dialect_version_map_["MatMulInteger"] = {10}; -op_dialect_version_map_["Max"] = {13}; -op_dialect_version_map_["MaxPool"] = {12}; -op_dialect_version_map_["MaxRoiPool"] = {1}; -op_dialect_version_map_["MaxUnpool"] = {11}; -op_dialect_version_map_["Mean"] = {13}; -op_dialect_version_map_["MeanVarianceNormalization"] = {13}; -op_dialect_version_map_["MelWeightMatrix"] = {17}; -op_dialect_version_map_["Min"] = {13}; -op_dialect_version_map_["Mish"] = {18}; -op_dialect_version_map_["Mod"] = {13}; -op_dialect_version_map_["Momentum"] = {1}; -op_dialect_version_map_["Mul"] = {14}; -op_dialect_version_map_["Multinomial"] = {7}; -op_dialect_version_map_["Neg"] = {13}; -op_dialect_version_map_["NegativeLogLikelihoodLoss"] = {13}; 
-op_dialect_version_map_["NonMaxSuppression"] = {11}; -op_dialect_version_map_["NonZero"] = {13}; -op_dialect_version_map_["Normalizer"] = {1}; -op_dialect_version_map_["Not"] = {1}; -op_dialect_version_map_["OneHot"] = {11}; -op_dialect_version_map_["OneHotEncoder"] = {1}; -op_dialect_version_map_["Optional"] = {15}; -op_dialect_version_map_["OptionalGetElement"] = {18}; -op_dialect_version_map_["OptionalHasElement"] = {18}; -op_dialect_version_map_["Or"] = {7}; -op_dialect_version_map_["PRelu"] = {16}; -op_dialect_version_map_["Pad"] = {18, 13, 11, 2}; -op_dialect_version_map_["Pow"] = {15}; -op_dialect_version_map_["QLinearConv"] = {10}; -op_dialect_version_map_["QLinearMatMul"] = {10}; -op_dialect_version_map_["QuantizeLinear"] = {13}; -op_dialect_version_map_["RNN"] = {14}; -op_dialect_version_map_["RandomNormal"] = {1}; -op_dialect_version_map_["RandomNormalLike"] = {1}; -op_dialect_version_map_["RandomUniform"] = {1}; -op_dialect_version_map_["RandomUniformLike"] = {1}; -op_dialect_version_map_["Range"] = {11}; -op_dialect_version_map_["Reciprocal"] = {13}; -op_dialect_version_map_["ReduceL1"] = {18, 13}; -op_dialect_version_map_["ReduceL2"] = {18, 13}; -op_dialect_version_map_["ReduceLogSum"] = {18, 13}; -op_dialect_version_map_["ReduceLogSumExp"] = {18, 13}; -op_dialect_version_map_["ReduceMax"] = {18, 13}; -op_dialect_version_map_["ReduceMean"] = {18, 13}; -op_dialect_version_map_["ReduceMin"] = {18, 13}; -op_dialect_version_map_["ReduceProd"] = {18, 13}; -op_dialect_version_map_["ReduceSum"] = {13, 11}; -op_dialect_version_map_["ReduceSumSquare"] = {18, 13}; -op_dialect_version_map_["Relu"] = {14}; -op_dialect_version_map_["Reshape"] = {14}; -op_dialect_version_map_["Resize"] = {18, 13, 11, 10}; -op_dialect_version_map_["ReverseSequence"] = {10}; -op_dialect_version_map_["RoiAlign"] = {16}; -op_dialect_version_map_["Round"] = {11}; -op_dialect_version_map_["SVMClassifier"] = {1}; -op_dialect_version_map_["SVMRegressor"] = {1}; 
-op_dialect_version_map_["Scaler"] = {1}; -op_dialect_version_map_["Scan"] = {16}; -op_dialect_version_map_["Scatter"] = {11}; -op_dialect_version_map_["ScatterElements"] = {18}; -op_dialect_version_map_["ScatterND"] = {18}; -op_dialect_version_map_["Selu"] = {6}; -op_dialect_version_map_["SequenceAt"] = {11}; -op_dialect_version_map_["SequenceConstruct"] = {11}; -op_dialect_version_map_["SequenceEmpty"] = {11}; -op_dialect_version_map_["SequenceErase"] = {11}; -op_dialect_version_map_["SequenceInsert"] = {11}; -op_dialect_version_map_["SequenceLength"] = {11}; -op_dialect_version_map_["SequenceMap"] = {17}; -op_dialect_version_map_["Shape"] = {15}; -op_dialect_version_map_["Shrink"] = {9}; -op_dialect_version_map_["Sigmoid"] = {13}; -op_dialect_version_map_["Sign"] = {13}; -op_dialect_version_map_["Sin"] = {7}; -op_dialect_version_map_["Sinh"] = {9}; -op_dialect_version_map_["Size"] = {13}; -op_dialect_version_map_["Slice"] = {13}; -op_dialect_version_map_["Softmax"] = {13, 11}; -op_dialect_version_map_["SoftmaxCrossEntropyLoss"] = {13}; -op_dialect_version_map_["Softplus"] = {1}; -op_dialect_version_map_["Softsign"] = {1}; -op_dialect_version_map_["SpaceToDepth"] = {13}; -op_dialect_version_map_["Split"] = {18, 13, 11}; -op_dialect_version_map_["SplitToSequence"] = {11}; -op_dialect_version_map_["Sqrt"] = {13}; -op_dialect_version_map_["Squeeze"] = {13, 11}; -op_dialect_version_map_["StringNormalizer"] = {10}; -op_dialect_version_map_["STFT"] = {17}; -op_dialect_version_map_["Sub"] = {14}; -op_dialect_version_map_["Sum"] = {13}; -op_dialect_version_map_["Tan"] = {7}; -op_dialect_version_map_["Tanh"] = {13}; -op_dialect_version_map_["TfIdfVectorizer"] = {9}; -op_dialect_version_map_["ThresholdedRelu"] = {10}; -op_dialect_version_map_["Tile"] = {13}; -op_dialect_version_map_["TopK"] = {11}; -op_dialect_version_map_["Transpose"] = {13}; -op_dialect_version_map_["Trilu"] = {14}; -op_dialect_version_map_["TreeEnsembleClassifier"] = {1}; 
-op_dialect_version_map_["TreeEnsembleRegressor"] = {1}; -op_dialect_version_map_["Unique"] = {11}; -op_dialect_version_map_["Unsqueeze"] = {13, 11}; -op_dialect_version_map_["Upsample"] = {9, 7}; -op_dialect_version_map_["Where"] = {16}; -op_dialect_version_map_["Xor"] = {7}; -op_dialect_version_map_["ZipMap"] = {1}; -import_handler_map_["Abs"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Acos"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Acosh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Add"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["And"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ArgMax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ArgMin"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Asin"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Asinh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Atan"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Atanh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["AveragePool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BatchNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeBatchNormalization; -import_handler_map_["Bernoulli"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitShift"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitwiseAnd"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitwiseNot"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitwiseOr"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BitwiseXor"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["BlackmanWindow"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Cast"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeCast; -import_handler_map_["CastLike"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Ceil"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Celu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["CenterCropPad"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Clip"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ClipV12"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ClipV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ClipV6"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Col2Im"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Compress"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Concat"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ConcatFromSequence"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Constant"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ConstantOfShape"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Conv"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ConvInteger"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ConvTranspose"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Cos"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Cosh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["CumSum"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["DFT"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["DepthToSpace"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["DequantizeLinear"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Det"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Div"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Dropout"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeDropout; -import_handler_map_["DynamicQuantizeLinear"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Einsum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Elu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Equal"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Erf"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Exp"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Expand"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["EyeLike"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Flatten"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Floor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GRU"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Gather"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GatherElements"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GatherND"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Gemm"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GlobalAveragePool"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GlobalLpPool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GlobalMaxPool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Greater"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GreaterOrEqual"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GridSample"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["GroupNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["HammingWindow"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["HannWindow"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["HardSigmoid"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["HardSwish"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Hardmax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Identity"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["If"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["InstanceNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["IsInf"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["IsNaN"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LRN"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LSTM"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LayerNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LeakyRelu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Less"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
-import_handler_map_["LessOrEqual"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Log"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LogSoftmax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Loop"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LpNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LpPool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MatMul"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MatMulInteger"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Max"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MaxPool"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeMaxPool; -import_handler_map_["MaxRoiPool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MaxUnpool"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Mean"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MeanVarianceNormalization"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["MelWeightMatrix"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Min"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Mish"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Mod"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Mul"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Multinomial"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Neg"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["NegativeLogLikelihoodLoss"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
-import_handler_map_["NonMaxSuppression"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["NonZero"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Not"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["OneHot"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Optional"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["OptionalGetElement"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["OptionalHasElement"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Or"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["PRelu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Pad"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodePad; -import_handler_map_["PadV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["PadV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["PadV2"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Pow"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["QLinearConv"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["QLinearMatMul"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["QuantizeLinear"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RNN"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RandomNormal"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RandomNormalLike"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RandomUniform"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RandomUniformLike"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Range"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Reciprocal"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceL1"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceL1V13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceL2"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceL2V13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceLogSum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceLogSumV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceLogSumExp"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceLogSumExpV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMaxV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMean"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMeanV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMin"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceMinV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceProd"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceProdV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceSum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceSumV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceSumSquare"] = - 
&onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReduceSumSquareV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Relu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Reshape"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Resize"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ResizeV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ResizeV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ResizeV10"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ReverseSequence"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["RoiAlign"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Round"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["STFT"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Scan"] = - &onnx_mlir::detail::FrontendGenImpl::ImportScan; -import_handler_map_["Scatter"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ScatterElements"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ScatterND"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Selu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceAt"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceConstruct"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceEmpty"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceErase"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceInsert"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
-import_handler_map_["SequenceLength"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SequenceMap"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Shape"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Shrink"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sigmoid"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sign"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sin"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sinh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Size"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Slice"] = - &onnx_mlir::detail::FrontendGenImpl::ImportNodeSlice; -import_handler_map_["Softmax"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SoftmaxV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SoftmaxCrossEntropyLoss"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Softplus"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Softsign"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SpaceToDepth"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Split"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SplitV13"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SplitV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SplitToSequence"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sqrt"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Squeeze"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
-import_handler_map_["SqueezeV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["StringNormalizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sub"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Sum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Tan"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Tanh"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["TfIdfVectorizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ThresholdedRelu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Tile"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["TopK"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Transpose"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Trilu"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Unique"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Unsqueeze"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["UnsqueezeV11"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Upsample"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["UpsampleV7"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Where"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Xor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ArrayFeatureExtractor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Binarizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["CastMap"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; 
-import_handler_map_["CategoryMapper"] = - &onnx_mlir::detail::FrontendGenImpl::ImportCategoryMapper; -import_handler_map_["DictVectorizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["FeatureVectorizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Imputer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LabelEncoder"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LinearClassifier"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["LinearRegressor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Normalizer"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["OneHotEncoder"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SVMClassifier"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["SVMRegressor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Scaler"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["TreeEnsembleClassifier"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["TreeEnsembleRegressor"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["ZipMap"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Adagrad"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Adam"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Gradient"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; -import_handler_map_["Momentum"] = - &onnx_mlir::detail::FrontendGenImpl::buildOperation; diff --git a/src/Builder/OpBuildTable.inc.dc.in b/src/Builder/OpBuildTable.inc.dc.in deleted file mode 100644 index 5fb17da9bd..0000000000 --- a/src/Builder/OpBuildTable.inc.dc.in +++ /dev/null @@ -1 +0,0 @@ 
-file-same-as-stdout({"file": "@CMAKE_CURRENT_SOURCE_DIR@/OpBuildTable.inc", "cmd": ["@Python3_EXECUTABLE@", "@PROJECT_SOURCE_DIR@/utils/gen_onnx_mlir.py", "--dry-run-op-build-table"]}) diff --git a/src/Dialect/ONNX/AdditionalONNXOps.td b/src/Dialect/ONNX/AdditionalONNXOps.td index fb7e75fe3a..514398fec5 100644 --- a/src/Dialect/ONNX/AdditionalONNXOps.td +++ b/src/Dialect/ONNX/AdditionalONNXOps.td @@ -18,7 +18,7 @@ // of original ONNX ops. These ops may possibly be removed in the future. // Example are ONNXBatchNormalizationInferenceModeOp, ... // -// After changes that impact ONNX, run "make OMONNXOpsIncTranslation". +// After changes that impact ONNX, run "make OMONNXOpsTableGenIncGen". // After changes that impact the documentation of the ops, run // "make onnx-mlir-docs". // diff --git a/src/Dialect/ONNX/ONNX.td b/src/Dialect/ONNX/ONNX.td index ce1b196d29..ee3885e639 100644 --- a/src/Dialect/ONNX/ONNX.td +++ b/src/Dialect/ONNX/ONNX.td @@ -10,7 +10,7 @@ // // When adding non-standard ONNX ops, please add them in AdditionalONNXOps.td. // -// After changes that impact ONNX, run "make OMONNXOpsIncTranslation". +// After changes that impact ONNX, run "make OMONNXOpsTableGenIncGen". // After changes that impact the documentation of the ops, run // "make onnx-mlir-docs". 
// @@ -23,10 +23,12 @@ include "mlir/IR/AttrTypeBase.td" include "mlir/IR/OpBase.td" include "mlir/IR/PatternBase.td" include "mlir/Interfaces/ControlFlowInterfaces.td" +include "mlir/Interfaces/SideEffectInterfaces.td" +include "src/Interface/HasOnnxSubgraphOpInterface.td" +include "src/Interface/ONNXOperationTrait.td" +include "src/Interface/ResultTypeInferenceOpInterface.td" include "src/Interface/ShapeHelperOpInterface.td" include "src/Interface/ShapeInferenceOpInterface.td" -include "src/Interface/ResultTypeInferenceOpInterface.td" -include "src/Interface/HasOnnxSubgraphOpInterface.td" include "src/IR/AttrBase.td" //===----------------------------------------------------------------------===// @@ -248,7 +250,6 @@ class ONNX_Op traits = []> : // 4. type of string, complex64 and complex128 for input/output are ignored // 5. unsigned int are treated as signed one -include "mlir/Interfaces/SideEffectInterfaces.td" include "src/Dialect/ONNX/ONNXOps.td.inc" include "src/Dialect/ONNX/AdditionalONNXOps.td" diff --git a/src/Dialect/ONNX/ONNXOps.hpp b/src/Dialect/ONNX/ONNXOps.hpp index bb99083b43..43b8f9754f 100644 --- a/src/Dialect/ONNX/ONNXOps.hpp +++ b/src/Dialect/ONNX/ONNXOps.hpp @@ -15,18 +15,45 @@ #pragma once #include "mlir/Dialect/Func/IR/FuncOps.h" +#include "mlir/IR/OpDefinition.h" #include "src/Dialect/ONNX/ONNXAttributes.hpp" #include "src/Dialect/ONNX/ONNXDialect.hpp" #include "src/Dialect/ONNX/ONNXOps/ShapeHelper.hpp" #include "src/Dialect/ONNX/ONNXTypes.hpp" #include "src/Interface/HasOnnxSubgraphOpInterface.hpp" +#include "src/Interface/ONNXOperationTrait.hpp" #include "src/Interface/ResultTypeInferenceOpInterface.hpp" #include "src/Interface/ShapeInferenceOpInterface.hpp" +#include + +#define GET_OP_CLASSES +#include "src/Dialect/ONNX/ONNXOps.hpp.inc" + namespace mlir { // OpSet level supported by onnx-mlir static constexpr int CURRENT_ONNX_OPSET = 17; -} // end namespace mlir -#define GET_OP_CLASSES -#include "src/Dialect/ONNX/ONNXOps.hpp.inc" 
+namespace detail { +using ONNXOpsT = std::variant< +#define GET_OP_LIST +#include "src/Dialect/ONNX/ONNXOps.cpp.inc" + >; + +template +inline void foreachONNXOpImpl(Action &&act, std::variant) { + (act(Ts()), ...); +} +} // namespace detail + +template +struct OpTypeToken { + using OpType = OP; +}; + +template +inline void foreachONNXOp(Action &&act) { + return detail::foreachONNXOpImpl(act, detail::ONNXOpsT()); +} + +} // end namespace mlir diff --git a/src/Dialect/ONNX/ONNXOps.td.inc b/src/Dialect/ONNX/ONNXOps.td.inc index c3e58f795b..a946fa1cc6 100644 --- a/src/Dialect/ONNX/ONNXOps.td.inc +++ b/src/Dialect/ONNX/ONNXOps.td.inc @@ -5,7 +5,7 @@ //******************************************************** def ONNXAbsOp:ONNX_Op<"Abs", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Abs operation"; let description = [{ Absolute takes one input data (Tensor) and produces one output data @@ -23,30 +23,33 @@ def ONNXAbsOp:ONNX_Op<"Abs", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXAbsOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Abs"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int 
getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXAbsOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXAcosOp:ONNX_Op<"Acos", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Acos operation"; let description = [{ Calculates the arccosine (inverse of cosine) of the given input tensor, element-wise. @@ -54,6 +57,9 @@ def ONNXAcosOp:ONNX_Op<"Acos", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Acos"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; static int getNumberOfOperands() { return 1; } @@ -75,7 +81,7 @@ def ONNXAcosOp:ONNX_Op<"Acos", } def ONNXAcoshOp:ONNX_Op<"Acosh", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Acosh operation"; let description = [{ Calculates the hyperbolic arccosine of the given input tensor element-wise. 
@@ -83,6 +89,9 @@ def ONNXAcoshOp:ONNX_Op<"Acosh", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Acosh"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -104,8 +113,7 @@ def ONNXAcoshOp:ONNX_Op<"Acosh", } def ONNXAddOp:ONNX_Op<"Add", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Add operation"; let description = [{ Performs element-wise binary addition (with Numpy-style broadcasting support). @@ -136,31 +144,35 @@ def ONNXAddOp:ONNX_Op<"Add", resultType = UnrankedTensorType::get(lhsTy.cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXAddOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Add"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let 
extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXAddOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; + let hasCanonicalizer = 1; } def ONNXAndOp:ONNX_Op<"And", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX And operation"; let description = [{ Returns the tensor resulted from performing the `and` logical operation @@ -190,31 +202,34 @@ def ONNXAndOp:ONNX_Op<"And", resultType = UnrankedTensorType::get(lhsTy.cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {9}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXAndOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "And"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {9}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + 
onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXAndOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXArgMaxOp:ONNX_Op<"ArgMax", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ArgMax operation"; let description = [{ Computes the indices of the max elements of the input tensor's element along the @@ -231,6 +246,9 @@ def ONNXArgMaxOp:ONNX_Op<"ArgMax", DefaultValuedAttr:$select_last_index); let results = (outs TensorOf<[I64]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ArgMax"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -253,7 +271,7 @@ def ONNXArgMaxOp:ONNX_Op<"ArgMax", } def ONNXArgMinOp:ONNX_Op<"ArgMin", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ArgMin operation"; let description = [{ Computes the indices of the min elements of the input tensor's element along the @@ -270,6 +288,9 @@ def ONNXArgMinOp:ONNX_Op<"ArgMin", DefaultValuedAttr:$select_last_index); let results = (outs TensorOf<[I64]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ArgMin"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -292,7 +313,7 @@ def ONNXArgMinOp:ONNX_Op<"ArgMin", } def ONNXAsinOp:ONNX_Op<"Asin", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Asin operation"; let description = [{ 
Calculates the arcsine (inverse of sine) of the given input tensor, element-wise. @@ -300,6 +321,9 @@ def ONNXAsinOp:ONNX_Op<"Asin", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Asin"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; static int getNumberOfOperands() { return 1; } @@ -321,7 +345,7 @@ def ONNXAsinOp:ONNX_Op<"Asin", } def ONNXAsinhOp:ONNX_Op<"Asinh", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Asinh operation"; let description = [{ Calculates the hyperbolic arcsine of the given input tensor element-wise. @@ -329,6 +353,9 @@ def ONNXAsinhOp:ONNX_Op<"Asinh", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Asinh"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -350,7 +377,7 @@ def ONNXAsinhOp:ONNX_Op<"Asinh", } def ONNXAtanOp:ONNX_Op<"Atan", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Atan operation"; let description = [{ Calculates the arctangent (inverse of tangent) of the given input tensor, element-wise. 
@@ -358,6 +385,9 @@ def ONNXAtanOp:ONNX_Op<"Atan", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Atan"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; static int getNumberOfOperands() { return 1; } @@ -379,7 +409,7 @@ def ONNXAtanOp:ONNX_Op<"Atan", } def ONNXAtanhOp:ONNX_Op<"Atanh", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Atanh operation"; let description = [{ Calculates the hyperbolic arctangent of the given input tensor element-wise. @@ -387,6 +417,9 @@ def ONNXAtanhOp:ONNX_Op<"Atanh", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Atanh"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -408,7 +441,7 @@ def ONNXAtanhOp:ONNX_Op<"Atanh", } def ONNXAveragePoolOp:ONNX_Op<"AveragePool", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX AveragePool operation"; let description = [{ AveragePool consumes an input tensor X and applies average pooling across @@ -450,6 +483,9 @@ def ONNXAveragePoolOp:ONNX_Op<"AveragePool", OptionalAttr:$strides); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "AveragePool"; + static constexpr StringRef onnxDomain = ""; + static 
constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -472,7 +508,7 @@ def ONNXAveragePoolOp:ONNX_Op<"AveragePool", } def ONNXBatchNormalizationOp:ONNX_Op<"BatchNormalization", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX BatchNormalization operation"; let description = [{ Carries out batch normalization as described in the paper @@ -529,6 +565,9 @@ def ONNXBatchNormalizationOp:ONNX_Op<"BatchNormalization", AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, NoneType]>:$running_mean, AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, NoneType]>:$running_var); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "BatchNormalization"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 15; static int getNumberOfOperands() { return 5; } @@ -550,7 +589,7 @@ def ONNXBatchNormalizationOp:ONNX_Op<"BatchNormalization", } def ONNXBernoulliOp:ONNX_Op<"Bernoulli", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Bernoulli operation"; let description = [{ Draws binary random numbers (0 or 1) from a Bernoulli distribution. 
The input tensor should be a tensor @@ -565,6 +604,9 @@ def ONNXBernoulliOp:ONNX_Op<"Bernoulli", OptionalAttr:$seed); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[I1]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Bernoulli"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 15; static int getNumberOfOperands() { return 1; } @@ -586,7 +628,7 @@ def ONNXBernoulliOp:ONNX_Op<"Bernoulli", } def ONNXBitShiftOp:ONNX_Op<"BitShift", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX BitShift operation"; let description = [{ Bitwise shift operator performs element-wise operation. For each input element, if the @@ -607,6 +649,9 @@ def ONNXBitShiftOp:ONNX_Op<"BitShift", StrAttr:$direction); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>]>:$Z); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "BitShift"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 2; } @@ -629,7 +674,7 @@ def ONNXBitShiftOp:ONNX_Op<"BitShift", } def ONNXBitwiseAndOp:ONNX_Op<"BitwiseAnd", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX BitwiseAnd operation"; let description = [{ Returns the tensor resulting from performing the bitwise `and` operation @@ -641,6 +686,9 @@ def ONNXBitwiseAndOp:ONNX_Op<"BitwiseAnd", AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, 
TensorOf<[I32]>, TensorOf<[I64]>]>:$B); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$C); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "BitwiseAnd"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -663,7 +711,7 @@ def ONNXBitwiseAndOp:ONNX_Op<"BitwiseAnd", } def ONNXBitwiseNotOp:ONNX_Op<"BitwiseNot", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX BitwiseNot operation"; let description = [{ Returns the bitwise not of the input tensor element-wise. @@ -671,6 +719,9 @@ def ONNXBitwiseNotOp:ONNX_Op<"BitwiseNot", let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "BitwiseNot"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 1; } @@ -692,7 +743,7 @@ def ONNXBitwiseNotOp:ONNX_Op<"BitwiseNot", } def ONNXBitwiseOrOp:ONNX_Op<"BitwiseOr", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX BitwiseOr operation"; let description = [{ Returns the tensor resulting from performing the bitwise `or` operation @@ -704,6 +755,9 @@ def ONNXBitwiseOrOp:ONNX_Op<"BitwiseOr", AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, 
TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$B); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$C); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "BitwiseOr"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -726,7 +780,7 @@ def ONNXBitwiseOrOp:ONNX_Op<"BitwiseOr", } def ONNXBitwiseXorOp:ONNX_Op<"BitwiseXor", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX BitwiseXor operation"; let description = [{ Returns the tensor resulting from performing the bitwise `xor` operation @@ -738,6 +792,9 @@ def ONNXBitwiseXorOp:ONNX_Op<"BitwiseXor", AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$B); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$C); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "BitwiseXor"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -760,7 +817,7 @@ def ONNXBitwiseXorOp:ONNX_Op<"BitwiseXor", } def ONNXBlackmanWindowOp:ONNX_Op<"BlackmanWindow", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX BlackmanWindow operation"; let description = [{ Generates a Blackman window as described in the paper https://ieeexplore.ieee.org/document/1455106. 
@@ -770,6 +827,9 @@ def ONNXBlackmanWindowOp:ONNX_Op<"BlackmanWindow", DefaultValuedAttr:$periodic); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "BlackmanWindow"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 17; static int getNumberOfOperands() { return 1; } @@ -791,8 +851,7 @@ def ONNXBlackmanWindowOp:ONNX_Op<"BlackmanWindow", } def ONNXCastOp:ONNX_Op<"Cast", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"ResultTypeInferenceOpInterface">]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"ResultTypeInferenceOpInterface">]> { let summary = "ONNX Cast operation"; let description = [{ The operator casts the elements of a given input tensor to a data type @@ -835,6 +894,9 @@ def ONNXCastOp:ONNX_Op<"Cast", TypeAttr:$to); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, TensorOf<[StringType]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Cast"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -858,16 +920,17 @@ def ONNXCastOp:ONNX_Op<"Cast", return sh; } }]; - let builders = [ - OpBuilder<(ins "Value":$input, "TypeAttr":$to), [{ - auto resultType = mlir::UnrankedTensorType::get(to.getValue()); - build($_builder, $_state, resultType, input, to); - }] > + let builders = [ + OpBuilder<(ins "Value":$input, "TypeAttr":$to), [{ + 
auto resultType = mlir::UnrankedTensorType::get(to.getValue()); + build($_builder, $_state, resultType, input, to); + }]> ]; + let hasCanonicalizer = 1; } def ONNXCastLikeOp:ONNX_Op<"CastLike", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX CastLike operation"; let description = [{ The operator casts the elements of a given input tensor (the first input) to @@ -878,6 +941,9 @@ def ONNXCastLikeOp:ONNX_Op<"CastLike", AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, TensorOf<[StringType]>, TensorOf<[BF16]>]>:$target_type); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>, TensorOf<[StringType]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "CastLike"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 15; static int getNumberOfOperands() { return 2; } @@ -899,7 +965,7 @@ def ONNXCastLikeOp:ONNX_Op<"CastLike", } def ONNXCeilOp:ONNX_Op<"Ceil", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Ceil operation"; let description = [{ Ceil takes one input data (Tensor) and produces one output data @@ -909,6 +975,9 @@ def ONNXCeilOp:ONNX_Op<"Ceil", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static 
constexpr StringRef onnxName = "Ceil"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -930,7 +999,7 @@ def ONNXCeilOp:ONNX_Op<"Ceil", } def ONNXCeluOp:ONNX_Op<"Celu", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Celu operation"; let description = [{ Continuously Differentiable Exponential Linear Units: @@ -945,6 +1014,9 @@ def ONNXCeluOp:ONNX_Op<"Celu", DefaultValuedAttr:$alpha); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Celu"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 12; static int getNumberOfOperands() { return 1; } @@ -966,7 +1038,7 @@ def ONNXCeluOp:ONNX_Op<"Celu", } def ONNXCenterCropPadOp:ONNX_Op<"CenterCropPad", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX CenterCropPad operation"; let description = [{ Center crop or pad an input to given dimensions. 
@@ -983,6 +1055,9 @@ def ONNXCenterCropPadOp:ONNX_Op<"CenterCropPad", OptionalAttr:$axes); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output_data); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "CenterCropPad"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -1004,7 +1079,7 @@ def ONNXCenterCropPadOp:ONNX_Op<"CenterCropPad", } def ONNXClipOp:ONNX_Op<"Clip", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Clip operation"; let description = [{ Clip operator limits the given input within an interval. 
The interval is @@ -1016,6 +1091,9 @@ def ONNXClipOp:ONNX_Op<"Clip", AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, NoneType]>:$max); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Clip"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 3; } @@ -1037,7 +1115,7 @@ def ONNXClipOp:ONNX_Op<"Clip", } def ONNXClipV12Op:ONNX_Op<"ClipV12", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Clip operation"; let description = [{ Clip operator limits the given input within an interval. 
The interval is @@ -1049,6 +1127,9 @@ def ONNXClipV12Op:ONNX_Op<"ClipV12", AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$max); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Clip"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 12; static int getNumberOfOperands() { return 3; } @@ -1070,7 +1151,7 @@ def ONNXClipV12Op:ONNX_Op<"ClipV12", } def ONNXClipV11Op:ONNX_Op<"ClipV11", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Clip operation"; let description = [{ Clip operator limits the given input within an interval. The interval is @@ -1082,6 +1163,9 @@ def ONNXClipV11Op:ONNX_Op<"ClipV11", AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$max); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Clip"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 3; } @@ -1103,7 +1187,7 @@ def ONNXClipV11Op:ONNX_Op<"ClipV11", } def ONNXClipV6Op:ONNX_Op<"ClipV6", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Clip operation"; let description = [{ Clip operator limits the given input within an interval. 
The interval is @@ -1115,6 +1199,9 @@ def ONNXClipV6Op:ONNX_Op<"ClipV6", DefaultValuedAttr:$min); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Clip"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 6; static int getNumberOfOperands() { return 1; } @@ -1136,7 +1223,7 @@ def ONNXClipV6Op:ONNX_Op<"ClipV6", } def ONNXCol2ImOp:ONNX_Op<"Col2Im", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Col2Im operation"; let description = [{ The operator rearranges column blocks back into a multidimensional image @@ -1158,6 +1245,9 @@ def ONNXCol2ImOp:ONNX_Op<"Col2Im", OptionalAttr:$strides); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Col2Im"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 3; } @@ -1179,7 +1269,7 @@ def ONNXCol2ImOp:ONNX_Op<"Col2Im", } def ONNXCompressOp:ONNX_Op<"Compress", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Compress operation"; let description = [{ Selects slices from an input tensor along a given axis where condition evaluates to True for each axis index. 
@@ -1192,6 +1282,9 @@ def ONNXCompressOp:ONNX_Op<"Compress", OptionalAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Compress"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 2; } @@ -1214,7 +1307,7 @@ def ONNXCompressOp:ONNX_Op<"Compress", } def ONNXConcatOp:ONNX_Op<"Concat", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Concat operation"; let description = [{ Concatenate a list of tensors into a single tensor. All input tensors must have the same shape, except for the dimension size of the axis to concatenate on. 
@@ -1223,6 +1316,9 @@ def ONNXConcatOp:ONNX_Op<"Concat", SI64Attr:$axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$concat_result); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Concat"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return -1; } @@ -1245,7 +1341,7 @@ def ONNXConcatOp:ONNX_Op<"Concat", } def ONNXConcatFromSequenceOp:ONNX_Op<"ConcatFromSequence", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ConcatFromSequence operation"; let description = [{ Concatenate a sequence of tensors into a single tensor. 
@@ -1258,6 +1354,9 @@ def ONNXConcatFromSequenceOp:ONNX_Op<"ConcatFromSequence", DefaultValuedAttr:$new_axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$concat_result); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ConcatFromSequence"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -1280,9 +1379,7 @@ def ONNXConcatFromSequenceOp:ONNX_Op<"ConcatFromSequence", } def ONNXConstantOp:ONNX_Op<"Constant", - [Pure, ConstantLike, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"ResultTypeInferenceOpInterface">]> { - let hasCustomAssemblyFormat = 1; - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, ConstantLike, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"ResultTypeInferenceOpInterface">]> { let summary = "ONNX Constant operation"; let description = [{ This operator produces a constant tensor. 
Exactly one of the provided attributes, either value, sparse_value, @@ -1298,6 +1395,9 @@ def ONNXConstantOp:ONNX_Op<"Constant", OptionalAttr:$value_strings); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Constant"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 0; } @@ -1343,24 +1443,25 @@ def ONNXConstantOp:ONNX_Op<"Constant", } }]; let builders = [ - OpBuilder<(ins "Attribute":$sparse_value, "Attribute":$value), [{ - if (value) { - auto tensorType = value.cast().getType(); - build($_builder, $_state, tensorType, sparse_value, value, - FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr()); - } else { - auto tensorType = sparse_value.cast().getType(); - build($_builder, $_state, tensorType, sparse_value, value, - FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr()); - } - }]> + OpBuilder<(ins "Attribute":$sparse_value, "Attribute":$value), [{ + if (value) { + auto tensorType = value.cast().getType(); + build($_builder, $_state, tensorType, sparse_value, value, + FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr()); + } else { + auto tensorType = sparse_value.cast().getType(); + build($_builder, $_state, tensorType, sparse_value, value, + FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr()); + } + }]> ]; let hasFolder = 1; + let hasCanonicalizer = 1; + let hasCustomAssemblyFormat = 1; } def ONNXConstantOfShapeOp:ONNX_Op<"ConstantOfShape", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, 
OpInterface<"ResultTypeInferenceOpInterface">]> { - let hasCustomAssemblyFormat = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"ResultTypeInferenceOpInterface">]> { let summary = "ONNX ConstantOfShape operation"; let description = [{ Generate a tensor with given value and shape. @@ -1369,6 +1470,9 @@ def ONNXConstantOfShapeOp:ONNX_Op<"ConstantOfShape", OptionalAttr:$value); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ConstantOfShape"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -1399,10 +1503,11 @@ def ONNXConstantOfShapeOp:ONNX_Op<"ConstantOfShape", } }]; let hasVerifier = 1; + let hasCustomAssemblyFormat = 1; } def ONNXConvOp:ONNX_Op<"Conv", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Conv operation"; let description = [{ The convolution operator consumes an input tensor and a filter, and @@ -1427,31 +1532,34 @@ def ONNXConvOp:ONNX_Op<"Conv", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 3; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - 
onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXConvOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Conv"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; + static int getNumberOfOperands() { + return 3; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXConvOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXConvIntegerOp:ONNX_Op<"ConvInteger", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ConvInteger operation"; let description = [{ The integer convolution operator consumes an input tensor, its zero-point, a filter, and its zero-point, @@ -1469,6 +1577,9 @@ def ONNXConvIntegerOp:ONNX_Op<"ConvInteger", OptionalAttr:$strides); let results = (outs TensorOf<[I32]>:$y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ConvInteger"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 4; } @@ -1490,7 +1601,7 @@ def ONNXConvIntegerOp:ONNX_Op<"ConvInteger", } def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ConvTranspose operation"; let description = [{ 
The convolution transpose operator consumes an input tensor and a filter, @@ -1521,6 +1632,9 @@ def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", OptionalAttr:$strides); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ConvTranspose"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 3; } @@ -1543,7 +1657,7 @@ def ONNXConvTransposeOp:ONNX_Op<"ConvTranspose", } def ONNXCosOp:ONNX_Op<"Cos", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Cos operation"; let description = [{ Calculates the cosine of the given input tensor, element-wise. @@ -1551,6 +1665,9 @@ def ONNXCosOp:ONNX_Op<"Cos", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Cos"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; static int getNumberOfOperands() { return 1; } @@ -1572,7 +1689,7 @@ def ONNXCosOp:ONNX_Op<"Cos", } def ONNXCoshOp:ONNX_Op<"Cosh", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Cosh operation"; let description = [{ Calculates the hyperbolic cosine of the given input tensor element-wise. 
@@ -1580,6 +1697,9 @@ def ONNXCoshOp:ONNX_Op<"Cosh", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Cosh"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -1601,7 +1721,7 @@ def ONNXCoshOp:ONNX_Op<"Cosh", } def ONNXCumSumOp:ONNX_Op<"CumSum", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX CumSum operation"; let description = [{ Performs cumulative sum of the input elements along the given axis. @@ -1631,6 +1751,9 @@ def ONNXCumSumOp:ONNX_Op<"CumSum", DefaultValuedAttr:$reverse); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "CumSum"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; static int getNumberOfOperands() { return 2; } @@ -1652,7 +1775,7 @@ def ONNXCumSumOp:ONNX_Op<"CumSum", } def ONNXDFTOp:ONNX_Op<"DFT", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX DFT operation"; let description = [{ Computes the discrete Fourier transform of input. 
@@ -1664,6 +1787,9 @@ def ONNXDFTOp:ONNX_Op<"DFT", DefaultValuedAttr:$onesided); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "DFT"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 17; static int getNumberOfOperands() { return 2; } @@ -1685,8 +1811,7 @@ def ONNXDFTOp:ONNX_Op<"DFT", } def ONNXDepthToSpaceOp:ONNX_Op<"DepthToSpace", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX DepthToSpace operation"; let description = [{ DepthToSpace rearranges (permutes) data from depth into blocks of spatial data. @@ -1722,6 +1847,9 @@ def ONNXDepthToSpaceOp:ONNX_Op<"DepthToSpace", DefaultValuedStrAttr:$mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "DepthToSpace"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -1741,10 +1869,11 @@ def ONNXDepthToSpaceOp:ONNX_Op<"DepthToSpace", } }]; let hasVerifier = 1; + let hasCanonicalizer = 1; } def ONNXDequantizeLinearOp:ONNX_Op<"DequantizeLinear", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX DequantizeLinear operation"; let description = [{ The linear dequantization operator. 
It consumes a quantized tensor, a scale, and a zero point to compute the full precision tensor. @@ -1759,6 +1888,9 @@ def ONNXDequantizeLinearOp:ONNX_Op<"DequantizeLinear", DefaultValuedAttr:$axis); let results = (outs TensorOf<[F32]>:$y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "DequantizeLinear"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 3; } @@ -1781,7 +1913,7 @@ def ONNXDequantizeLinearOp:ONNX_Op<"DequantizeLinear", } def ONNXDetOp:ONNX_Op<"Det", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Det operation"; let description = [{ Det calculates determinant of a square matrix or batches of square matrices. @@ -1793,6 +1925,9 @@ def ONNXDetOp:ONNX_Op<"Det", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Det"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -1814,7 +1949,7 @@ def ONNXDetOp:ONNX_Op<"Det", } def ONNXDivOp:ONNX_Op<"Div", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Div operation"; let description = [{ Performs element-wise binary division (with Numpy-style broadcasting support). 
@@ -1845,32 +1980,34 @@ def ONNXDivOp:ONNX_Op<"Div", resultType = UnrankedTensorType::get(lhsTy.cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXDivOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Div"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXDivOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXDropoutOp:ONNX_Op<"Dropout", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Dropout operation"; let description = [{ Dropout takes an input floating-point tensor, an optional input ratio (floating-point scalar) and an optional input training_mode (boolean scalar). 
It produces two tensor outputs, @@ -1893,6 +2030,9 @@ def ONNXDropoutOp:ONNX_Op<"Dropout", let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output, AnyTypeOf<[TensorOf<[I1]>, NoneType]>:$mask); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Dropout"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 3; } @@ -1911,10 +2051,11 @@ def ONNXDropoutOp:ONNX_Op<"Dropout", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXDynamicQuantizeLinearOp:ONNX_Op<"DynamicQuantizeLinear", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX DynamicQuantizeLinear operation"; let description = [{ A Function to fuse calculation for Scale, Zero Point and FP32->8Bit convertion of FP32 Input data. @@ -1945,6 +2086,9 @@ def ONNXDynamicQuantizeLinearOp:ONNX_Op<"DynamicQuantizeLinear", TensorOf<[F32]>:$y_scale, TensorOf<[UI8]>:$y_zero_point); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "DynamicQuantizeLinear"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -1966,7 +2110,7 @@ def ONNXDynamicQuantizeLinearOp:ONNX_Op<"DynamicQuantizeLinear", } def ONNXEinsumOp:ONNX_Op<"Einsum", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Einsum operation"; let description = [{ An einsum of the form ```term1, term2 -> output-term``` produces an output tensor using the following equation @@ -1997,6 +2141,9 @@ def ONNXEinsumOp:ONNX_Op<"Einsum", StrAttr:$equation); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, 
TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Einsum"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 12; static int getNumberOfOperands() { return -1; } @@ -2019,7 +2166,7 @@ def ONNXEinsumOp:ONNX_Op<"Einsum", } def ONNXEluOp:ONNX_Op<"Elu", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Elu operation"; let description = [{ Elu takes one input data (Tensor) and produces one output data @@ -2031,6 +2178,9 @@ def ONNXEluOp:ONNX_Op<"Elu", DefaultValuedAttr:$alpha); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Elu"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 6; static int getNumberOfOperands() { return 1; } @@ -2052,7 +2202,7 @@ def ONNXEluOp:ONNX_Op<"Elu", } def ONNXEqualOp:ONNX_Op<"Equal", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Equal operation"; let description = [{ Returns the tensor resulted from performing the `equal` logical operation @@ -2084,31 +2234,34 @@ def ONNXEqualOp:ONNX_Op<"Equal", resultType = UnrankedTensorType::get(elTy); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {9}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, 
onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXEqualOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Equal"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {9}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXEqualOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXErfOp:ONNX_Op<"Erf", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Erf operation"; let description = [{ Computes the error function of the given input tensor element-wise. 
@@ -2116,6 +2269,9 @@ def ONNXErfOp:ONNX_Op<"Erf", let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Erf"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -2137,7 +2293,7 @@ def ONNXErfOp:ONNX_Op<"Erf", } def ONNXExpOp:ONNX_Op<"Exp", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Exp operation"; let description = [{ Calculates the exponential of the given input tensor, element-wise. 
@@ -2153,30 +2309,33 @@ def ONNXExpOp:ONNX_Op<"Exp", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXExpOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Exp"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXExpOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXExpandOp:ONNX_Op<"Expand", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Expand operation"; let description = [{ Broadcast the input tensor following the given shape and the broadcast rule. 
@@ -2192,6 +2351,9 @@ def ONNXExpandOp:ONNX_Op<"Expand", TensorOf<[I64]>:$shape); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Expand"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 2; } @@ -2214,7 +2376,7 @@ def ONNXExpandOp:ONNX_Op<"Expand", } def ONNXEyeLikeOp:ONNX_Op<"EyeLike", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX EyeLike operation"; let description = [{ Generate a 2D tensor (matrix) with ones on the diagonal and zeros everywhere else. Only 2D @@ -2230,6 +2392,9 @@ def ONNXEyeLikeOp:ONNX_Op<"EyeLike", DefaultValuedAttr:$k); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I1]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "EyeLike"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -2251,7 +2416,7 @@ def ONNXEyeLikeOp:ONNX_Op<"EyeLike", } def ONNXFlattenOp:ONNX_Op<"Flatten", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Flatten operation"; let description = [{ Flattens the input tensor into a 2D matrix. 
If input tensor has shape @@ -2262,6 +2427,9 @@ def ONNXFlattenOp:ONNX_Op<"Flatten", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Flatten"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -2284,7 +2452,7 @@ def ONNXFlattenOp:ONNX_Op<"Flatten", } def ONNXFloorOp:ONNX_Op<"Floor", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Floor operation"; let description = [{ Floor takes one input data (Tensor) and produces one output data @@ -2294,6 +2462,9 @@ def ONNXFloorOp:ONNX_Op<"Floor", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Floor"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -2315,8 +2486,7 @@ def ONNXFloorOp:ONNX_Op<"Floor", } def ONNXGRUOp:ONNX_Op<"GRU", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GRU operation"; let description = [{ Computes an one-layer GRU. 
This operator is usually supported via some custom @@ -2410,6 +2580,9 @@ def ONNXGRUOp:ONNX_Op<"GRU", let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y, AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "GRU"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; static int getNumberOfOperands() { return 6; } @@ -2428,10 +2601,11 @@ def ONNXGRUOp:ONNX_Op<"GRU", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXGatherOp:ONNX_Op<"Gather", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Gather operation"; let description = [{ Given `data` tensor of rank r >= 1, and `indices` tensor of rank q, gather @@ -2495,6 +2669,9 @@ def ONNXGatherOp:ONNX_Op<"Gather", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Gather"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 2; } @@ -2517,7 +2694,7 @@ def ONNXGatherOp:ONNX_Op<"Gather", } def ONNXGatherElementsOp:ONNX_Op<"GatherElements", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GatherElements operation"; let description = [{ GatherElements takes two inputs `data` and `indices` of the same rank r >= 1 @@ -2577,6 +2754,9 @@ def 
ONNXGatherElementsOp:ONNX_Op<"GatherElements", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "GatherElements"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 2; } @@ -2599,7 +2779,7 @@ def ONNXGatherElementsOp:ONNX_Op<"GatherElements", } def ONNXGatherNDOp:ONNX_Op<"GatherND", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GatherND operation"; let description = [{ Given `data` tensor of rank `r` >= 1, `indices` tensor of rank `q` >= 1, and `batch_dims` integer `b`, this operator gathers @@ -2699,6 +2879,9 @@ def ONNXGatherNDOp:ONNX_Op<"GatherND", DefaultValuedAttr:$batch_dims); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "GatherND"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 2; } @@ -2721,7 +2904,7 @@ def ONNXGatherNDOp:ONNX_Op<"GatherND", } def ONNXGemmOp:ONNX_Op<"Gemm", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX 
Gemm operation"; let description = [{ General Matrix multiplication: @@ -2747,6 +2930,9 @@ def ONNXGemmOp:ONNX_Op<"Gemm", DefaultValuedAttr:$transB); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Gemm"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 3; } @@ -2768,8 +2954,7 @@ def ONNXGemmOp:ONNX_Op<"Gemm", } def ONNXGlobalAveragePoolOp:ONNX_Op<"GlobalAveragePool", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GlobalAveragePool operation"; let description = [{ GlobalAveragePool consumes an input tensor X and applies average pooling across @@ -2779,6 +2964,9 @@ def ONNXGlobalAveragePoolOp:ONNX_Op<"GlobalAveragePool", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "GlobalAveragePool"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -2797,10 +2985,11 @@ def ONNXGlobalAveragePoolOp:ONNX_Op<"GlobalAveragePool", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXGlobalLpPoolOp:ONNX_Op<"GlobalLpPool", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GlobalLpPool operation"; let description = [{ GlobalLpPool consumes an input tensor X and applies lp pool pooling across @@ -2811,6 +3000,9 @@ def 
ONNXGlobalLpPoolOp:ONNX_Op<"GlobalLpPool", DefaultValuedAttr:$p); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "GlobalLpPool"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 2; static int getNumberOfOperands() { return 1; } @@ -2832,8 +3024,7 @@ def ONNXGlobalLpPoolOp:ONNX_Op<"GlobalLpPool", } def ONNXGlobalMaxPoolOp:ONNX_Op<"GlobalMaxPool", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GlobalMaxPool operation"; let description = [{ GlobalMaxPool consumes an input tensor X and applies max pooling across @@ -2843,6 +3034,9 @@ def ONNXGlobalMaxPoolOp:ONNX_Op<"GlobalMaxPool", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "GlobalMaxPool"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -2861,10 +3055,11 @@ def ONNXGlobalMaxPoolOp:ONNX_Op<"GlobalMaxPool", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXGreaterOp:ONNX_Op<"Greater", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Greater operation"; let description = [{ Returns the tensor resulted from performing the `greater` logical operation @@ -2896,31 +3091,34 @@ def ONNXGreaterOp:ONNX_Op<"Greater", resultType = UnrankedTensorType::get(elTy); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - 
} - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {9}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXGreaterOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Greater"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {9}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXGreaterOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXGreaterOrEqualOp:ONNX_Op<"GreaterOrEqual", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GreaterOrEqual operation"; let description = [{ Returns the tensor resulted from performing the `greater_equal` logical operation @@ -2932,6 +3130,9 @@ def ONNXGreaterOrEqualOp:ONNX_Op<"GreaterOrEqual", AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$B); let results = (outs TensorOf<[I1]>:$C); let extraClassDeclaration = [{ + static 
constexpr StringRef onnxName = "GreaterOrEqual"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return 2; } @@ -2954,7 +3155,7 @@ def ONNXGreaterOrEqualOp:ONNX_Op<"GreaterOrEqual", } def ONNXGridSampleOp:ONNX_Op<"GridSample", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GridSample operation"; let description = [{ Given an input `X` and a flow-field `grid`, computes the output `Y` using `X` values and pixel locations from `grid`. @@ -2978,6 +3179,9 @@ def ONNXGridSampleOp:ONNX_Op<"GridSample", DefaultValuedStrAttr:$padding_mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "GridSample"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return 2; } @@ -2999,7 +3203,7 @@ def ONNXGridSampleOp:ONNX_Op<"GridSample", } def ONNXGroupNormalizationOp:ONNX_Op<"GroupNormalization", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX GroupNormalization operation"; let description = [{ A GroupNormalization function. 
Carries out group normalization as described in @@ -3025,6 +3229,9 @@ def ONNXGroupNormalizationOp:ONNX_Op<"GroupNormalization", SI64Attr:$num_groups); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "GroupNormalization"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 3; } @@ -3046,7 +3253,7 @@ def ONNXGroupNormalizationOp:ONNX_Op<"GroupNormalization", } def ONNXHammingWindowOp:ONNX_Op<"HammingWindow", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX HammingWindow operation"; let description = [{ Generates a Hamming window as described in the paper https://ieeexplore.ieee.org/document/1455106. @@ -3056,6 +3263,9 @@ def ONNXHammingWindowOp:ONNX_Op<"HammingWindow", DefaultValuedAttr:$periodic); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "HammingWindow"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 17; static int getNumberOfOperands() { return 1; } @@ -3077,7 +3287,7 @@ def ONNXHammingWindowOp:ONNX_Op<"HammingWindow", } def ONNXHannWindowOp:ONNX_Op<"HannWindow", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX HannWindow operation"; let description = [{ Generates a Hann window as described in the paper https://ieeexplore.ieee.org/document/1455106. 
@@ -3087,6 +3297,9 @@ def ONNXHannWindowOp:ONNX_Op<"HannWindow", DefaultValuedAttr:$periodic); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "HannWindow"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 17; static int getNumberOfOperands() { return 1; } @@ -3108,7 +3321,7 @@ def ONNXHannWindowOp:ONNX_Op<"HannWindow", } def ONNXHardSigmoidOp:ONNX_Op<"HardSigmoid", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX HardSigmoid operation"; let description = [{ HardSigmoid takes one input data (Tensor) and produces one output data @@ -3120,6 +3333,9 @@ def ONNXHardSigmoidOp:ONNX_Op<"HardSigmoid", DefaultValuedAttr:$beta); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "HardSigmoid"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 6; static int getNumberOfOperands() { return 1; } @@ -3141,7 +3357,7 @@ def ONNXHardSigmoidOp:ONNX_Op<"HardSigmoid", } def ONNXHardSwishOp:ONNX_Op<"HardSwish", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX HardSwish operation"; let description = [{ HardSwish takes one input data (Tensor) and produces one output data (Tensor) where @@ -3151,6 +3367,9 @@ def ONNXHardSwishOp:ONNX_Op<"HardSwish", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, 
TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "HardSwish"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; static int getNumberOfOperands() { return 1; } @@ -3172,7 +3391,7 @@ def ONNXHardSwishOp:ONNX_Op<"HardSwish", } def ONNXHardmaxOp:ONNX_Op<"Hardmax", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Hardmax operation"; let description = [{ The operator computes the hardmax values for the given input: @@ -3187,6 +3406,9 @@ def ONNXHardmaxOp:ONNX_Op<"Hardmax", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Hardmax"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -3209,8 +3431,7 @@ def ONNXHardmaxOp:ONNX_Op<"Hardmax", } def ONNXIdentityOp:ONNX_Op<"Identity", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Identity operation"; let description = [{ Identity operator @@ -3226,30 +3447,34 @@ def ONNXIdentityOp:ONNX_Op<"Identity", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, 
onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXIdentityOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Identity"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXIdentityOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasCanonicalizer = 1; } def ONNXIfOp:ONNX_Op<"If", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"HasOnnxSubgraphOpInterface">]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"HasOnnxSubgraphOpInterface">]> { let summary = "ONNX If operation"; let description = [{ If conditional @@ -3259,6 +3484,9 @@ def ONNXIfOp:ONNX_Op<"If", let regions = (region SizedRegion<1>:$then_branch, SizedRegion<1>:$else_branch); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "If"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return 1; } @@ -3268,11 +3496,11 @@ def ONNXIfOp:ONNX_Op<"If", static std::vector getTypeMap() { return {-1}; } - int64_t getSubgraphRegionIdx(const std::string& name) { - if (name == "then_branch") return 0; - if (name == "else_branch") return 1; - llvm_unreachable("region with the specified name does not exist"); - } + int64_t getSubgraphRegionIdx(const 
std::string& name) { + if (name == "then_branch") return 0; + if (name == "else_branch") return 1; + llvm_unreachable("region with the specified name does not exist"); + } }]; let extraClassDefinition = [{ onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, @@ -3286,7 +3514,7 @@ def ONNXIfOp:ONNX_Op<"If", } def ONNXInstanceNormalizationOp:ONNX_Op<"InstanceNormalization", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX InstanceNormalization operation"; let description = [{ Carries out instance normalization as described in the paper @@ -3302,6 +3530,9 @@ def ONNXInstanceNormalizationOp:ONNX_Op<"InstanceNormalization", DefaultValuedAttr:$epsilon); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "InstanceNormalization"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 6; static int getNumberOfOperands() { return 3; } @@ -3324,7 +3555,7 @@ def ONNXInstanceNormalizationOp:ONNX_Op<"InstanceNormalization", } def ONNXIsInfOp:ONNX_Op<"IsInf", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX IsInf operation"; let description = [{ Map infinity to true and other values to false. 
@@ -3334,6 +3565,9 @@ def ONNXIsInfOp:ONNX_Op<"IsInf", DefaultValuedAttr:$detect_positive); let results = (outs TensorOf<[I1]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "IsInf"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 1; } @@ -3355,7 +3589,7 @@ def ONNXIsInfOp:ONNX_Op<"IsInf", } def ONNXIsNaNOp:ONNX_Op<"IsNaN", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX IsNaN operation"; let description = [{ Returns which elements of the input are NaN. @@ -3363,6 +3597,9 @@ def ONNXIsNaNOp:ONNX_Op<"IsNaN", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X); let results = (outs TensorOf<[I1]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "IsNaN"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -3384,7 +3621,7 @@ def ONNXIsNaNOp:ONNX_Op<"IsNaN", } def ONNXLRNOp:ONNX_Op<"LRN", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LRN operation"; let description = [{ Local Response Normalization proposed in the [AlexNet paper](https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf). 
@@ -3405,6 +3642,9 @@ def ONNXLRNOp:ONNX_Op<"LRN", SI64Attr:$size); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LRN"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -3426,8 +3666,7 @@ def ONNXLRNOp:ONNX_Op<"LRN", } def ONNXLSTMOp:ONNX_Op<"LSTM", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LSTM operation"; let description = [{ Computes an one-layer LSTM. This operator is usually supported via some @@ -3532,6 +3771,9 @@ def ONNXLSTMOp:ONNX_Op<"LSTM", AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h, AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_c); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LSTM"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; static int getNumberOfOperands() { return 8; } @@ -3550,10 +3792,11 @@ def ONNXLSTMOp:ONNX_Op<"LSTM", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXLayerNormalizationOp:ONNX_Op<"LayerNormalization", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LayerNormalization operation"; let description = [{ This is layer normalization defined in ONNX as function. 
@@ -3606,6 +3849,9 @@ def ONNXLayerNormalizationOp:ONNX_Op<"LayerNormalization", AnyTypeOf<[TensorOf<[F32]>, TensorOf<[BF16]>, NoneType]>:$Mean, AnyTypeOf<[TensorOf<[F32]>, TensorOf<[BF16]>, NoneType]>:$InvStdDev); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LayerNormalization"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 17; static int getNumberOfOperands() { return 3; } @@ -3627,7 +3873,7 @@ def ONNXLayerNormalizationOp:ONNX_Op<"LayerNormalization", } def ONNXLeakyReluOp:ONNX_Op<"LeakyRelu", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LeakyRelu operation"; let description = [{ LeakyRelu takes input data (Tensor) and an argument alpha, and produces one @@ -3641,6 +3887,9 @@ def ONNXLeakyReluOp:ONNX_Op<"LeakyRelu", DefaultValuedAttr:$alpha); let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LeakyRelu"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return 1; } @@ -3662,8 +3911,7 @@ def ONNXLeakyReluOp:ONNX_Op<"LeakyRelu", } def ONNXLessOp:ONNX_Op<"Less", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Less operation"; let description = [{ Returns the tensor resulted from performing the `less` logical operation @@ -3695,35 +3943,39 @@ def ONNXLessOp:ONNX_Op<"Less", resultType = UnrankedTensorType::get(elTy); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - 
static std::vector getTypeMap() { - return {9}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXLessOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; -} - -def ONNXLessOrEqualOp:ONNX_Op<"LessOrEqual", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let summary = "ONNX LessOrEqual operation"; - let description = [{ - Returns the tensor resulted from performing the `less_equal` logical operation - elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Less"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {9}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXLessOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; + let hasCanonicalizer = 1; +} + +def ONNXLessOrEqualOp:ONNX_Op<"LessOrEqual", + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + let summary = "ONNX LessOrEqual operation"; + let description = [{ + Returns the tensor resulted from performing the `less_equal` logical operation + elementwise on the input tensors `A` and `B` (with Numpy-style broadcasting support). 
This operator supports **multidirectional (i.e., Numpy-style) broadcasting**; for more details please check [the doc](Broadcasting.md). }]; @@ -3731,6 +3983,9 @@ def ONNXLessOrEqualOp:ONNX_Op<"LessOrEqual", AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$B); let results = (outs TensorOf<[I1]>:$C); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LessOrEqual"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return 2; } @@ -3753,7 +4008,7 @@ def ONNXLessOrEqualOp:ONNX_Op<"LessOrEqual", } def ONNXLogOp:ONNX_Op<"Log", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Log operation"; let description = [{ Calculates the natural log of the given input tensor, element-wise. 
@@ -3761,6 +4016,9 @@ def ONNXLogOp:ONNX_Op<"Log", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Log"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -3782,7 +4040,7 @@ def ONNXLogOp:ONNX_Op<"Log", } def ONNXLogSoftmaxOp:ONNX_Op<"LogSoftmax", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LogSoftmax operation"; let description = [{ The operator computes the log of softmax values for the given input: @@ -3797,6 +4055,9 @@ def ONNXLogSoftmaxOp:ONNX_Op<"LogSoftmax", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LogSoftmax"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -3819,8 +4080,7 @@ def ONNXLogSoftmaxOp:ONNX_Op<"LogSoftmax", } def ONNXLoopOp:ONNX_Op<"Loop", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"HasOnnxSubgraphOpInterface">]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"HasOnnxSubgraphOpInterface">]> { let summary = "ONNX Loop operation"; let description = [{ Generic Looping construct. 
This loop has multiple termination conditions: @@ -3965,6 +4225,9 @@ def ONNXLoopOp:ONNX_Op<"Loop", let results = (outs Variadic, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>, SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[BF16]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>]>>, OptOf]>]>>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf]>>, OptOf]>>]>>:$v_final_and_scan_outputs); let regions = (region SizedRegion<1>:$body); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Loop"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return -1; } @@ -3974,13 +4237,12 @@ def ONNXLoopOp:ONNX_Op<"Loop", static std::vector getTypeMap() { return {22}; } - mlir::Operation::result_range v_final(); mlir::Operation::result_range scan_outputs(); int64_t getSubgraphRegionIdx(const std::string& name) { - if (name == "body") return 0; - llvm_unreachable("region with the specified name does not exist"); - } + if (name == "body") return 0; + llvm_unreachable("region with the specified name does not exist"); + } }]; let extraClassDefinition = [{ onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, 
llvm::ArrayRef oper, @@ -3990,10 +4252,11 @@ def ONNXLoopOp:ONNX_Op<"Loop", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LpNormalization operation"; let description = [{ Given a matrix, apply Lp-normalization along the provided axis. @@ -4003,6 +4266,9 @@ def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization", DefaultValuedAttr:$p); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LpNormalization"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -4024,7 +4290,7 @@ def ONNXLpNormalizationOp:ONNX_Op<"LpNormalization", } def ONNXLpPoolOp:ONNX_Op<"LpPool", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LpPool operation"; let description = [{ LpPool consumes an input tensor X and applies Lp pooling across @@ -4065,6 +4331,9 @@ def ONNXLpPoolOp:ONNX_Op<"LpPool", OptionalAttr:$strides); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LpPool"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 1; } @@ -4086,7 +4355,7 @@ def ONNXLpPoolOp:ONNX_Op<"LpPool", } def ONNXMatMulOp:ONNX_Op<"MatMul", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MatMul operation"; let description = [{ Matrix product that behaves like 
numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html @@ -4095,6 +4364,9 @@ def ONNXMatMulOp:ONNX_Op<"MatMul", AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>]>:$B); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "MatMul"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 2; } @@ -4116,7 +4388,7 @@ def ONNXMatMulOp:ONNX_Op<"MatMul", } def ONNXMatMulIntegerOp:ONNX_Op<"MatMulInteger", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MatMulInteger operation"; let description = [{ Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. @@ -4128,6 +4400,9 @@ def ONNXMatMulIntegerOp:ONNX_Op<"MatMulInteger", AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>, NoneType]>:$b_zero_point); let results = (outs TensorOf<[I32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "MatMulInteger"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 4; } @@ -4150,7 +4425,7 @@ def ONNXMatMulIntegerOp:ONNX_Op<"MatMulInteger", } def ONNXMaxOp:ONNX_Op<"Max", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Max operation"; let description = [{ Element-wise max of each of the input tensors (with Numpy-style broadcasting support). 
@@ -4160,6 +4435,9 @@ def ONNXMaxOp:ONNX_Op<"Max", let arguments = (ins Variadic, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>>:$data_0); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$max); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Max"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return -1; } @@ -4182,7 +4460,7 @@ def ONNXMaxOp:ONNX_Op<"Max", } def ONNXMaxPoolOp:ONNX_Op<"MaxPool", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MaxPool operation"; let description = [{ MaxPool consumes an input tensor X and applies max pooling across @@ -4226,6 +4504,9 @@ def ONNXMaxPoolOp:ONNX_Op<"MaxPool", let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I8]>, TensorOf<[UI8]>]>:$Y, AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$Indices); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "MaxPool"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 12; static int getNumberOfOperands() { return 1; } @@ -4247,7 +4528,7 @@ def ONNXMaxPoolOp:ONNX_Op<"MaxPool", } def ONNXMaxRoiPoolOp:ONNX_Op<"MaxRoiPool", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MaxRoiPool operation"; let description = [{ ROI max pool consumes an input tensor X and region of interests (RoIs) to @@ -4260,6 +4541,9 @@ def 
ONNXMaxRoiPoolOp:ONNX_Op<"MaxRoiPool", DefaultValuedAttr:$spatial_scale); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "MaxRoiPool"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 2; } @@ -4281,7 +4565,7 @@ def ONNXMaxRoiPoolOp:ONNX_Op<"MaxRoiPool", } def ONNXMaxUnpoolOp:ONNX_Op<"MaxUnpool", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MaxUnpool operation"; let description = [{ MaxUnpool essentially computes the partial inverse of the MaxPool op. @@ -4311,6 +4595,9 @@ def ONNXMaxUnpoolOp:ONNX_Op<"MaxUnpool", OptionalAttr:$strides); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "MaxUnpool"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 3; } @@ -4332,7 +4619,7 @@ def ONNXMaxUnpoolOp:ONNX_Op<"MaxUnpool", } def ONNXMeanOp:ONNX_Op<"Mean", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Mean operation"; let description = [{ Element-wise mean of each of the input tensors (with Numpy-style broadcasting support). 
@@ -4342,6 +4629,9 @@ def ONNXMeanOp:ONNX_Op<"Mean", let arguments = (ins Variadic, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>>:$data_0); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$mean); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Mean"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return -1; } @@ -4364,7 +4654,7 @@ def ONNXMeanOp:ONNX_Op<"Mean", } def ONNXMeanVarianceNormalizationOp:ONNX_Op<"MeanVarianceNormalization", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MeanVarianceNormalization operation"; let description = [{ A MeanVarianceNormalization Function: Perform mean variance normalization @@ -4374,6 +4664,9 @@ def ONNXMeanVarianceNormalizationOp:ONNX_Op<"MeanVarianceNormalization", DefaultValuedAttr:$axes); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "MeanVarianceNormalization"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -4395,7 +4688,7 @@ def ONNXMeanVarianceNormalizationOp:ONNX_Op<"MeanVarianceNormalization", } def ONNXMelWeightMatrixOp:ONNX_Op<"MelWeightMatrix", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX MelWeightMatrix operation"; let description = [{ Generate a MelWeightMatrix that can be used to re-weight a Tensor containing a linearly sampled frequency spectra (from DFT or STFT) into num_mel_bins frequency information based on the [lower_edge_hertz, upper_edge_hertz] range on the mel scale. 
@@ -4415,6 +4708,9 @@ def ONNXMelWeightMatrixOp:ONNX_Op<"MelWeightMatrix", DefaultValuedAttr:$output_datatype); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "MelWeightMatrix"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 17; static int getNumberOfOperands() { return 5; } @@ -4436,7 +4732,7 @@ def ONNXMelWeightMatrixOp:ONNX_Op<"MelWeightMatrix", } def ONNXMinOp:ONNX_Op<"Min", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Min operation"; let description = [{ Element-wise min of each of the input tensors (with Numpy-style broadcasting support). @@ -4446,6 +4742,9 @@ def ONNXMinOp:ONNX_Op<"Min", let arguments = (ins Variadic, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>>:$data_0); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$min); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Min"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return -1; } @@ -4468,7 +4767,7 @@ def ONNXMinOp:ONNX_Op<"Min", } def ONNXMishOp:ONNX_Op<"Mish", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX 
Mish operation"; let description = [{ Mish: A Self Regularized Non-Monotonic Neural Activation Function. @@ -4482,6 +4781,9 @@ def ONNXMishOp:ONNX_Op<"Mish", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Mish"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 1; } @@ -4503,7 +4805,7 @@ def ONNXMishOp:ONNX_Op<"Mish", } def ONNXModOp:ONNX_Op<"Mod", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Mod operation"; let description = [{ Performs element-wise binary modulus (with Numpy-style broadcasting support). @@ -4525,6 +4827,9 @@ def ONNXModOp:ONNX_Op<"Mod", DefaultValuedAttr:$fmod); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$C); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Mod"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 2; } @@ -4547,8 +4852,7 @@ def ONNXModOp:ONNX_Op<"Mod", } def ONNXMulOp:ONNX_Op<"Mul", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Mul operation"; let description = [{ Performs element-wise binary multiplication (with Numpy-style broadcasting support). 
@@ -4579,31 +4883,35 @@ def ONNXMulOp:ONNX_Op<"Mul", resultType = UnrankedTensorType::get(lhsTy.cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXMulOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Mul"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXMulOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; + let hasCanonicalizer = 1; } def ONNXMultinomialOp:ONNX_Op<"Multinomial", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Multinomial operation"; let description = [{ Generate a tensor of samples from a multinomial distribution according to the probabilities @@ -4615,6 +4923,9 @@ def ONNXMultinomialOp:ONNX_Op<"Multinomial", 
OptionalAttr:$seed); let results = (outs AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Multinomial"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; static int getNumberOfOperands() { return 1; } @@ -4636,7 +4947,7 @@ def ONNXMultinomialOp:ONNX_Op<"Multinomial", } def ONNXNegOp:ONNX_Op<"Neg", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Neg operation"; let description = [{ Neg takes one input data (Tensor) and produces one output data @@ -4654,30 +4965,33 @@ def ONNXNegOp:ONNX_Op<"Neg", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXNegOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Neg"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, 
onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXNegOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXNegativeLogLikelihoodLossOp:ONNX_Op<"NegativeLogLikelihoodLoss", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX NegativeLogLikelihoodLoss operation"; let description = [{ A NegativeLogLikelihoodLoss operator computes (weighted) negative log likelihood loss. @@ -4774,6 +5088,9 @@ def ONNXNegativeLogLikelihoodLossOp:ONNX_Op<"NegativeLogLikelihoodLoss", DefaultValuedStrAttr:$reduction); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$loss); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "NegativeLogLikelihoodLoss"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 3; } @@ -4795,7 +5112,7 @@ def ONNXNegativeLogLikelihoodLossOp:ONNX_Op<"NegativeLogLikelihoodLoss", } def ONNXNonMaxSuppressionOp:ONNX_Op<"NonMaxSuppression", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX NonMaxSuppression operation"; let description = [{ Filter out boxes that have high intersection-over-union (IOU) overlap with previously selected boxes. 
@@ -4814,6 +5131,9 @@ def ONNXNonMaxSuppressionOp:ONNX_Op<"NonMaxSuppression", DefaultValuedAttr:$center_point_box); let results = (outs TensorOf<[I64]>:$selected_indices); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "NonMaxSuppression"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 5; } @@ -4836,7 +5156,7 @@ def ONNXNonMaxSuppressionOp:ONNX_Op<"NonMaxSuppression", } def ONNXNonZeroOp:ONNX_Op<"NonZero", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX NonZero operation"; let description = [{ Returns the indices of the elements that are non-zero @@ -4848,6 +5168,9 @@ def ONNXNonZeroOp:ONNX_Op<"NonZero", let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$X); let results = (outs TensorOf<[I64]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "NonZero"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -4869,7 +5192,7 @@ def ONNXNonZeroOp:ONNX_Op<"NonZero", } def ONNXNotOp:ONNX_Op<"Not", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Not operation"; let description = [{ Returns the negation of the input tensor element-wise. 
@@ -4877,6 +5200,9 @@ def ONNXNotOp:ONNX_Op<"Not", let arguments = (ins TensorOf<[I1]>:$X); let results = (outs TensorOf<[I1]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Not"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -4898,7 +5224,7 @@ def ONNXNotOp:ONNX_Op<"Not", } def ONNXOneHotOp:ONNX_Op<"OneHot", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX OneHot operation"; let description = [{ Produces a one-hot tensor based on inputs. @@ -4927,6 +5253,9 @@ def ONNXOneHotOp:ONNX_Op<"OneHot", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "OneHot"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 3; } @@ -4949,7 +5278,7 @@ def ONNXOneHotOp:ONNX_Op<"OneHot", } def ONNXOptionalOp:ONNX_Op<"Optional", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Optional operation"; let description = [{ Constructs an optional-type value containing either an empty optional of a certain type specified by the attribute, @@ -4959,6 +5288,9 @@ def ONNXOptionalOp:ONNX_Op<"Optional", OptionalAttr:$type); let results = (outs AnyTypeOf<[OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>]>>, 
OptOf]>]>>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf]>>, OptOf]>>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Optional"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 15; static int getNumberOfOperands() { return 1; } @@ -4981,7 +5313,7 @@ def ONNXOptionalOp:ONNX_Op<"Optional", } def ONNXOptionalGetElementOp:ONNX_Op<"OptionalGetElement", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX OptionalGetElement operation"; let description = [{ If the input is a tensor or sequence type, it returns the input. @@ -4991,6 +5323,9 @@ def ONNXOptionalGetElementOp:ONNX_Op<"OptionalGetElement", let arguments = (ins AnyTypeOf<[OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>]>>, OptOf]>]>>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf]>>, OptOf]>>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>, SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, 
TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>, SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "OptionalGetElement"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 1; } @@ -5013,7 +5348,7 @@ def ONNXOptionalGetElementOp:ONNX_Op<"OptionalGetElement", } def ONNXOptionalHasElementOp:ONNX_Op<"OptionalHasElement", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX OptionalHasElement operation"; let description = [{ Returns true if (1) the input is an optional-type and contains an element, @@ -5023,6 +5358,9 @@ def ONNXOptionalHasElementOp:ONNX_Op<"OptionalHasElement", let arguments = (ins AnyTypeOf<[OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>>, OptOf]>]>>, OptOf]>]>>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf>, OptOf]>>, OptOf]>>, TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>, SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, 
SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>, NoneType]>:$input); let results = (outs TensorOf<[I1]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "OptionalHasElement"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 1; } @@ -5045,7 +5383,7 @@ def ONNXOptionalHasElementOp:ONNX_Op<"OptionalHasElement", } def ONNXOrOp:ONNX_Op<"Or", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Or operation"; let description = [{ Returns the tensor resulted from performing the `or` logical operation @@ -5075,31 +5413,34 @@ def ONNXOrOp:ONNX_Op<"Or", resultType = UnrankedTensorType::get(lhsTy.cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {9}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXOrOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Or"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; + static int 
getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {9}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXOrOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXPReluOp:ONNX_Op<"PRelu", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX PRelu operation"; let description = [{ PRelu takes input data (Tensor) and slope tensor as input, and produces one @@ -5114,6 +5455,9 @@ def ONNXPReluOp:ONNX_Op<"PRelu", AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$slope); let results = (outs AnyTypeOf<[TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "PRelu"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return 2; } @@ -5136,7 +5480,7 @@ def ONNXPReluOp:ONNX_Op<"PRelu", } def ONNXPadOp:ONNX_Op<"Pad", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Pad operation"; let description = [{ Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`, @@ -5230,31 +5574,34 @@ def 
ONNXPadOp:ONNX_Op<"Pad", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 4; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXPadOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Pad"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; + static int getNumberOfOperands() { + return 4; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXPadOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXPadV13Op:ONNX_Op<"PadV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Pad operation"; let description = [{ Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`, @@ -5339,6 +5686,9 @@ def 
ONNXPadV13Op:ONNX_Op<"PadV13", DefaultValuedStrAttr:$mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Pad"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 3; } @@ -5360,7 +5710,7 @@ def ONNXPadV13Op:ONNX_Op<"PadV13", } def ONNXPadV11Op:ONNX_Op<"PadV11", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Pad operation"; let description = [{ Given a tensor containing the data to be padded (`data`), a tensor containing the number of start and end pad values for axis (`pads`), (optionally) a `mode`, and (optionally) `constant_value`, @@ -5445,6 +5795,9 @@ def ONNXPadV11Op:ONNX_Op<"PadV11", DefaultValuedStrAttr:$mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Pad"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 3; } @@ -5466,7 +5819,7 @@ def ONNXPadV11Op:ONNX_Op<"PadV11", } def ONNXPadV2Op:ONNX_Op<"PadV2", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Pad operation"; let description = [{ Given `data` tensor, pads, mode, and value. 
@@ -5492,6 +5845,9 @@ def ONNXPadV2Op:ONNX_Op<"PadV2", DefaultValuedAttr:$value); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Pad"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 2; static int getNumberOfOperands() { return 1; } @@ -5513,8 +5869,7 @@ def ONNXPadV2Op:ONNX_Op<"PadV2", } def ONNXPowOp:ONNX_Op<"Pow", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Pow operation"; let description = [{ Pow takes input data (Tensor) and exponent Tensor, and @@ -5544,31 +5899,35 @@ def ONNXPowOp:ONNX_Op<"Pow", resultType = UnrankedTensorType::get(lhsTy.cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXPowOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Pow"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 15; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * 
$cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXPowOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; + let hasCanonicalizer = 1; } def ONNXQLinearConvOp:ONNX_Op<"QLinearConv", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX QLinearConv operation"; let description = [{ The convolution operator consumes a quantized input tensor, its scale and zero point, @@ -5596,6 +5955,9 @@ def ONNXQLinearConvOp:ONNX_Op<"QLinearConv", OptionalAttr:$strides); let results = (outs AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>]>:$y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "QLinearConv"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 9; } @@ -5617,7 +5979,7 @@ def ONNXQLinearConvOp:ONNX_Op<"QLinearConv", } def ONNXQLinearMatMulOp:ONNX_Op<"QLinearMatMul", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX QLinearMatMul operation"; let description = [{ Matrix product that behaves like numpy.matmul: https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html. 
@@ -5642,6 +6004,9 @@ def ONNXQLinearMatMulOp:ONNX_Op<"QLinearMatMul", AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>]>:$y_zero_point); let results = (outs AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>]>:$y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "QLinearMatMul"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 8; } @@ -5663,7 +6028,7 @@ def ONNXQLinearMatMulOp:ONNX_Op<"QLinearMatMul", } def ONNXQuantizeLinearOp:ONNX_Op<"QuantizeLinear", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX QuantizeLinear operation"; let description = [{ The linear quantization operator. It consumes a high precision tensor, a scale, and a zero point to compute the low precision / quantized tensor. @@ -5678,6 +6043,9 @@ def ONNXQuantizeLinearOp:ONNX_Op<"QuantizeLinear", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[I8]>, TensorOf<[UI8]>]>:$y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "QuantizeLinear"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 3; } @@ -5699,8 +6067,7 @@ def ONNXQuantizeLinearOp:ONNX_Op<"QuantizeLinear", } def ONNXRNNOp:ONNX_Op<"RNN", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX RNN operation"; let description = [{ Computes an one-layer simple RNN. 
This operator is usually supported @@ -5781,6 +6148,9 @@ def ONNXRNNOp:ONNX_Op<"RNN", let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y, AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, NoneType]>:$Y_h); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "RNN"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; static int getNumberOfOperands() { return 6; } @@ -5799,10 +6169,11 @@ def ONNXRNNOp:ONNX_Op<"RNN", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXRandomNormalOp:ONNX_Op<"RandomNormal", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"ResultTypeInferenceOpInterface">]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"ResultTypeInferenceOpInterface">]> { let summary = "ONNX RandomNormal operation"; let description = [{ Generate a tensor with random values drawn from a normal distribution. The shape @@ -5820,6 +6191,9 @@ def ONNXRandomNormalOp:ONNX_Op<"RandomNormal", I64ArrayAttr:$shape); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "RandomNormal"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 0; } @@ -5862,7 +6236,7 @@ def ONNXRandomNormalOp:ONNX_Op<"RandomNormal", } def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX RandomNormalLike operation"; let description = [{ Generate a tensor with random values drawn from a normal distribution. 
@@ -5880,6 +6254,9 @@ def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike", OptionalAttr:$seed); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "RandomNormalLike"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -5902,7 +6279,7 @@ def ONNXRandomNormalLikeOp:ONNX_Op<"RandomNormalLike", } def ONNXRandomUniformOp:ONNX_Op<"RandomUniform", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX RandomUniform operation"; let description = [{ Generate a tensor with random values drawn from a uniform distribution. The shape @@ -5919,6 +6296,9 @@ def ONNXRandomUniformOp:ONNX_Op<"RandomUniform", I64ArrayAttr:$shape); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "RandomUniform"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 0; } @@ -5940,7 +6320,7 @@ def ONNXRandomUniformOp:ONNX_Op<"RandomUniform", } def ONNXRandomUniformLikeOp:ONNX_Op<"RandomUniformLike", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX RandomUniformLike operation"; let description = [{ Generate a tensor with random values drawn from a uniform distribution. 
@@ -5958,6 +6338,9 @@ def ONNXRandomUniformLikeOp:ONNX_Op<"RandomUniformLike", OptionalAttr:$seed); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "RandomUniformLike"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -5979,7 +6362,7 @@ def ONNXRandomUniformLikeOp:ONNX_Op<"RandomUniformLike", } def ONNXRangeOp:ONNX_Op<"Range", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Range operation"; let description = [{ Generate a tensor containing a sequence of numbers that begin at `start` and extends by increments of `delta` @@ -6013,6 +6396,9 @@ def ONNXRangeOp:ONNX_Op<"Range", AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$delta); let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Range"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 3; } @@ -6035,7 +6421,7 @@ def ONNXRangeOp:ONNX_Op<"Range", } def ONNXReciprocalOp:ONNX_Op<"Reciprocal", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Reciprocal operation"; let description = [{ Reciprocal takes one input data (Tensor) and produces one output data @@ -6045,6 +6431,9 @@ def ONNXReciprocalOp:ONNX_Op<"Reciprocal", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, 
TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Reciprocal"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6066,7 +6455,7 @@ def ONNXReciprocalOp:ONNX_Op<"Reciprocal", } def ONNXReduceL1Op:ONNX_Op<"ReduceL1", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceL1 operation"; let description = [{ Computes the L1 norm of the input tensor's element along the provided axes. The resulting @@ -6082,6 +6471,9 @@ def ONNXReduceL1Op:ONNX_Op<"ReduceL1", DefaultValuedAttr:$noop_with_empty_axes); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceL1"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -6103,7 +6495,7 @@ def ONNXReduceL1Op:ONNX_Op<"ReduceL1", } def ONNXReduceL1V13Op:ONNX_Op<"ReduceL1V13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceL1 operation"; let description = [{ Computes the L1 norm of the input tensor's element along the provided axes. 
The resulting @@ -6118,6 +6510,9 @@ def ONNXReduceL1V13Op:ONNX_Op<"ReduceL1V13", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceL1"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6139,7 +6534,7 @@ def ONNXReduceL1V13Op:ONNX_Op<"ReduceL1V13", } def ONNXReduceL2Op:ONNX_Op<"ReduceL2", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceL2 operation"; let description = [{ Computes the L2 norm of the input tensor's element along the provided axes. The resulting @@ -6155,6 +6550,9 @@ def ONNXReduceL2Op:ONNX_Op<"ReduceL2", DefaultValuedAttr:$noop_with_empty_axes); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceL2"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -6176,7 +6574,7 @@ def ONNXReduceL2Op:ONNX_Op<"ReduceL2", } def ONNXReduceL2V13Op:ONNX_Op<"ReduceL2V13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceL2 operation"; let description = [{ Computes the L2 norm of the input tensor's element along the provided axes. 
The resulting @@ -6191,6 +6589,9 @@ def ONNXReduceL2V13Op:ONNX_Op<"ReduceL2V13", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceL2"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6212,7 +6613,7 @@ def ONNXReduceL2V13Op:ONNX_Op<"ReduceL2V13", } def ONNXReduceLogSumOp:ONNX_Op<"ReduceLogSum", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceLogSum operation"; let description = [{ Computes the log sum of the input tensor's element along the provided axes. The resulting @@ -6236,30 +6637,33 @@ def ONNXReduceLogSumOp:ONNX_Op<"ReduceLogSum", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceLogSumOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceLogSum"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; + static int getNumberOfOperands() { + return 2; + } + 
static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceLogSumOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXReduceLogSumV13Op:ONNX_Op<"ReduceLogSumV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceLogSum operation"; let description = [{ Computes the log sum of the input tensor's element along the provided axes. The resulting @@ -6274,6 +6678,9 @@ def ONNXReduceLogSumV13Op:ONNX_Op<"ReduceLogSumV13", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceLogSum"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6295,7 +6702,7 @@ def ONNXReduceLogSumV13Op:ONNX_Op<"ReduceLogSumV13", } def ONNXReduceLogSumExpOp:ONNX_Op<"ReduceLogSumExp", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceLogSumExp operation"; let description = [{ Computes the log sum exponent of the input tensor's element along the provided axes. 
The resulting @@ -6311,6 +6718,9 @@ def ONNXReduceLogSumExpOp:ONNX_Op<"ReduceLogSumExp", DefaultValuedAttr:$noop_with_empty_axes); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceLogSumExp"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -6332,7 +6742,7 @@ def ONNXReduceLogSumExpOp:ONNX_Op<"ReduceLogSumExp", } def ONNXReduceLogSumExpV13Op:ONNX_Op<"ReduceLogSumExpV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceLogSumExp operation"; let description = [{ Computes the log sum exponent of the input tensor's element along the provided axes. The resulting @@ -6347,6 +6757,9 @@ def ONNXReduceLogSumExpV13Op:ONNX_Op<"ReduceLogSumExpV13", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceLogSumExp"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6368,7 +6781,7 @@ def ONNXReduceLogSumExpV13Op:ONNX_Op<"ReduceLogSumExpV13", } def ONNXReduceMaxOp:ONNX_Op<"ReduceMax", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceMax operation"; let description = [{ Computes the max of the input tensor's element along the provided axes. 
The resulting @@ -6392,30 +6805,33 @@ def ONNXReduceMaxOp:ONNX_Op<"ReduceMax", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceMaxOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceMax"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceMaxOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXReduceMaxV13Op:ONNX_Op<"ReduceMaxV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceMax operation"; let description = [{ Computes the max of the input tensor's element along the provided axes. 
The resulting @@ -6438,30 +6854,33 @@ def ONNXReduceMaxV13Op:ONNX_Op<"ReduceMaxV13", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceMaxV13OpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceMax"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceMaxV13OpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXReduceMeanOp:ONNX_Op<"ReduceMean", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceMean operation"; let description = [{ Computes the mean of the input tensor's element along the provided axes. 
The resulting @@ -6477,6 +6896,9 @@ def ONNXReduceMeanOp:ONNX_Op<"ReduceMean", DefaultValuedAttr:$noop_with_empty_axes); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceMean"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -6498,7 +6920,7 @@ def ONNXReduceMeanOp:ONNX_Op<"ReduceMean", } def ONNXReduceMeanV13Op:ONNX_Op<"ReduceMeanV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceMean operation"; let description = [{ Computes the mean of the input tensor's element along the provided axes. The resulting @@ -6513,6 +6935,9 @@ def ONNXReduceMeanV13Op:ONNX_Op<"ReduceMeanV13", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceMean"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6534,7 +6959,7 @@ def ONNXReduceMeanV13Op:ONNX_Op<"ReduceMeanV13", } def ONNXReduceMinOp:ONNX_Op<"ReduceMin", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceMin operation"; let description = [{ Computes the min of the input tensor's element along the provided axes. 
The resulting @@ -6550,6 +6975,9 @@ def ONNXReduceMinOp:ONNX_Op<"ReduceMin", DefaultValuedAttr:$noop_with_empty_axes); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, TensorOf<[UI8]>, TensorOf<[I8]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceMin"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -6571,7 +6999,7 @@ def ONNXReduceMinOp:ONNX_Op<"ReduceMin", } def ONNXReduceMinV13Op:ONNX_Op<"ReduceMinV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceMin operation"; let description = [{ Computes the min of the input tensor's element along the provided axes. The resulting @@ -6586,6 +7014,9 @@ def ONNXReduceMinV13Op:ONNX_Op<"ReduceMinV13", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, TensorOf<[UI8]>, TensorOf<[I8]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceMin"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6607,7 +7038,7 @@ def ONNXReduceMinV13Op:ONNX_Op<"ReduceMinV13", } def ONNXReduceProdOp:ONNX_Op<"ReduceProd", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceProd operation"; let description = [{ Computes the product of the input tensor's element along the provided axes. 
The resulting @@ -6623,6 +7054,9 @@ def ONNXReduceProdOp:ONNX_Op<"ReduceProd", DefaultValuedAttr:$noop_with_empty_axes); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceProd"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 2; } @@ -6644,7 +7078,7 @@ def ONNXReduceProdOp:ONNX_Op<"ReduceProd", } def ONNXReduceProdV13Op:ONNX_Op<"ReduceProdV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceProd operation"; let description = [{ Computes the product of the input tensor's element along the provided axes. The resulting @@ -6659,6 +7093,9 @@ def ONNXReduceProdV13Op:ONNX_Op<"ReduceProdV13", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceProd"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6680,7 +7117,7 @@ def ONNXReduceProdV13Op:ONNX_Op<"ReduceProdV13", } def ONNXReduceSumOp:ONNX_Op<"ReduceSum", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceSum operation"; let description = [{ Computes the sum of the input tensor's element along the provided axes. 
The resulting @@ -6704,30 +7141,33 @@ def ONNXReduceSumOp:ONNX_Op<"ReduceSum", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceSumOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceSum"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceSumOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXReduceSumV11Op:ONNX_Op<"ReduceSumV11", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceSum operation"; let description = [{ Computes the sum of the input tensor's element along the provided axes. 
The resulting @@ -6750,30 +7190,33 @@ def ONNXReduceSumV11Op:ONNX_Op<"ReduceSumV11", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceSumV11OpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; -} - -def ONNXReduceSumSquareOp:ONNX_Op<"ReduceSumSquare", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceSum"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceSumV11OpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; +} + +def ONNXReduceSumSquareOp:ONNX_Op<"ReduceSumSquare", + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceSumSquare operation"; let description = [{ Computes the sum square of the input tensor's element along the provided axes. 
The resulting @@ -6797,30 +7240,33 @@ def ONNXReduceSumSquareOp:ONNX_Op<"ReduceSumSquare", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceSumSquareOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceSumSquare"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXReduceSumSquareOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXReduceSumSquareV13Op:ONNX_Op<"ReduceSumSquareV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReduceSumSquare operation"; let description = [{ Computes the sum square of the input tensor's element along the provided axes. 
The resulting @@ -6835,6 +7281,9 @@ def ONNXReduceSumSquareV13Op:ONNX_Op<"ReduceSumSquareV13", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$reduced); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReduceSumSquare"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -6856,7 +7305,7 @@ def ONNXReduceSumSquareV13Op:ONNX_Op<"ReduceSumSquareV13", } def ONNXReluOp:ONNX_Op<"Relu", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Relu operation"; let description = [{ Relu takes one input data (Tensor) and produces one output data @@ -6866,6 +7315,9 @@ def ONNXReluOp:ONNX_Op<"Relu", let arguments = (ins AnyTypeOf<[TensorOf<[F32]>, TensorOf<[I32]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[I32]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Relu"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; static int getNumberOfOperands() { return 1; } @@ -6887,8 +7339,7 @@ def ONNXReluOp:ONNX_Op<"Relu", } def ONNXReshapeOp:ONNX_Op<"Reshape", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Reshape operation"; let description = [{ Reshape the input tensor similar to numpy.reshape. 
@@ -6910,6 +7361,9 @@ def ONNXReshapeOp:ONNX_Op<"Reshape", DefaultValuedAttr:$allowzero); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$reshaped); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Reshape"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; static int getNumberOfOperands() { return 2; } @@ -6928,10 +7382,11 @@ def ONNXReshapeOp:ONNX_Op<"Reshape", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXResizeOp:ONNX_Op<"Resize", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Resize operation"; let description = [{ Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. 
@@ -6954,6 +7409,9 @@ def ONNXResizeOp:ONNX_Op<"Resize", DefaultValuedStrAttr:$nearest_mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Resize"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 4; } @@ -6976,7 +7434,7 @@ def ONNXResizeOp:ONNX_Op<"Resize", } def ONNXResizeV13Op:ONNX_Op<"ResizeV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Resize operation"; let description = [{ Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. 
@@ -6995,6 +7453,9 @@ def ONNXResizeV13Op:ONNX_Op<"ResizeV13", DefaultValuedStrAttr:$nearest_mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Resize"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 4; } @@ -7016,7 +7477,7 @@ def ONNXResizeV13Op:ONNX_Op<"ResizeV13", } def ONNXResizeV11Op:ONNX_Op<"ResizeV11", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Resize operation"; let description = [{ Resize the input tensor. In general, it calculates every value in the output tensor as a weighted average of neighborhood (a.k.a. sampling locations) in the input tensor. 
@@ -7035,6 +7496,9 @@ def ONNXResizeV11Op:ONNX_Op<"ResizeV11", DefaultValuedStrAttr:$nearest_mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Resize"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 4; } @@ -7056,7 +7520,7 @@ def ONNXResizeV11Op:ONNX_Op<"ResizeV11", } def ONNXResizeV10Op:ONNX_Op<"ResizeV10", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Resize operation"; let description = [{ Resize the input tensor. @@ -7068,6 +7532,9 @@ def ONNXResizeV10Op:ONNX_Op<"ResizeV10", DefaultValuedStrAttr:$mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Resize"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 2; } @@ -7089,7 +7556,7 @@ def ONNXResizeV10Op:ONNX_Op<"ResizeV10", } def ONNXReverseSequenceOp:ONNX_Op<"ReverseSequence", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ReverseSequence operation"; let description = [{ Reverse batch of sequences having different lengths specified by 
`sequence_lens`. @@ -7132,6 +7599,9 @@ def ONNXReverseSequenceOp:ONNX_Op<"ReverseSequence", DefaultValuedAttr:$time_axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ReverseSequence"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 2; } @@ -7154,7 +7624,7 @@ def ONNXReverseSequenceOp:ONNX_Op<"ReverseSequence", } def ONNXRoiAlignOp:ONNX_Op<"RoiAlign", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX RoiAlign operation"; let description = [{ Region of Interest (RoI) align operation described in the @@ -7180,6 +7650,9 @@ def ONNXRoiAlignOp:ONNX_Op<"RoiAlign", DefaultValuedAttr:$spatial_scale); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "RoiAlign"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return 3; } @@ -7202,7 +7675,7 @@ def ONNXRoiAlignOp:ONNX_Op<"RoiAlign", } def ONNXRoundOp:ONNX_Op<"Round", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Round operation"; let description = [{ Round takes one input Tensor and rounds the values, element-wise, meaning @@ -7222,6 +7695,9 @@ def ONNXRoundOp:ONNX_Op<"Round", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); let results = (outs 
AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Round"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -7243,7 +7719,7 @@ def ONNXRoundOp:ONNX_Op<"Round", } def ONNXSTFTOp:ONNX_Op<"STFT", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX STFT operation"; let description = [{ Computes the Short-time Fourier Transform of the signal. @@ -7255,6 +7731,9 @@ def ONNXSTFTOp:ONNX_Op<"STFT", DefaultValuedAttr:$onesided); let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F16]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "STFT"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 17; static int getNumberOfOperands() { return 4; } @@ -7276,7 +7755,7 @@ def ONNXSTFTOp:ONNX_Op<"STFT", } def ONNXScanOp:ONNX_Op<"Scan", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"HasOnnxSubgraphOpInterface">]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"HasOnnxSubgraphOpInterface">]> { let summary = "ONNX Scan operation"; let description = [{ Scan can be used to iterate over one or more scan_input tensors, @@ -7410,6 +7889,9 @@ def ONNXScanOp:ONNX_Op<"Scan", let results = (outs Variadic, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>>:$final_state_and_scan_outputs); let regions = (region SizedRegion<1>:$body); let extraClassDeclaration = [{ + static constexpr StringRef 
onnxName = "Scan"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return -1; } @@ -7419,15 +7901,14 @@ def ONNXScanOp:ONNX_Op<"Scan", static std::vector getTypeMap() { return {20}; } - mlir::Operation::operand_range getVInitial(); mlir::Operation::result_range v_final(); mlir::Operation::operand_range scan_inputs(); mlir::Operation::result_range scan_outputs(); int64_t getSubgraphRegionIdx(const std::string& name) { - if (name == "body") return 0; - llvm_unreachable("region with the specified name does not exist"); - } + if (name == "body") return 0; + llvm_unreachable("region with the specified name does not exist"); + } }]; let extraClassDefinition = [{ onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, @@ -7440,7 +7921,7 @@ def ONNXScanOp:ONNX_Op<"Scan", } def ONNXScatterOp:ONNX_Op<"Scatter", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Scatter operation"; let description = [{ This operator is deprecated. Please use ScatterElements, which provides the same functionality. 
@@ -7503,6 +7984,9 @@ def ONNXScatterOp:ONNX_Op<"Scatter", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Scatter"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 3; } @@ -7524,7 +8008,7 @@ def ONNXScatterOp:ONNX_Op<"Scatter", } def ONNXScatterElementsOp:ONNX_Op<"ScatterElements", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ScatterElements operation"; let description = [{ ScatterElements takes three inputs `data`, `updates`, and `indices` of the same @@ -7598,6 +8082,9 @@ def ONNXScatterElementsOp:ONNX_Op<"ScatterElements", DefaultValuedStrAttr:$reduction); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ScatterElements"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 3; } @@ -7620,7 +8107,7 @@ def ONNXScatterElementsOp:ONNX_Op<"ScatterElements", } def ONNXScatterNDOp:ONNX_Op<"ScatterND", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX 
ScatterND operation"; let description = [{ ScatterND takes three inputs `data` tensor of rank r >= 1, `indices` tensor of rank q >= 1, @@ -7701,6 +8188,9 @@ def ONNXScatterNDOp:ONNX_Op<"ScatterND", DefaultValuedStrAttr:$reduction); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ScatterND"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; static int getNumberOfOperands() { return 3; } @@ -7723,7 +8213,7 @@ def ONNXScatterNDOp:ONNX_Op<"ScatterND", } def ONNXSeluOp:ONNX_Op<"Selu", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Selu operation"; let description = [{ Selu takes one input data (Tensor) and produces one output data @@ -7736,6 +8226,9 @@ def ONNXSeluOp:ONNX_Op<"Selu", DefaultValuedAttr:$gamma); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Selu"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 6; static int getNumberOfOperands() { return 1; } @@ -7757,7 +8250,7 @@ def ONNXSeluOp:ONNX_Op<"Selu", } def ONNXSequenceAtOp:ONNX_Op<"SequenceAt", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SequenceAt operation"; let description = [{ Outputs a tensor copy from the tensor at 'position' in 'input_sequence'. 
@@ -7768,6 +8261,9 @@ def ONNXSequenceAtOp:ONNX_Op<"SequenceAt", AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>]>:$position); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$tensor); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SequenceAt"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 2; } @@ -7789,7 +8285,7 @@ def ONNXSequenceAtOp:ONNX_Op<"SequenceAt", } def ONNXSequenceConstructOp:ONNX_Op<"SequenceConstruct", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SequenceConstruct operation"; let description = [{ Construct a tensor sequence containing 'inputs' tensors. 
@@ -7798,6 +8294,9 @@ def ONNXSequenceConstructOp:ONNX_Op<"SequenceConstruct", let arguments = (ins Variadic, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>>:$inputs); let results = (outs AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>:$output_sequence); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SequenceConstruct"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return -1; } @@ -7819,7 +8318,7 @@ def ONNXSequenceConstructOp:ONNX_Op<"SequenceConstruct", } def ONNXSequenceEmptyOp:ONNX_Op<"SequenceEmpty", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SequenceEmpty operation"; let description = [{ Construct an empty tensor sequence, with given data type. 
@@ -7827,6 +8326,9 @@ def ONNXSequenceEmptyOp:ONNX_Op<"SequenceEmpty", let arguments = (ins OptionalAttr:$dtype); let results = (outs AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SequenceEmpty"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 0; } @@ -7849,7 +8351,7 @@ def ONNXSequenceEmptyOp:ONNX_Op<"SequenceEmpty", } def ONNXSequenceEraseOp:ONNX_Op<"SequenceErase", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SequenceErase operation"; let description = [{ Outputs a tensor sequence that removes the tensor at 'position' from 'input_sequence'. 
@@ -7861,6 +8363,9 @@ def ONNXSequenceEraseOp:ONNX_Op<"SequenceErase", AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, NoneType]>:$position); let results = (outs AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>:$output_sequence); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SequenceErase"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 2; } @@ -7882,7 +8387,7 @@ def ONNXSequenceEraseOp:ONNX_Op<"SequenceErase", } def ONNXSequenceInsertOp:ONNX_Op<"SequenceInsert", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SequenceInsert operation"; let description = [{ Outputs a tensor sequence that inserts 'tensor' into 'input_sequence' at 'position'. 
@@ -7896,6 +8401,9 @@ def ONNXSequenceInsertOp:ONNX_Op<"SequenceInsert", AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, NoneType]>:$position); let results = (outs AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>:$output_sequence); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SequenceInsert"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 3; } @@ -7918,7 +8426,7 @@ def ONNXSequenceInsertOp:ONNX_Op<"SequenceInsert", } def ONNXSequenceLengthOp:ONNX_Op<"SequenceLength", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SequenceLength operation"; let description = [{ Produces a scalar(tensor of empty shape) containing the number of tensors in 'input_sequence'. 
@@ -7926,6 +8434,9 @@ def ONNXSequenceLengthOp:ONNX_Op<"SequenceLength", let arguments = (ins AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>:$input_sequence); let results = (outs TensorOf<[I64]>:$length); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SequenceLength"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -7947,7 +8458,7 @@ def ONNXSequenceLengthOp:ONNX_Op<"SequenceLength", } def ONNXSequenceMapOp:ONNX_Op<"SequenceMap", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"HasOnnxSubgraphOpInterface">]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods, OpInterface<"HasOnnxSubgraphOpInterface">]> { let summary = "ONNX SequenceMap operation"; let description = [{ Applies a sub-graph to each sample in the input sequence(s). 
@@ -7970,6 +8481,9 @@ def ONNXSequenceMapOp:ONNX_Op<"SequenceMap", let results = (outs Variadic]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>>:$out_sequence); let regions = (region SizedRegion<1>:$body); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SequenceMap"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 17; static int getNumberOfOperands() { return -1; } @@ -7979,10 +8493,10 @@ def ONNXSequenceMapOp:ONNX_Op<"SequenceMap", static std::vector getTypeMap() { return {20}; } - int64_t getSubgraphRegionIdx(const std::string& name) { - if (name == "body") return 0; - llvm_unreachable("region with the specified name does not exist"); - } + int64_t getSubgraphRegionIdx(const std::string& name) { + if (name == "body") return 0; + llvm_unreachable("region with the specified name does not exist"); + } }]; let extraClassDefinition = [{ onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, @@ -7995,8 +8509,7 @@ def ONNXSequenceMapOp:ONNX_Op<"SequenceMap", } def ONNXShapeOp:ONNX_Op<"Shape", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Shape operation"; let description = [{ Takes a tensor as input and outputs an 1D int64 tensor containing the shape of the input tensor. 
@@ -8034,6 +8547,9 @@ def ONNXShapeOp:ONNX_Op<"Shape", DefaultValuedAttr:$start); let results = (outs TensorOf<[I64]>:$shape); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Shape"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 15; static int getNumberOfOperands() { return 1; } @@ -8053,10 +8569,11 @@ def ONNXShapeOp:ONNX_Op<"Shape", } }]; let hasVerifier = 1; + let hasCanonicalizer = 1; } def ONNXShrinkOp:ONNX_Op<"Shrink", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Shrink operation"; let description = [{ Shrink takes one input data (Tensor) and produces one Tensor output, @@ -8069,6 +8586,9 @@ def ONNXShrinkOp:ONNX_Op<"Shrink", DefaultValuedAttr:$lambd); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Shrink"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -8090,7 +8610,7 @@ def ONNXShrinkOp:ONNX_Op<"Shrink", } def ONNXSigmoidOp:ONNX_Op<"Sigmoid", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Sigmoid operation"; let description = [{ Sigmoid takes one input data (Tensor) and produces one output data @@ -8100,6 +8620,9 @@ def ONNXSigmoidOp:ONNX_Op<"Sigmoid", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$Y); let extraClassDeclaration = [{ + static 
constexpr StringRef onnxName = "Sigmoid"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -8121,7 +8644,7 @@ def ONNXSigmoidOp:ONNX_Op<"Sigmoid", } def ONNXSignOp:ONNX_Op<"Sign", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Sign operation"; let description = [{ Calculate the sign of the given input tensor element-wise. @@ -8130,6 +8653,9 @@ def ONNXSignOp:ONNX_Op<"Sign", let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Sign"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -8151,7 +8677,7 @@ def ONNXSignOp:ONNX_Op<"Sign", } def ONNXSinOp:ONNX_Op<"Sin", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Sin operation"; let description = [{ Calculates the sine of the given input tensor, element-wise. 
@@ -8159,6 +8685,9 @@ def ONNXSinOp:ONNX_Op<"Sin", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Sin"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; static int getNumberOfOperands() { return 1; } @@ -8180,7 +8709,7 @@ def ONNXSinOp:ONNX_Op<"Sin", } def ONNXSinhOp:ONNX_Op<"Sinh", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Sinh operation"; let description = [{ Calculates the hyperbolic sine of the given input tensor element-wise. @@ -8188,6 +8717,9 @@ def ONNXSinhOp:ONNX_Op<"Sinh", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Sinh"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -8209,8 +8741,7 @@ def ONNXSinhOp:ONNX_Op<"Sinh", } def ONNXSizeOp:ONNX_Op<"Size", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Size operation"; let description = [{ Takes a tensor as input and outputs a int64 scalar that equals to the total number of elements of the input tensor. 
@@ -8218,6 +8749,9 @@ def ONNXSizeOp:ONNX_Op<"Size", let arguments = (ins AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$data); let results = (outs TensorOf<[I64]>:$size); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Size"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -8236,10 +8770,11 @@ def ONNXSizeOp:ONNX_Op<"Size", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXSliceOp:ONNX_Op<"Slice", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Slice operation"; let description = [{ Produces a slice of the input tensor along multiple axes. 
Similar to numpy: @@ -8305,6 +8840,9 @@ def ONNXSliceOp:ONNX_Op<"Slice", AnyTypeOf<[TensorOf<[I32]>, TensorOf<[I64]>, NoneType]>:$steps); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Slice"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 5; } @@ -8326,7 +8864,7 @@ def ONNXSliceOp:ONNX_Op<"Slice", } def ONNXSoftmaxOp:ONNX_Op<"Softmax", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Softmax operation"; let description = [{ The operator computes the normalized exponential values for the given input: @@ -8349,31 +8887,33 @@ def ONNXSoftmaxOp:ONNX_Op<"Softmax", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSoftmaxOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Softmax"; + static constexpr StringRef 
onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSoftmaxOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXSoftmaxV11Op:ONNX_Op<"SoftmaxV11", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Softmax operation"; let description = [{ The operator computes the softmax (normalized exponential) values for each layer in the batch @@ -8395,6 +8935,9 @@ def ONNXSoftmaxV11Op:ONNX_Op<"SoftmaxV11", DefaultValuedAttr:$axis); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Softmax"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -8413,10 +8956,11 @@ def ONNXSoftmaxV11Op:ONNX_Op<"SoftmaxV11", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXSoftmaxCrossEntropyLossOp:ONNX_Op<"SoftmaxCrossEntropyLoss", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SoftmaxCrossEntropyLoss operation"; let description = [{ Loss function that measures the softmax cross entropy @@ -8459,6 +9003,9 @@ def ONNXSoftmaxCrossEntropyLossOp:ONNX_Op<"SoftmaxCrossEntropyLoss", let results = (outs AnyTypeOf<[TensorOf<[F16]>, 
TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output, AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>, NoneType]>:$log_prob); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SoftmaxCrossEntropyLoss"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 3; } @@ -8480,7 +9027,7 @@ def ONNXSoftmaxCrossEntropyLossOp:ONNX_Op<"SoftmaxCrossEntropyLoss", } def ONNXSoftplusOp:ONNX_Op<"Softplus", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Softplus operation"; let description = [{ Softplus takes one input data (Tensor) and produces one output data @@ -8490,6 +9037,9 @@ def ONNXSoftplusOp:ONNX_Op<"Softplus", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$X); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Softplus"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -8511,7 +9061,7 @@ def ONNXSoftplusOp:ONNX_Op<"Softplus", } def ONNXSoftsignOp:ONNX_Op<"Softsign", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Softsign operation"; let description = [{ Calculates the softsign (x/(1+|x|)) of the given input tensor element-wise. 
@@ -8519,6 +9069,9 @@ def ONNXSoftsignOp:ONNX_Op<"Softsign", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Softsign"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -8540,8 +9093,7 @@ def ONNXSoftsignOp:ONNX_Op<"Softsign", } def ONNXSpaceToDepthOp:ONNX_Op<"SpaceToDepth", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SpaceToDepth operation"; let description = [{ SpaceToDepth rearranges blocks of spatial data into depth. More specifically, @@ -8552,6 +9104,9 @@ def ONNXSpaceToDepthOp:ONNX_Op<"SpaceToDepth", SI64Attr:$blocksize); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SpaceToDepth"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -8571,10 +9126,11 @@ def ONNXSpaceToDepthOp:ONNX_Op<"SpaceToDepth", } }]; let hasVerifier = 1; + let hasCanonicalizer = 1; } def ONNXSplitOp:ONNX_Op<"Split", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Split operation"; let description = [{ Split a tensor into a list of tensors, along the specified 'axis'. 
@@ -8597,31 +9153,34 @@ def ONNXSplitOp:ONNX_Op<"Split", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return -1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSplitOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Split"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 18; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return -1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSplitOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXSplitV13Op:ONNX_Op<"SplitV13", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Split operation"; let description = [{ Split a tensor into a list of tensors, along the specified @@ -8641,30 +9200,33 @@ def ONNXSplitV13Op:ONNX_Op<"SplitV13", auto resultType = 
UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return -1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSplitV13OpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Split"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return -1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSplitV13OpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXSplitV11Op:ONNX_Op<"SplitV11", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Split operation"; let description = [{ Split a tensor into a list of tensors, along the specified @@ -8676,6 +9238,9 @@ def ONNXSplitV11Op:ONNX_Op<"SplitV11", OptionalAttr:$split); let results = (outs Variadic, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, 
TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>>:$outputs); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Split"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -8697,7 +9262,7 @@ def ONNXSplitV11Op:ONNX_Op<"SplitV11", } def ONNXSplitToSequenceOp:ONNX_Op<"SplitToSequence", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SplitToSequence operation"; let description = [{ Split a tensor into a sequence of tensors, along the specified @@ -8717,6 +9282,9 @@ def ONNXSplitToSequenceOp:ONNX_Op<"SplitToSequence", DefaultValuedAttr:$keepdims); let results = (outs AnyTypeOf<[SeqOf<[TensorOf<[UI8]>]>, SeqOf<[TensorOf<[UI16]>]>, SeqOf<[TensorOf<[UI32]>]>, SeqOf<[TensorOf<[UI64]>]>, SeqOf<[TensorOf<[I8]>]>, SeqOf<[TensorOf<[I16]>]>, SeqOf<[TensorOf<[I32]>]>, SeqOf<[TensorOf<[I64]>]>, SeqOf<[TensorOf<[F16]>]>, SeqOf<[TensorOf<[F32]>]>, SeqOf<[TensorOf<[F64]>]>, SeqOf<[TensorOf<[StringType]>]>, SeqOf<[TensorOf<[I1]>]>, SeqOf<[TensorOf<[Complex]>]>, SeqOf<[TensorOf<[Complex]>]>]>:$output_sequence); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SplitToSequence"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 2; } @@ -8739,7 +9307,7 @@ def ONNXSplitToSequenceOp:ONNX_Op<"SplitToSequence", } def ONNXSqrtOp:ONNX_Op<"Sqrt", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Sqrt operation"; let description = [{ Square root takes one input data (Tensor) and produces one output data @@ -8757,31 +9325,33 @@ def ONNXSqrtOp:ONNX_Op<"Sqrt", 
auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSqrtOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Sqrt"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSqrtOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; } def ONNXSqueezeOp:ONNX_Op<"Squeeze", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Squeeze operation"; let description = [{ Remove single-dimensional entries from the shape of a tensor. 
@@ -8801,32 +9371,35 @@ def ONNXSqueezeOp:ONNX_Op<"Squeeze", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSqueezeOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasFolder = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Squeeze"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSqueezeOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasFolder = 1; + let hasCanonicalizer = 1; } def ONNXSqueezeV11Op:ONNX_Op<"SqueezeV11", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Squeeze operation"; let description = [{ Remove single-dimensional entries from the shape of a tensor. 
@@ -8846,31 +9419,35 @@ def ONNXSqueezeV11Op:ONNX_Op<"SqueezeV11", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSqueezeV11OpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasFolder = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Squeeze"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSqueezeV11OpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasFolder = 1; + let hasCanonicalizer = 1; } def ONNXStringNormalizerOp:ONNX_Op<"StringNormalizer", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX StringNormalizer operation"; let description = [{ StringNormalization performs string operations for basic cleaning. 
@@ -8890,6 +9467,9 @@ def ONNXStringNormalizerOp:ONNX_Op<"StringNormalizer", OptionalAttr:$stopwords); let results = (outs TensorOf<[StringType]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "StringNormalizer"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 1; } @@ -8911,7 +9491,7 @@ def ONNXStringNormalizerOp:ONNX_Op<"StringNormalizer", } def ONNXSubOp:ONNX_Op<"Sub", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Sub operation"; let description = [{ Performs element-wise binary subtraction (with Numpy-style broadcasting support). @@ -8942,31 +9522,34 @@ def ONNXSubOp:ONNX_Op<"Sub", resultType = UnrankedTensorType::get(lhsTy.cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSubOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Sub"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * 
$cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXSubOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXSumOp:ONNX_Op<"Sum", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Sum operation"; let description = [{ Element-wise sum of each of the input tensors (with Numpy-style broadcasting support). @@ -8976,6 +9559,9 @@ def ONNXSumOp:ONNX_Op<"Sum", let arguments = (ins Variadic, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>>:$data_0); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$sum); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Sum"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return -1; } @@ -8998,7 +9584,7 @@ def ONNXSumOp:ONNX_Op<"Sum", } def ONNXTanOp:ONNX_Op<"Tan", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Tan operation"; let description = [{ Calculates the tangent of the given input tensor, element-wise. 
@@ -9006,6 +9592,9 @@ def ONNXTanOp:ONNX_Op<"Tan", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Tan"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; static int getNumberOfOperands() { return 1; } @@ -9027,7 +9616,7 @@ def ONNXTanOp:ONNX_Op<"Tan", } def ONNXTanhOp:ONNX_Op<"Tanh", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Tanh operation"; let description = [{ Calculates the hyperbolic tangent of the given input tensor element-wise. @@ -9035,6 +9624,9 @@ def ONNXTanhOp:ONNX_Op<"Tanh", let arguments = (ins AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$input); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[BF16]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Tanh"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -9056,7 +9648,7 @@ def ONNXTanhOp:ONNX_Op<"Tanh", } def ONNXTfIdfVectorizerOp:ONNX_Op<"TfIdfVectorizer", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX TfIdfVectorizer operation"; let description = [{ This transform extracts n-grams from the input sequence and save them as a vector. 
Input can @@ -9099,6 +9691,9 @@ def ONNXTfIdfVectorizerOp:ONNX_Op<"TfIdfVectorizer", OptionalAttr:$weights); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "TfIdfVectorizer"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 1; } @@ -9120,7 +9715,7 @@ def ONNXTfIdfVectorizerOp:ONNX_Op<"TfIdfVectorizer", } def ONNXThresholdedReluOp:ONNX_Op<"ThresholdedRelu", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ThresholdedRelu operation"; let description = [{ ThresholdedRelu takes one input data (Tensor) and produces one output data @@ -9131,6 +9726,9 @@ def ONNXThresholdedReluOp:ONNX_Op<"ThresholdedRelu", DefaultValuedAttr:$alpha); let results = (outs AnyTypeOf<[TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ThresholdedRelu"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 10; static int getNumberOfOperands() { return 1; } @@ -9152,7 +9750,7 @@ def ONNXThresholdedReluOp:ONNX_Op<"ThresholdedRelu", } def ONNXTileOp:ONNX_Op<"Tile", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Tile operation"; let description = [{ Constructs a tensor by tiling a given tensor. 
@@ -9163,6 +9761,9 @@ def ONNXTileOp:ONNX_Op<"Tile", TensorOf<[I64]>:$repeats); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Tile"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 2; } @@ -9184,7 +9785,7 @@ def ONNXTileOp:ONNX_Op<"Tile", } def ONNXTopKOp:ONNX_Op<"TopK", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX TopK operation"; let description = [{ Retrieve the top-K largest or smallest elements along a specified axis. Given an input tensor of @@ -9210,6 +9811,9 @@ def ONNXTopKOp:ONNX_Op<"TopK", let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>]>:$Values, TensorOf<[I64]>:$Indices); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "TopK"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 2; } @@ -9232,8 +9836,7 @@ def ONNXTopKOp:ONNX_Op<"TopK", } def ONNXTransposeOp:ONNX_Op<"Transpose", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Transpose operation"; let description = [{ Transpose the input tensor similar to numpy.transpose. 
For example, when @@ -9244,6 +9847,9 @@ def ONNXTransposeOp:ONNX_Op<"Transpose", OptionalAttr:$perm); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$transposed); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Transpose"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 1; } @@ -9262,10 +9868,11 @@ def ONNXTransposeOp:ONNX_Op<"Transpose", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXTriluOp:ONNX_Op<"Trilu", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Trilu operation"; let description = [{ Given a 2-D matrix or batches of 2-D matrices, returns the upper or lower triangular part of the tensor(s). 
@@ -9286,6 +9893,9 @@ def ONNXTriluOp:ONNX_Op<"Trilu", DefaultValuedAttr:$upper); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Trilu"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 14; static int getNumberOfOperands() { return 2; } @@ -9307,7 +9917,7 @@ def ONNXTriluOp:ONNX_Op<"Trilu", } def ONNXUniqueOp:ONNX_Op<"Unique", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Unique operation"; let description = [{ Find the unique elements of a tensor. When an optional attribute 'axis' is provided, unique subtensors sliced along the 'axis' are returned. @@ -9394,6 +10004,9 @@ def ONNXUniqueOp:ONNX_Op<"Unique", AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$inverse_indices, AnyTypeOf<[TensorOf<[I64]>, NoneType]>:$counts); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Unique"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; static int getNumberOfOperands() { return 1; } @@ -9416,8 +10029,7 @@ def ONNXUniqueOp:ONNX_Op<"Unique", } def ONNXUnsqueezeOp:ONNX_Op<"Unsqueeze", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Unsqueeze operation"; let description = [{ Insert single-dimensional entries to the shape of an input tensor (`data`). 
@@ -9437,6 +10049,9 @@ def ONNXUnsqueezeOp:ONNX_Op<"Unsqueeze", TensorOf<[I64]>:$axes); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$expanded); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Unsqueeze"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 13; static int getNumberOfOperands() { return 2; } @@ -9455,11 +10070,11 @@ def ONNXUnsqueezeOp:ONNX_Op<"Unsqueeze", return sh; } }]; + let hasCanonicalizer = 1; } def ONNXUnsqueezeV11Op:ONNX_Op<"UnsqueezeV11", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { - let hasCanonicalizer = 1; + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Unsqueeze operation"; let description = [{ Insert single-dimensional entries to the shape of an input tensor (`data`). 
@@ -9487,30 +10102,34 @@ def ONNXUnsqueezeV11Op:ONNX_Op<"UnsqueezeV11", auto resultType = UnrankedTensorType::get(operands[0].getType().cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 1; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {20}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXUnsqueezeV11OpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to allocate shape helper"); - return sh; - } - }]; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Unsqueeze"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 11; + static int getNumberOfOperands() { + return 1; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {20}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXUnsqueezeV11OpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasCanonicalizer = 1; } def ONNXUpsampleOp:ONNX_Op<"Upsample", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Upsample operation"; let description = [{ Upsample the input tensor. 
@@ -9522,6 +10141,9 @@ def ONNXUpsampleOp:ONNX_Op<"Upsample", DefaultValuedStrAttr:$mode); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Upsample"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 9; static int getNumberOfOperands() { return 2; } @@ -9544,7 +10166,7 @@ def ONNXUpsampleOp:ONNX_Op<"Upsample", } def ONNXUpsampleV7Op:ONNX_Op<"UpsampleV7", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Upsample operation"; let description = [{ Upsample the input tensor. @@ -9556,6 +10178,9 @@ def ONNXUpsampleV7Op:ONNX_Op<"UpsampleV7", F32ArrayAttr:$scales); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Upsample"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; static int getNumberOfOperands() { return 1; } @@ -9577,7 +10202,7 @@ def ONNXUpsampleV7Op:ONNX_Op<"UpsampleV7", } def ONNXWhereOp:ONNX_Op<"Where", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Where operation"; let description = [{ Return elements, either from X or Y, depending on condition. 
@@ -9595,6 +10220,9 @@ def ONNXWhereOp:ONNX_Op<"Where", AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$Y); let results = (outs AnyTypeOf<[TensorOf<[UI8]>, TensorOf<[UI16]>, TensorOf<[UI32]>, TensorOf<[UI64]>, TensorOf<[I8]>, TensorOf<[I16]>, TensorOf<[I32]>, TensorOf<[I64]>, TensorOf<[BF16]>, TensorOf<[F16]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>, TensorOf<[I1]>, TensorOf<[Complex]>, TensorOf<[Complex]>]>:$output); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Where"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 16; static int getNumberOfOperands() { return 3; } @@ -9617,7 +10245,7 @@ def ONNXWhereOp:ONNX_Op<"Where", } def ONNXXorOp:ONNX_Op<"Xor", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Xor operation"; let description = [{ Returns the tensor resulted from performing the `xor` logical operation @@ -9647,31 +10275,34 @@ def ONNXXorOp:ONNX_Op<"Xor", resultType = UnrankedTensorType::get(lhsTy.cast().getElementType()); build($_builder, $_state, {resultType}, operands, attributes); }]> - ]; - let extraClassDeclaration = [{ - static int getNumberOfOperands() { - return 2; - } - static int getNumberOfResults() { - return 1; - } - static std::vector getTypeMap() { - return {9}; - } - }]; - let extraClassDefinition = [{ - onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, - onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { - onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXXorOpShapeHelper(op, oper, ieb, scope); - assert(sh && "failed to 
allocate shape helper"); - return sh; - } - }]; - let hasVerifier = 1; + ]; + let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Xor"; + static constexpr StringRef onnxDomain = ""; + static constexpr int onnxSinceVersion = 7; + static int getNumberOfOperands() { + return 2; + } + static int getNumberOfResults() { + return 1; + } + static std::vector getTypeMap() { + return {9}; + } + }]; + let extraClassDefinition = [{ + onnx_mlir::ONNXOpShapeHelper * $cppClass::getShapeHelper(mlir::Operation *op, llvm::ArrayRef oper, + onnx_mlir::IndexExprBuilder *ieb, onnx_mlir::IndexExprScope *scope) { + onnx_mlir::ONNXOpShapeHelper *sh = new onnx_mlir::ONNXXorOpShapeHelper(op, oper, ieb, scope); + assert(sh && "failed to allocate shape helper"); + return sh; + } + }]; + let hasVerifier = 1; } def ONNXArrayFeatureExtractorOp:ONNX_Op<"ArrayFeatureExtractor", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ArrayFeatureExtractor operation"; let description = [{ Select elements of the input tensor based on the indices passed.
@@ -9681,6 +10312,9 @@ def ONNXArrayFeatureExtractorOp:ONNX_Op<"ArrayFeatureExtractor", TensorOf<[I64]>:$Y); let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I64]>, TensorOf<[I32]>, TensorOf<[StringType]>]>:$Z); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ArrayFeatureExtractor"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 2; } @@ -9702,7 +10336,7 @@ def ONNXArrayFeatureExtractorOp:ONNX_Op<"ArrayFeatureExtractor", } def ONNXBinarizerOp:ONNX_Op<"Binarizer", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Binarizer operation"; let description = [{ Maps the values of the input tensor to either 0 or 1, element-wise, based on the outcome of a comparison against a threshold value. @@ -9711,6 +10345,9 @@ def ONNXBinarizerOp:ONNX_Op<"Binarizer", DefaultValuedAttr:$threshold); let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I64]>, TensorOf<[I32]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Binarizer"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -9732,7 +10369,7 @@ def ONNXBinarizerOp:ONNX_Op<"Binarizer", } def ONNXCastMapOp:ONNX_Op<"CastMap", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX CastMap operation"; let description = [{ Converts a map to a tensor.
The map key must be an int64 and the values will be ordered @@ -9745,6 +10382,9 @@ def ONNXCastMapOp:ONNX_Op<"CastMap", DefaultValuedAttr:$max_map); let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[F32]>, TensorOf<[I64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "CastMap"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -9766,7 +10406,7 @@ def ONNXCastMapOp:ONNX_Op<"CastMap", } def ONNXCategoryMapperOp:ONNX_Op<"CategoryMapper", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX CategoryMapper operation"; let description = [{ Converts strings to integers and vice versa.
@@ -9785,6 +10425,9 @@ def ONNXCategoryMapperOp:ONNX_Op<"CategoryMapper", DefaultValuedStrAttr:$default_string); let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "CategoryMapper"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -9807,7 +10450,7 @@ def ONNXCategoryMapperOp:ONNX_Op<"CategoryMapper", } def ONNXDictVectorizerOp:ONNX_Op<"DictVectorizer", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX DictVectorizer operation"; let description = [{ Uses an index mapping to convert a dictionary to an array.
@@ -9828,6 +10471,9 @@ def ONNXDictVectorizerOp:ONNX_Op<"DictVectorizer", OptionalAttr:$string_vocabulary); let results = (outs AnyTypeOf<[TensorOf<[I64]>, TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[StringType]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "DictVectorizer"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -9849,7 +10495,7 @@ def ONNXDictVectorizerOp:ONNX_Op<"DictVectorizer", } def ONNXFeatureVectorizerOp:ONNX_Op<"FeatureVectorizer", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX FeatureVectorizer operation"; let description = [{ Concatenates input tensors into one continuous output.
@@ -9861,6 +10507,9 @@ def ONNXFeatureVectorizerOp:ONNX_Op<"FeatureVectorizer", OptionalAttr:$inputdimensions); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "FeatureVectorizer"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return -1; } @@ -9882,7 +10531,7 @@ def ONNXFeatureVectorizerOp:ONNX_Op<"FeatureVectorizer", } def ONNXImputerOp:ONNX_Op<"Imputer", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Imputer operation"; let description = [{ Replaces inputs that equal one value with another, leaving all other elements alone.
@@ -9901,6 +10550,9 @@ def ONNXImputerOp:ONNX_Op<"Imputer", DefaultValuedAttr:$replaced_value_int64); let results = (outs AnyTypeOf<[TensorOf<[F32]>, TensorOf<[F64]>, TensorOf<[I64]>, TensorOf<[I32]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Imputer"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -9922,7 +10574,7 @@ def ONNXImputerOp:ONNX_Op<"Imputer", } def ONNXLabelEncoderOp:ONNX_Op<"LabelEncoder", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LabelEncoder operation"; let description = [{ Maps each element in the input tensor to another value.
@@ -9955,6 +10607,9 @@ def ONNXLabelEncoderOp:ONNX_Op<"LabelEncoder", OptionalAttr:$values_strings); let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>, TensorOf<[F32]>]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LabelEncoder"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 2; static int getNumberOfOperands() { return 1; } @@ -9976,7 +10631,7 @@ def ONNXLabelEncoderOp:ONNX_Op<"LabelEncoder", } def ONNXLinearClassifierOp:ONNX_Op<"LinearClassifier", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LinearClassifier operation"; let description = [{ Linear classifier @@ -9991,6 +10646,9 @@ def ONNXLinearClassifierOp:ONNX_Op<"LinearClassifier", let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>]>:$Y, TensorOf<[F32]>:$Z); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LinearClassifier"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10012,7 +10670,7 @@ def ONNXLinearClassifierOp:ONNX_Op<"LinearClassifier", } def ONNXLinearRegressorOp:ONNX_Op<"LinearRegressor", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX LinearRegressor operation"; let description = [{ Generalized linear regression evaluation.
@@ -10029,6 +10687,9 @@ def ONNXLinearRegressorOp:ONNX_Op<"LinearRegressor", DefaultValuedAttr:$targets); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "LinearRegressor"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10050,7 +10711,7 @@ def ONNXLinearRegressorOp:ONNX_Op<"LinearRegressor", } def ONNXNormalizerOp:ONNX_Op<"Normalizer", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Normalizer operation"; let description = [{ Normalize the input. There are three normalization modes, which have the corresponding formulas, @@ -10068,6 +10729,9 @@ def ONNXNormalizerOp:ONNX_Op<"Normalizer", DefaultValuedStrAttr:$norm); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Normalizer"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10089,7 +10753,7 @@ def ONNXNormalizerOp:ONNX_Op<"Normalizer", } def ONNXOneHotEncoderOp:ONNX_Op<"OneHotEncoder", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX OneHotEncoder operation"; let description = [{ Replace each input element with an array of ones and zeros, where a single @@ -10107,6 +10771,9 @@ def ONNXOneHotEncoderOp:ONNX_Op<"OneHotEncoder", DefaultValuedAttr:$zeros); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "OneHotEncoder"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10129,7 +10796,7 
@@ def ONNXOneHotEncoderOp:ONNX_Op<"OneHotEncoder", } def ONNXSVMClassifierOp:ONNX_Op<"SVMClassifier", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SVMClassifier operation"; let description = [{ Support Vector Machine classifier @@ -10149,6 +10816,9 @@ def ONNXSVMClassifierOp:ONNX_Op<"SVMClassifier", let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>]>:$Y, TensorOf<[F32]>:$Z); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SVMClassifier"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10170,7 +10840,7 @@ def ONNXSVMClassifierOp:ONNX_Op<"SVMClassifier", } def ONNXSVMRegressorOp:ONNX_Op<"SVMRegressor", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX SVMRegressor operation"; let description = [{ Support Vector Machine regression prediction and one-class SVM anomaly detection. @@ -10186,6 +10856,9 @@ def ONNXSVMRegressorOp:ONNX_Op<"SVMRegressor", OptionalAttr:$support_vectors); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "SVMRegressor"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10207,7 +10880,7 @@ def ONNXSVMRegressorOp:ONNX_Op<"SVMRegressor", } def ONNXScalerOp:ONNX_Op<"Scaler", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Scaler operation"; let description = [{ Rescale input data, for example to standardize features by removing the mean and scaling to unit variance. 
@@ -10217,6 +10890,9 @@ def ONNXScalerOp:ONNX_Op<"Scaler", OptionalAttr:$scale); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Scaler"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10238,7 +10914,7 @@ def ONNXScalerOp:ONNX_Op<"Scaler", } def ONNXTreeEnsembleClassifierOp:ONNX_Op<"TreeEnsembleClassifier", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX TreeEnsembleClassifier operation"; let description = [{ Tree Ensemble classifier. Returns the top class for each of N inputs.
@@ -10272,6 +10948,9 @@ def ONNXTreeEnsembleClassifierOp:ONNX_Op<"TreeEnsembleClassifier", let results = (outs AnyTypeOf<[TensorOf<[StringType]>, TensorOf<[I64]>]>:$Y, TensorOf<[F32]>:$Z); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "TreeEnsembleClassifier"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10293,7 +10972,7 @@ def ONNXTreeEnsembleClassifierOp:ONNX_Op<"TreeEnsembleClassifier", } def ONNXTreeEnsembleRegressorOp:ONNX_Op<"TreeEnsembleRegressor", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX TreeEnsembleRegressor operation"; let description = [{ Tree Ensemble regressor. Returns the regressed values for each input in N.
@@ -10327,6 +11006,9 @@ def ONNXTreeEnsembleRegressorOp:ONNX_Op<"TreeEnsembleRegressor", OptionalAttr:$target_weights); let results = (outs TensorOf<[F32]>:$Y); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "TreeEnsembleRegressor"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10348,7 +11030,7 @@ def ONNXTreeEnsembleRegressorOp:ONNX_Op<"TreeEnsembleRegressor", } def ONNXZipMapOp:ONNX_Op<"ZipMap", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX ZipMap operation"; let description = [{ Creates a map from the input and the attributes.
@@ -10361,6 +11043,9 @@ def ONNXZipMapOp:ONNX_Op<"ZipMap", OptionalAttr:$classlabels_strings); let results = (outs AnyTypeOf<[SeqOf<[TupleOf<[StringType, F32]>]>, SeqOf<[TupleOf<[I64, F32]>]>]>:$Z); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "ZipMap"; + static constexpr StringRef onnxDomain = "ai.onnx.ml"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return 1; } @@ -10382,7 +11067,7 @@ def ONNXZipMapOp:ONNX_Op<"ZipMap", } def ONNXAdagradOp:ONNX_Op<"Adagrad", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Adagrad operation"; let description = [{ Compute one iteration of ADAGRAD, a stochastic gradient based optimization @@ -10444,6 +11129,9 @@ def ONNXAdagradOp:ONNX_Op<"Adagrad", DefaultValuedAttr:$norm_coefficient); let results = (outs Variadic, TensorOf<[F64]>]>>:$outputs); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Adagrad"; + static constexpr StringRef onnxDomain = "ai.onnx.preview.training"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return -1; } @@ -10465,7 +11153,7 @@ def ONNXAdagradOp:ONNX_Op<"Adagrad", } def ONNXAdamOp:ONNX_Op<"Adam", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Adam operation"; let description = [{ Compute one iteration of Adam, a stochastic gradient based optimization @@ -10540,6 +11228,9 @@ def ONNXAdamOp:ONNX_Op<"Adam", DefaultValuedAttr:$norm_coefficient_post); let results = (outs Variadic, TensorOf<[F64]>]>>:$outputs); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Adam"; + static constexpr StringRef onnxDomain = "ai.onnx.preview.training"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return -1; } @@ 
-10561,7 +11252,7 @@ def ONNXAdamOp:ONNX_Op<"Adam", } def ONNXGradientOp:ONNX_Op<"Gradient", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Gradient operation"; let description = [{ Gradient operator computes the partial derivatives of a specific tensor w.r.t. @@ -10694,6 +11385,9 @@ def ONNXGradientOp:ONNX_Op<"Gradient", OptionalAttr:$zs); let results = (outs Variadic, TensorOf<[F32]>, TensorOf<[F64]>]>>:$Outputs); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Gradient"; + static constexpr StringRef onnxDomain = "ai.onnx.preview.training"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return -1; } @@ -10715,7 +11409,7 @@ def ONNXGradientOp:ONNX_Op<"Gradient", } def ONNXMomentumOp:ONNX_Op<"Momentum", - [Pure, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { + [Pure, ONNXOperationTrait, DeclareOpInterfaceMethods, DeclareOpInterfaceMethods]> { let summary = "ONNX Momentum operation"; let description = [{ Compute one iteration of stochastic gradient update with momentum. @@ -10788,6 +11482,9 @@ def ONNXMomentumOp:ONNX_Op<"Momentum", F32Attr:$norm_coefficient); let results = (outs Variadic, TensorOf<[F64]>]>>:$outputs); let extraClassDeclaration = [{ + static constexpr StringRef onnxName = "Momentum"; + static constexpr StringRef onnxDomain = "ai.onnx.preview.training"; + static constexpr int onnxSinceVersion = 1; static int getNumberOfOperands() { return -1; } diff --git a/src/Interface/ONNXOperationTrait.hpp b/src/Interface/ONNXOperationTrait.hpp new file mode 100644 index 0000000000..06421df4b7 --- /dev/null +++ b/src/Interface/ONNXOperationTrait.hpp @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + */ + +//===----------------------- ONNXOperationTrait.hpp ----------------------===// +// +// Declares ONNXOperationTrait. 
+// +//===----------------------------------------------------------------------===// + +#pragma once + +#include "mlir/IR/OpDefinition.h" + +namespace mlir { + +/// A trait that specifies an ONNX operation type's name and version. +/// Assumes ConcreteType has static onnxName, onnxDomain, onnxSinceVersion +/// fields. +template +class ONNXOperationTrait : public OpTrait::TraitBase { +public: + static StringRef getONNXName() { return OP::onnxName; } + static StringRef getONNXDomain() { return OP::onnxDomain; } + static int getONNXSinceVersion() { return OP::onnxSinceVersion; } +}; + +} // namespace mlir diff --git a/src/Interface/ONNXOperationTrait.td b/src/Interface/ONNXOperationTrait.td new file mode 100644 index 0000000000..1c8fdd5101 --- /dev/null +++ b/src/Interface/ONNXOperationTrait.td @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: Apache-2.0 + +//===================- ONNXOperationTrait.td -*- tablegen -===================// +// +// ONNXOperationTrait defines static ONNX operation properties. 
+// +//===----------------------------------------------------------------------===// + +#ifdef ONNX_OPERATION_TRAIT +#else +#define ONNX_OPERATION_TRAIT + +#ifdef OP_BASE +#else +include "mlir/IR/OpBase.td" +#endif // OP_BASE + +def ONNXOperationTrait : NativeOpTrait<"ONNXOperationTrait"> { + let cppNamespace = "::mlir"; +} + +#endif // ONNX_OPERATION_TRAIT diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 6d2df0f602..232025c64d 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -14,17 +14,6 @@ add_custom_target(OMONNXOpsTableGenIncGen ${ONNX_MLIR_SRC_ROOT}/src/Dialect/ONNX/ONNXOps.td.inc DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/ONNXOps.td.inc) -# OpBuildTable.inc -> src/Builder/OpBuildTable.inc -add_custom_target(OMONNXOpsBuildTableIncGen - COMMAND ${CMAKE_COMMAND} -E rename - ${CMAKE_CURRENT_SOURCE_DIR}/OpBuildTable.inc - ${ONNX_MLIR_SRC_ROOT}/src/Builder/OpBuildTable.inc - DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/OpBuildTable.inc) - -add_custom_target(OMONNXOpsIncTranslation - DEPENDS OMONNXOpsTableGenIncGen - OMONNXOpsBuildTableIncGen) - add_custom_target(OMONNXCheckVersion COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_LIST_DIR}/gen_onnx_mlir.py --check-operation-version) diff --git a/utils/gen_onnx_mlir.py b/utils/gen_onnx_mlir.py index f122a6dbb0..73c56fe7f1 100755 --- a/utils/gen_onnx_mlir.py +++ b/utils/gen_onnx_mlir.py @@ -2,7 +2,7 @@ # After modifying this file, the script will need to run to rebuild the # onnx-mlir ONNX Dialect. This is performed by calling -# `make OMONNXOpsIncTranslation` in the build dir. +# `make OMONNXOpsTableGenIncGen` in the build dir. # If the changes are not seen, then you need to rebuild the entire onnx-mlir. 
# After changes that impact the documentation of the ops, run @@ -37,10 +37,6 @@ help="Output ONNXOps.td.inc content to stdout.", action="store_true", default=False) -parser.add_argument("--dry-run-op-build-table", - help="Output OpBuildTable.inc content to stdout.", - action="store_true", - default=False) parser.add_argument("--check-operation-version", help="check whether the imported onnx package has new operation or " " newer version of operation compared with version stored in version_dicts", @@ -290,18 +286,6 @@ ("then_branch", "else_branch"), } -# Special operation importing handlers. -special_op_handler = dict([ - ("BatchNormalization", "ImportNodeBatchNormalization"), - ("CategoryMapper", "ImportCategoryMapper"), - ("Dropout", "ImportNodeDropout"), - ("Cast", "ImportNodeCast"), - ("MaxPool", "ImportNodeMaxPool"), - ("Pad", "ImportNodePad"), - ("Slice", "ImportNodeSlice"), - ("Scan", "ImportScan"), -]) - # Operations with custom assembly format (alphabetical order). OpsWithCustomAssemblyFormat = [ 'Constant', @@ -426,16 +410,14 @@ # Op with Helper functions # Here the functions are for data flow analysis. 
OpsWithHelpers = { - "Loop": """ - mlir::Operation::result_range v_final(); - mlir::Operation::result_range scan_outputs(); - """, - "Scan": """ - mlir::Operation::operand_range getVInitial(); - mlir::Operation::result_range v_final(); - mlir::Operation::operand_range scan_inputs(); - mlir::Operation::result_range scan_outputs(); - """ + "Loop": + '{indent}mlir::Operation::result_range v_final();\n' + + '{indent}mlir::Operation::result_range scan_outputs();\n', + "Scan": + '{indent}mlir::Operation::operand_range getVInitial();\n' + + '{indent}mlir::Operation::result_range v_final();\n' + + '{indent}mlir::Operation::operand_range scan_inputs();\n' + + '{indent}mlir::Operation::result_range scan_outputs();\n' } # Type inference are usually done with the type string for Op definition @@ -550,28 +532,29 @@ custom_builder_ops_list = custom_builder_unranked_ops_list + custom_builder_broadcast_ops_list # A dictionary to add any special definition for an operation. -custom_definition_misc = dict([ ('Constant', - ''' let builders = [ - OpBuilder<(ins "Attribute":$sparse_value, "Attribute":$value), [{ - if (value) { - auto tensorType = value.cast().getType(); - build($_builder, $_state, tensorType, sparse_value, value, - FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr()); - } else { - auto tensorType = sparse_value.cast().getType(); - build($_builder, $_state, tensorType, sparse_value, value, - FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr()); - } - }]> - ];'''), - ('Cast', - ''' let builders = [ - OpBuilder<(ins "Value":$input, "TypeAttr":$to), [{ - auto resultType = mlir::UnrankedTensorType::get(to.getValue()); - build($_builder, $_state, resultType, input, to); - }] > - ];''' - )]) +custom_definition_misc = dict([ + ('Constant', + '{indent}let builders = [\n' + \ + '{indent} OpBuilder<(ins "Attribute":$sparse_value, "Attribute":$value), [{{\n' + \ + '{indent} if (value) {{\n' + \ + '{indent} auto tensorType = 
value.cast().getType();\n' + \ + '{indent} build($_builder, $_state, tensorType, sparse_value, value,\n' + \ + '{indent} FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n' + \ + '{indent} }} else {{\n' + \ + '{indent} auto tensorType = sparse_value.cast().getType();\n' + \ + '{indent} build($_builder, $_state, tensorType, sparse_value, value,\n' + \ + '{indent} FloatAttr(), ArrayAttr(), IntegerAttr(), ArrayAttr(), StringAttr(), ArrayAttr());\n' + \ + '{indent} }}\n' + \ + '{indent} }}]>\n' + \ + '{indent}];\n'), + ('Cast', + '{indent}let builders = [\n' + \ + '{indent} OpBuilder<(ins "Value":$input, "TypeAttr":$to), [{{\n' + \ + '{indent} auto resultType = mlir::UnrankedTensorType::get(to.getValue());\n' + \ + '{indent} build($_builder, $_state, resultType, input, to);\n' + \ + '{indent} }}]>\n' + \ + '{indent}];\n') +]) # Get this order from TensorProto in https://github.com/onnx/onnx/blob/main/onnx/onnx.in.proto#L481. # enum DataType { @@ -929,9 +912,15 @@ def get_output_type_mapping(schema): return mapping +def onnx_properties(indent, schema): + return ( + '{indent}static constexpr StringRef onnxName = "{name}";\n' + + '{indent}static constexpr StringRef onnxDomain = "{domain}";\n' + + '{indent}static constexpr int onnxSinceVersion = {since_version};\n' + ).format(name=schema.name, domain=schema.domain, since_version=schema.since_version, indent=indent) + def get_numberof_inout(s, indent, schema): expected_num_operands = get_numberof_list(schema.inputs) - indent = inc_indent(indent) s += indent + "static int getNumberOfOperands() {\n" indent = inc_indent(indent) s += indent + "return {};\n".format(expected_num_operands) @@ -954,7 +943,6 @@ def get_numberof_inout(s, indent, schema): return s - def get_promotable_const_operands_func(s, indent, const_operands_name_to_idx): cpp_name_to_idx_literal = "{" + ", ".join([ "{{\"{}\", {}}}".format(*name_to_idx) @@ -974,8 +962,6 @@ def get_promotable_const_operands_func(s, indent, 
const_operands_name_to_idx): return s def get_type_inference_func(s, indent, type_inference_code): - indent = inc_indent(indent) - s += indent + "std::vector resultTypeInference() {" + "\n" indent = inc_indent(indent) s += indent + "std::vector resultTypes;" + "\n" @@ -986,7 +972,6 @@ def get_type_inference_func(s, indent, type_inference_code): indent = dec_indent(indent) s += indent + "}" + "\n" - indent = dec_indent(indent) return s def parse_type_str(allowedType): @@ -1103,7 +1088,7 @@ def gen_op_def(schema, with_version = False): regions[attr.name] = "AnyRegion" # Generate decl for op traits. - traits = ["Pure"] + traits = ["Pure", "ONNXOperationTrait"] # Generate ConstantLike traits. if opName in OpsWithConstantLike: @@ -1123,14 +1108,6 @@ def gen_op_def(schema, with_version = False): indent = inc_indent(indent) - # Generate decl for custom assembly format. - if opName in OpsWithCustomAssemblyFormat: - s += indent + 'let hasCustomAssemblyFormat = 1;\n' - - # Generate decl for canonicalizer. - if opName in OpsWithCanonicalizer: - s += indent + 'let hasCanonicalizer = 1;\n' - # Generate decl for summary. s += indent + 'let summary = "ONNX {} operation";\n'.format(schema.name) @@ -1241,14 +1218,17 @@ def gen_op_def(schema, with_version = False): s += resultType.format(*operands, indent=indent) s += indent + 'build($_builder, $_state, {resultType}, operands, attributes);\n' indent = dec_indent(indent) - s += indent + '}]>' + s += indent + '}]>\n' - s += '\n' + indent + '];\n' + indent = dec_indent(indent) + s += indent + '];\n' ########################################### # Generate extraClassDeclaration. 
s += indent + "let extraClassDeclaration = [{\n" - #indent = inc_indent(indent) + indent = inc_indent(indent) + + s += onnx_properties(indent, schema) # Generate input/output number and output type mapping s = get_numberof_inout(s, indent, schema) @@ -1258,7 +1238,7 @@ def gen_op_def(schema, with_version = False): s, indent, OpsWithResultTypeInference[opName]) if opName in OpsWithHelpers: - s += OpsWithHelpers[opName] + s += OpsWithHelpers[opName].format(indent=indent) if len(regions): s += indent + "int64_t getSubgraphRegionIdx(const std::string& name) {\n" @@ -1268,6 +1248,8 @@ def gen_op_def(schema, with_version = False): s += indent + "llvm_unreachable(\"region with the specified name does not exist\");\n" indent = dec_indent(indent) s += indent + "}\n" + + indent = dec_indent(indent) s += indent + '}];\n' ########################################### @@ -1280,26 +1262,28 @@ def gen_op_def(schema, with_version = False): s += indent + '}];\n' if ( opName in custom_definition_misc) : - s += custom_definition_misc[opName] + '\n' + s += custom_definition_misc[opName].format(indent=indent) ########################################### # Generate decl for verifier. if opName in OpsWithVerifier: s += indent + 'let hasVerifier = 1;\n' + if opName in OpsWithFolder: s += indent + 'let hasFolder = 1;\n' + + # Generate decl for canonicalizer. + if opName in OpsWithCanonicalizer: + s += indent + 'let hasCanonicalizer = 1;\n' + + # Generate decl for custom assembly format. 
+ if opName in OpsWithCustomAssemblyFormat: + s += indent + 'let hasCustomAssemblyFormat = 1;\n' + s += '}\n\n' return s -def gen_op_versions(file) : - indent = inc_indent() - s = "" - for key, item in version_dict.items() : - s += indent + 'op_dialect_version_map_["' + key +'"] = ' - s += "{" + "{}".format(", ".join(str(x) for x in item)) + "};\n" - file.write(s) - """ special cases: * Split: attr split default value: sizeof(output1) namely 1 @@ -1309,46 +1293,6 @@ def gen_op_versions(file) : """ -def gen_op_importer(schema, file, with_version=False): - indent = inc_indent() - if with_version : - opName = schema.name + "V"+str(schema.since_version) - else : - opName = schema.name - s = indent + 'import_handler_map_["' + opName +'"] = \n ' - - expected_num_operands = len(schema.inputs) - expected_num_results = len(schema.outputs) - for input in schema.inputs: - if OpSchema.FormalParameterOption.Variadic == input.option: - expected_num_operands = -1 - for output in schema.outputs: - if OpSchema.FormalParameterOption.Variadic == output.option: - expected_num_results = -1 - - # Only support special op handler for the op without version. - if with_version: - handler_func = "buildOperation".format(opName) - else: - handler_func = special_op_handler.get( - schema.name, "buildOperation".format(opName)) - - # Special handlers currently require expected num operands/results to be specified. - # TODO: remove special handlers. 
- args = ["node"] - """ - if expected_num_operands != -1 or expected_num_results != -1 or "buildOperation" not in handler_func: - args.append( - "/* expected_num_operands = */ {}".format(expected_num_operands)) - args.append( - '/* expected_num_results = */ {}'.format(expected_num_results)) - """ - s += inc_indent(indent) + '&onnx_mlir::detail::FrontendGenImpl::' - s += handler_func+';\n' - - file.write(s) - - def build_operator_schemas(): # domain -> support level -> name -> [schema] index = defaultdict(lambda: defaultdict(lambda: defaultdict( @@ -1433,10 +1377,6 @@ def main(args): # type: (Type[Args]) -> None op_def = args.op_def op_def.write(autogen_warning) - op_importer = args.op_importer - op_importer.write(autogen_warning) - gen_op_versions(op_importer) - new_version_dict = dict() for domain, support_map in build_operator_schemas(): for _, name_map in support_map: @@ -1446,7 +1386,6 @@ def main(args): # type: (Type[Args]) -> None new_version_dict[schema.name] = [schema.since_version] if not check_operation_version : with_version = previous_name == schema.name - gen_op_importer(schema, op_importer, with_version) r = gen_op_def(schema, with_version) op_def.write(r) previous_name = schema.name @@ -1465,20 +1404,17 @@ def main(args): # type: (Type[Args]) -> None curr_dir = os.path.dirname(os.path.realpath(__file__)) class Args(object): - dry_run = args.dry_run_onnx_ops or args.dry_run_op_build_table + dry_run = args.dry_run_onnx_ops - # If either dry_run_onnx_ops or dry_run_op_build_table is true, then treat + # If dry_run_onnx_ops is true, then treat # both of them as true. Otherwise, one of them runs as a dry-run and one # of them runs as a real run creating unnecessary artifacts in the wrong # locations in the build tree. 
if dry_run: op_def = StringIO() - op_importer = StringIO() else: op_def_file_path = os.path.join(curr_dir, 'ONNXOps.td.inc') op_def = io.open(op_def_file_path, 'w', newline='') - op_importer_file_path = os.path.join(curr_dir, 'OpBuildTable.inc') - op_importer = io.open(op_importer_file_path, 'w', newline='') main(Args) # This is based on diff.py from llvm-project (llvm\utils\lit\lit\builtin_commands\diff.py). @@ -1497,5 +1433,3 @@ class Args(object): # Only output the generated values for the specifically requested dry run. if args.dry_run_onnx_ops: sys.stdout.write(Args.op_def.getvalue()) - if args.dry_run_op_build_table: - sys.stdout.write(Args.op_importer.getvalue())