Add a simple custom op example in C++ (#393)
* add custom op example

* code format

* update CMakeLists.txt

---------

Co-authored-by: Your Name <[email protected]>
mszhanyi and Your Name authored Mar 8, 2024
1 parent dfa685f commit 6f1cd56
Showing 4 changed files with 163 additions and 0 deletions.
16 changes: 16 additions & 0 deletions c_cxx/customop/CMakeLists.txt
@@ -0,0 +1,16 @@
cmake_minimum_required(VERSION 3.13 FATAL_ERROR)
project(customop_example C CXX)

set(CMAKE_CXX_STANDARD 17)

set(ONNXRUNTIME_ROOTDIR "" CACHE PATH "onnxruntime root dir")

add_executable(example example.cc)

target_include_directories(example PRIVATE "${ONNXRUNTIME_ROOTDIR}/include"
                                           "${ONNXRUNTIME_ROOTDIR}/include/onnxruntime"
                                           "${ONNXRUNTIME_ROOTDIR}/include/onnxruntime/core/session")

target_link_directories(example PRIVATE "${ONNXRUNTIME_ROOTDIR}/lib")

target_link_libraries(example onnxruntime)
16 changes: 16 additions & 0 deletions c_cxx/customop/README.md
@@ -0,0 +1,16 @@
This directory contains a C/C++ sample that demonstrates onnxruntime custom operators.

## Prerequisites
1. Download the onnxruntime binaries (onnxruntime-linux-xx.tgz or onnxruntime-win-x64-xx.zip) from the [onnxruntime release site](https://github.com/microsoft/onnxruntime/releases)
2. CMake (version >= 3.13)
3. The onnx Python package, used to generate the test model (installable with pip, as shown below)
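The Python dependency can be installed with pip:

```sh
pip install onnx
```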

## How to build and run the sample
1. Run `python kernelone.py` to generate the ONNX model file that contains the custom op.
2. Extract the onnxruntime binaries to any folder; that folder is your ONNXRUNTIME_ROOTDIR path.
3. Open a terminal, change your current directory to samples/c_cxx/customop, and run the commands below.
   - mkdir build && cd build
   - cmake .. -DONNXRUNTIME_ROOTDIR=/path/to/your/onnxruntime
   - cmake --build . --config Release
4. Add `${ONNXRUNTIME_ROOTDIR}/lib` to `LD_LIBRARY_PATH` on Linux or `PATH` on Windows so the onnxruntime shared library can be found at run time.
5. Copy the model generated in step 1 into the build folder and run the executable there (a full example session is sketched below).
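For reference, a complete Linux session might look like the following; the onnxruntime version and install path are illustrative, and the printed numbers are the element-wise sums of the two hard-coded inputs in example.cc:

```sh
# Generate the model containing the custom op
python kernelone.py

# Configure and build (point ONNXRUNTIME_ROOTDIR at the extracted release)
mkdir build && cd build
cmake .. -DONNXRUNTIME_ROOTDIR=$HOME/onnxruntime-linux-x64-1.17.1
cmake --build . --config Release

# Make the shared library discoverable, then run next to the model
export LD_LIBRARY_PATH=$HOME/onnxruntime-linux-x64-1.17.1/lib:$LD_LIBRARY_PATH
cp ../custom_kernel_one_model.onnx .
./example
# Expected output: 5.000000 7.000000 8.000000
```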
84 changes: 84 additions & 0 deletions c_cxx/customop/example.cc
@@ -0,0 +1,84 @@
#include <onnxruntime_lite_custom_op.h>

#include <iostream>
#include <memory>
#include <vector>

using namespace Ort::Custom;  // brings OrtLiteCustomOp and CreateLiteCustomOp into scope

// Kernel body for the custom op: element-wise Z = X + Y.
void KernelOne(const Ort::Custom::Tensor<float>& X, const Ort::Custom::Tensor<float>& Y,
               Ort::Custom::Tensor<float>& Z) {
  auto input_shape = X.Shape();
  auto x_raw = X.Data();
  auto y_raw = Y.Data();
  auto z_raw = Z.Allocate(input_shape);
  for (int64_t i = 0; i < Z.NumberOfElement(); ++i) {
    z_raw[i] = x_raw[i] + y_raw[i];
  }
}

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test");
  Ort::CustomOpDomain v1_domain{"v1"};
  // The custom op must have the same lifetime as the session that consumes it.
  std::unique_ptr<OrtLiteCustomOp> custom_op_one{
      Ort::Custom::CreateLiteCustomOp("CustomOpOne", "CPUExecutionProvider", KernelOne)};
  v1_domain.Add(custom_op_one.get());
  Ort::SessionOptions session_options;
  session_options.Add(v1_domain);

#ifdef _WIN32
  const wchar_t* model_path = L"custom_kernel_one_model.onnx";
#else
  const char* model_path = "custom_kernel_one_model.onnx";
#endif

  Ort::Session session(env, model_path, session_options);

  // Get input/output node names; the allocated strings must stay alive while
  // the raw pointers stored in input_names/output_names are in use.
  using AllocatedStringPtr = std::unique_ptr<char, Ort::detail::AllocatedFree>;
  std::vector<const char*> input_names;
  std::vector<AllocatedStringPtr> inputNodeNameAllocatedStrings;
  std::vector<const char*> output_names;
  std::vector<AllocatedStringPtr> outputNodeNameAllocatedStrings;
  Ort::AllocatorWithDefaultOptions allocator;
  size_t numInputNodes = session.GetInputCount();
  for (size_t i = 0; i < numInputNodes; i++) {
    auto input_name = session.GetInputNameAllocated(i, allocator);
    inputNodeNameAllocatedStrings.push_back(std::move(input_name));
    input_names.emplace_back(inputNodeNameAllocatedStrings.back().get());
  }
  size_t numOutputNodes = session.GetOutputCount();
  for (size_t i = 0; i < numOutputNodes; i++) {
    auto output_name = session.GetOutputNameAllocated(i, allocator);
    outputNodeNameAllocatedStrings.push_back(std::move(output_name));
    output_names.emplace_back(outputNodeNameAllocatedStrings.back().get());
  }

  std::vector<int64_t> input_shape = {3};
  std::vector<float> input_values_1 = {1.0f, 2.0f, 3.0f};
  std::vector<float> input_values_2 = {4.0f, 5.0f, 6.0f};
  auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault);
  Ort::Value input_tensor_1 = Ort::Value::CreateTensor<float>(memory_info, input_values_1.data(), input_values_1.size(),
                                                              input_shape.data(), input_shape.size());
  Ort::Value input_tensor_2 = Ort::Value::CreateTensor<float>(memory_info, input_values_2.data(), input_values_2.size(),
                                                              input_shape.data(), input_shape.size());
  std::vector<Ort::Value> input_tensors;
  input_tensors.emplace_back(std::move(input_tensor_1));
  input_tensors.emplace_back(std::move(input_tensor_2));

  std::vector<Ort::Value> output_tensors =
      session.Run(Ort::RunOptions{nullptr}, input_names.data(), input_tensors.data(), input_tensors.size(),
                  output_names.data(), output_names.size());

  std::cout << std::fixed;
  for (size_t j = 0; j < output_tensors.size(); j++) {
    const float* floatarr = output_tensors[j].GetTensorMutableData<float>();
    for (int i = 0; i < 3; i++) {
      std::cout << floatarr[i] << " ";
    }
    std::cout << std::endl;
  }

  return 0;
}
47 changes: 47 additions & 0 deletions c_cxx/customop/kernelone.py
@@ -0,0 +1,47 @@
import onnx
from onnx import TensorProto
from onnx.checker import check_model
from onnx.helper import make_graph, make_model, make_node, make_tensor_value_info


def create_custom_operator():
    # Define input and output names
    input_names = ["X", "Y"]
    output_names = ["Z"]

    # Create a node that uses the custom operator
    custom_op_node = make_node(
        "CustomOpOne",  # custom operator name
        input_names,
        output_names,
        domain="v1",  # custom domain name; must match the C++ registration
    )

    # Create an ONNX graph around the single custom node
    graph = make_graph(
        [custom_op_node],
        "custom_opone_model",
        [make_tensor_value_info(name, TensorProto.FLOAT, [3]) for name in input_names],
        [make_tensor_value_info(name, TensorProto.FLOAT, [3]) for name in output_names],
    )

    # Create the ONNX model
    model = make_model(graph)

    # check_model is left disabled: make_model declares no opset import for the
    # custom "v1" domain, so the checker would reject this model as-is.
    # check_model(model)

    print(model)

    # Save the model to a file
    onnx.save(model, "custom_kernel_one_model.onnx")


if __name__ == "__main__":
    create_custom_operator()
