# Source: 02_plot_square_loss.py (forked from microsoft/onnxscript), 56 lines (38 loc), 1.43 KB
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""
Generating a ModelProto
=======================
This example demonstrates the use of *onnxscript* to define an ONNX model.
*onnxscript* behaves like a compiler. It converts a script into an ONNX model.
"""
# %%
# First, we define the implementation of a square-loss function in onnxscript.
import numpy as np
import onnx
from onnxruntime import InferenceSession
from onnxscript import FLOAT, script
from onnxscript import opset15 as op
@script()
def square_loss(X: FLOAT["N", 1], Y: FLOAT["N", 1]) -> FLOAT[1, 1]: # noqa: F821
    """Sum-of-squared-errors loss: ReduceSum((X - Y)**2).

    The @script() decorator compiles this function body into an ONNX
    graph. Inputs are N-by-1 float tensors; the result is kept 2-D
    (keepdims=1), presumably 1-by-1 after reducing all axes — the shapes
    are declared in the annotations, not enforced here.
    """
    # Element-wise difference, then sum of its squares over all elements.
    diff = X - Y
    return op.ReduceSum(diff * diff, keepdims=1)
# %%
# Compile the scripted function into an ONNX *ModelProto*:
model = square_loss.to_model_proto()
# %%
# Inspect the generated model as text.
print(onnx.printer.to_text(model))
# %%
# Run standard ONNX shape inference and validate the model.
model = onnx.shape_inference.infer_shapes(model)
onnx.checker.check_model(model)
# %%
# Finally, execute the model with *onnxruntime* through the usual
# InferenceSession API and compare against a NumPy reference.
session = InferenceSession(model.SerializeToString(), providers=["CPUExecutionProvider"])
x_input = np.array([[0.0], [1.0], [2.0]], dtype=np.float32)
y_input = np.array([[0.1], [1.2], [2.3]], dtype=np.float32)
outputs = session.run(None, {"X": x_input, "Y": y_input})
reference = ((x_input - y_input) ** 2).sum()
print(reference, outputs)