Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add one test failing for the optimizer after a model optimized and inlined #1465

Merged
merged 5 commits into the base branch from the contributor's branch
Apr 26, 2024
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions testdata/dort_models/llama_forward.onnx
Git LFS file not shown
41 changes: 40 additions & 1 deletion tests/optimizer/test_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,12 @@

import numpy as np
import onnx
import onnx.inliner
import onnxruntime
import parameterized

from onnxscript import optimizer
from onnxscript.rewriter import onnxruntime as ort_rewriter
from onnxscript.utils import evaluation_utils

_SKIP_TABLE = {}
Expand Down Expand Up @@ -64,6 +66,43 @@ def test_model_runs_and_matches_accuracy_after_optimization(self, model_name):
for output, expected_output in zip(outputs, expected_outputs):
np.testing.assert_allclose(output, expected_output, rtol=1e-3, atol=1e-3)

def test_optimizer_after_inlining(self):
    """Regression test: optimize/rewrite a model twice, inlining in between.

    Loads ``testdata/dort_models/llama_forward.onnx``, then runs
    ``optimizer.optimize`` + ``ort_rewriter.rewrite``, inlines local
    functions with ``onnx.inliner``, and runs the optimize/rewrite pass a
    second time.  After every step the model is checked by constructing an
    onnxruntime ``InferenceSession``, which raises if the resulting proto
    is invalid — the optimizer previously failed on a model that had
    already been optimized and inlined.
    """
    model_dir = pathlib.Path(model_folder_path) / ".." / "dort_models"
    filename = model_dir / "llama_forward.onnx"
    if not filename.exists():
        self.skipTest(f"Model {filename!r} does not exist")

    def _assert_loadable(model):
        # Creating an InferenceSession validates the serialized model;
        # it raises if the proto is malformed or an op is unsupported.
        onnxruntime.InferenceSession(
            model.SerializeToString(), providers=["CPUExecutionProvider"]
        )

    onnx_model = onnx.load(filename)
    _assert_loadable(onnx_model)

    # First optimize + rewrite pass.
    onnx_model = optimizer.optimize(onnx_model)
    _assert_loadable(onnx_model)
    onnx_model = ort_rewriter.rewrite(onnx_model)
    _assert_loadable(onnx_model)

    # Inline local functions into the main graph.
    onnx_model = onnx.inliner.inline_local_functions(onnx_model)
    _assert_loadable(onnx_model)

    # Second optimize + rewrite pass, now on the inlined model.
    onnx_model = optimizer.optimize(onnx_model)
    _assert_loadable(onnx_model)
    onnx_model = ort_rewriter.rewrite(onnx_model)
    _assert_loadable(onnx_model)


if __name__ == "__main__":
    # verbosity=2 prints each test name as it runs, which helps diagnose
    # CI failures in this module.
    unittest.main(verbosity=2)
Loading