From e73949a3327c7dce3880d2c2c072ce443b6d6101 Mon Sep 17 00:00:00 2001
From: Ti-Tai Wang
Date: Fri, 26 Apr 2024 16:24:42 -0700
Subject: [PATCH] Turn shape type inference strict mode to false in optimizer
 (#1472)

Fix #1443

In converter/dort, tensors retain their shape and type from PyTorch
models, and it saves us some effort to infer them all like we did in
torchscript. However, when it comes to symbolic shapes, we still need
ONNX shape type inference. An error is raised when the inferred shape and
type are different from the carried ones. This is rare, but it happens
when a corner case is revealed. For example, in #1443, PyTorch generates
2 outputs with size=0 when native_batch_norm is run with CUDA.

This PR turns off strict mode in ONNX shape type inference to avoid a
crash in the optimizer.
---
 onnxscript/optimizer/__init__.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/onnxscript/optimizer/__init__.py b/onnxscript/optimizer/__init__.py
index 7b98950e0..f70d4d35e 100644
--- a/onnxscript/optimizer/__init__.py
+++ b/onnxscript/optimizer/__init__.py
@@ -58,8 +58,12 @@ def optimize(
     for _ in range(num_iterations):
         if onnx_shape_inference:
             if model.ByteSize() < 1024 * 1024 * 1024 * 2:
+                # NOTE: strict mode is disabled because it crashes on the models
+                # that have different shapes inferred from the model carried shapes.
+                # The case can be found in:
+                # https://github.com/microsoft/onnxscript/issues/1443
                 model = onnx.shape_inference.infer_shapes(
-                    model, check_type=True, strict_mode=True, data_prop=True
+                    model, check_type=True, strict_mode=False, data_prop=True
                 )
             else:
                 logger.warning(