Fix _aten_native_group_norm_onnx dtype | fix(torchlib) (#1171) #493

GitHub Actions / Test Results failed Nov 21, 2023 in 0s

55 fail, 2 796 skipped, 8 343 pass in 1h 6m 27s

      18 files  ±0        18 suites  ±0   1h 6m 27s ⏱️ +56s
 11 194 tests  ±0    8 343 ✔️ ±0    2 796 💤 -1       55 ❌ +1
160 176 runs   ±0   36 743 ✔️ ±0  121 381 💤 -3    2 052 ❌ +3

Results for commit 1690793. ± Comparison against earlier commit 660b9f4.

Annotations

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

All 3 runs failed: test_output_match_opinfo__nn_functional_scaled_dot_product_attention_bool_mask_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[4,3,8] input_0, float16[4,6,8] input_1, float16[4,6,8] input_2) => (float16[4,3,8] _val_5) 
   <float16 _val_3, float[3,6] _val_4>
{
   _val_3 = pkg.onnxscript.torch_lib._attention_scale (input_0)
   _val_4 = pkg.onnxscript.torch_lib._causal_attention_mask (input_0, input_1)
   _val_5 = pkg.onnxscript.torch_lib._aten_scaled_dot_product_attention_float_mask_onnx <dropout_p: float = 0> (input_0, input_1, input_2, _val_4, _val_3)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_attention_scale (query) => (scale)
{
   tmp = Shape (query)
   int64_m1 = Constant <value: tensor = int64 int64_m1 {-1}> ()
   tmp_subscripted = Gather <axis: int = 0> (tmp, int64_m1)
   embedding_size = CastLike (tmp_subscripted, query)
   const = Constant <value: tensor = float const {1}> ()
   tmp_0 = Sqrt (embedding_size)
   const_cast = CastLike (const, tmp_0)
   scale = Div (const_cast, tmp_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_causal_attention_mask (query, key) => (attn_mask_10)
{
   tmp = Shape (query)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   target_length = Slice (tmp, int64_m2_1d, int64_m1_1d, int64_0_1d, int64_1_1d)
   tmp_0 = Shape (key)
   int64_0_1d_1 = Constant <value: tensor = int64[1] int64_0_1d_1 {0}> ()
   int64_1_1d_2 = Constant <value: tensor = int64[1] int64_1_1d_2 {1}> ()
   int64_m2_1d_3 = Constant <value: tensor = int64[1] int64_m2_1d_3 {-2}> ()
   int64_m1_1d_4 = Constant <value: tensor = int64[1] int64_m1_1d_4 {-1}> ()
   source_length = Slice (tmp_0, int64_m2_1d_3, int64_m1_1d_4, int64_0_1d_1, int64_1_1d_2)
   size = Concat <axis: int = 0> (target_length, source_length)
   const = Constant <value: tensor = float const {1}> ()
   attn_mask = Expand (const, size)
   attn_mask_5 = Trilu <upper: int = 0> (attn_mask)
   const_6 = Constant <value: tensor = float const_6 {0}> ()
   const_6_cast = CastLike (const_6, attn_mask_5)
   tmp_7 = Equal (attn_mask_5, const_6_cast)
   tmp_8 = Constant <value_float: float = -inf> ()
   const_9 = Constant <value: tensor = float const_9 {0}> ()
   const_9_cast = CastLike (const_9, tmp_8)
   attn_mask_10 = Where (tmp_7, tmp_8, const_9_cast)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_aten_scaled_dot_product_attention_float_mask_onnx <dropout_p>(query, key, value, attn_mask, scale) => (return_val)
{
   key_shape = Shape (key)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   int64_9223372036854775807_1d = Constant <value: tensor = int64[1] int64_9223372036854775807_1d {9223372036854775807}> ()
   key_last_dim = Slice (key_shape, int64_m1_1d, int64_9223372036854775807_1d, int64_0_1d, int64_1_1d)
   int64_0_1d_0 = Constant <value: tensor = int64[1] int64_0_1d_0 {0}> ()
   int64_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
   key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
   int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
   int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
   int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
   key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
   tmp = Constant <value_ints: ints = [-1]> ()
   key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
   key_squeezed = Reshape (key, key_squeezed_shape)
   key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
   key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
   key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
   tmp_6 = Sqrt (scale)
   query_scaled = Mul (query, tmp_6)
   tmp_7 = Sqrt (scale)
   key_transposed_scaled = Mul (key_transposed, tmp_7)
   tmp_8 = MatMul (query_scaled, key_transposed_scaled)
   tmp_9 = Add (tmp_8, attn_mask)
   attn_weight = Softmax <axis: int = -1> (tmp_9)
   dropout_p = Constant <value_float: float = @dropout_p> ()
   attn_weight_10, _ = Dropout (attn_weight, dropout_p)
   return_val = MatMul (attn_weight_10, value)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
RuntimeError: ONNX Runtime failed to evaluate:
Inputs:
{'input_0': array([[[ 2.04   ,  2.672  ,  4.613  ,  1.248  ,  3.639  ,  4.5    ,
          3.27   ,  4.965  ],
        [ 7.777  ,  2.268  , -3.242  , -4.867  ,  8.37   , -0.4746 ,
          0.0967 ,  2.89   ],
        [-2.54   , -2.117  , -0.501  , -5.863  , -0.4658 ,  3.578  ,
         -3.137  , -5.73   ]],

       [[ 0.06152,  1.063  ,  3.05   , -3.066  , -8.22   ,  5.688  ,
         -2.875  ,  2.77   ],
        [ 1.556  , -4.26   , -2.574  , -1.371  , -8.65   , -2.504  ,
          6.18   ,  8.1    ],
        [-4.598  , -0.58   ,  1.266  ,  1.758  , -0.8438 ,  8.89   ,
          6.355  , -4.957  ]],

       [[-2.574  , -2.398  , -0.879  , -1.02   ,  4.895  ,  4.797  ,
          2.215  , -5.582  ],
        [ 5.21   , -1.248  , -6.758  , -2.777  ,  7.156  ,  8.086  ,
         -5.055  , -8.92   ],
        [-1.767  , -1.995  ,  2.117  ,  2.197  , -1.301  ,  0.03516,
          1.037  , -0.0791 ]],

       [[ 7.03   , -5.723  ,  0.5625 , -7.727  , -7.04   ,  2.092  ,
         -7.453  ,  6.836  ],
        [-1.512  ,  2.469  , -8.45   ,  1.898  ,  7.496  , -1.74   ,
         -2.021  , -2.953  ],
        [ 4.043  ,  8.16   ,  5.35   , -8.086  ,  0.8613 , -4.516  ,
         -5.625  , -5.45   ]]], dtype=float16),
 'input_1': array([[[ 8.85   , -1.775  , -4.457  , -4.824  ,  8.58   , -2.777  ,
          7.58   ,  5.66   ],
        [ 7.637  , -2.232  ,  3.832  ,  0.1934 , -0.2461 ,  4.957  ,
         -3.059  , -2.734  ],
        [ 4.016  , -8.28   ,  1.266  ,  0.7383 ,  0.677  , -4.992  ,
          7.707  , -9.     ],
        [-8.56   , -2.988  , -2.707  ,  6.777  ,  3.91   , -5.062  ,
         -1.266  , -4.72   ],
        [ 7.023  , -8.71   ,  3.05   , -8.17   ,  0.624  ,  4.836  ,
         -7.656  , -6.812  ],
        [-3.086  , -5.16   , -7.973  , -2.232  ,  7.82   ,  2.68   ,
         -6.652  ,  8.44   ]],

       [[ 0.4658 , -6.934  , -5.59   , -0.3076 ,  6.44   , -2.303  ,
          7.242  , -5.484  ],
        [-3.523  , -2.268  ,  2.654  , -0.9316 ,  1.811  ,  2.004  ,
         -1.512  ,  7.99   ],
        [-3.93   , -8.35   , -5.188  , -8.1    ,  3.7    ,  6.18   ,
         -2.293  , -2.523  ],
        [-1.925  ,  2.68   , -8.15   ,  7.46   , -1.995  ,  2.936  ,
         -1.459  , -5.188  ],
        [-5.08   ,  8.73   ,  2.7    , -6.82   , -7.55   ,  4.22   ,
         -0.3604 ,  2.936  ],
        [-0.04395, -4.246  , -2.338  ,  0.923  ,  4.938  , -8.3    ,
         -7.84   , -2.004  ]],

       [[-1.099  , -7.797  , -7.39   ,  3.516  ,  2.89   , -2.11   ,
          4.457  ,  7.48   ],
        [-0.3604 , -8.41   , -4.21   ,  6.793  , -8.55   ,  3.945  ,
         -7.207  , -7.902  ],
        [ 6.555  , -8.63   ,  6.6    ,  8.52   ,  7.75   , -8.03   ,
         -2.32   ,  5.82   ],
        [ 1.6    , -1.556  , -8.17   ,  8.52   ,  3.277  ,  8.01   ,
          4.562  , -1.099  ],
        [-5.844  , -1.099  ,  6.11   , -6.54   ,  1.705  ,  7.586  ,
          1.705  , -3.146  ],
        [-8.19   , -3.102  ,  8.305  , -8.47   , -3.438  ,  0.4395 ,
          3.533  ,  6.926  ]],

       [[ 0.03516,  4.086  , -3.7    , -3.016  ,  7.277  , -4.316  ,
          3.55   , -1.644  ],
        [ 4.5    , -3.34   , -6.96   , -4.402  , -5.97   ,  0.3955 ,
         -4.21   ,  8.3    ],
        [ 0.677  ,  6.406  ,  7.137  ,  8.1    ,  0.633  , -2.031  ,
         -6.82   , -8.59   ],
        [ 1.055  , -7.13   , -6.906  ,  0.4834 , -5.934  , -8.07   ,
         -1.705  , -8.586  ],
        [ 5.027  , -6.047  ,  0.2197 , -1.942  ,  2.25   , -8.94   ,
         -3.516  ,  7.61   ],
        [ 2.215  ,  6.074  , -2.69   , -6.344  , -3.393  , -8.516  ,
         -2.629  , -4.387  ]]], dtype=float16),
 'input_2': array([[[-4.844  , -8.766  ,  8.63   , -8.32   ,  1.89   ,  3.383  ,
         -5.8    , -3.156  ],
        [-4.387  , -2.865  ,  2.734  , -1.248  ,  0.05273,  0.01758,
          5.47   , -0.9316 ],
        [-2.418  , -5.82   ,  6.594  ,  4.457  ,  8.83   ,  2.398  ,
          4.438  , -1.925  ],
        [-2.514  ,  7.75   ,  0.12305,  1.679  ,  8.65   ,  5.54   ,
         -4.746  , -8.766  ],
        [-2.734  ,  0.334  ,  8.37   ,  2.39   ,  2.021  , -8.25   ,
          4.156  , -7.902  ],
        [-1.872  , -4.29   , -7.734  ,  4.605  ,  1.8545 , -8.79   ,
          5.09   ,  3.453  ]],

       [[-8.42   , -6.96   , -8.05   ,  1.274  , -8.03   , -7.004  ,
         -8.03   ,  4.12   ],
        [-8.71   , -3.533  ,  6.812  ,  8.22   ,  3.234  , -2.434  ,
         -3.78   ,  4.86   ],
        [-5.273  , -3.621  ,  4.543  , -2.926  ,  2.469  ,  2.805  ,
          6.477  ,  3.885  ],
        [ 8.36   , -6.242  , -1.301  ,  8.484  ,  6.504  ,  3.305  ,
          2.531  ,  3.832  ],
        [-3.191  , -6.574  ,  6.23   ,  5.105  ,  4.414  , -3.523  ,
         -4.473  ,  3.066  ],
        [-1.6    ,  3.91   ,  7.285  , -5.934  ,  5.33   ,  5.83   ,
         -1.775  ,  1.195  ]],

       [[-2.338  , -1.107  , -6.875  , -4.234  ,  0.3428 , -6.996  ,
         -4.19   , -0.923  ],
        [ 1.951  , -8.95   ,  2.82   , -4.895  ,  6.426  , -8.35   ,
         -8.98   ,  7.438  ],
        [-3.332  , -7.973  , -1.266  ,  5.316  , -4.58   ,  8.766  ,
         -0.6855 , -3.965  ],
        [ 3.867  , -7.305  , -1.564  , -2.725  ,  3.438  ,  0.2197 ,
          3.814  , -7.49   ],
        [ 2.629  ,  5.66   , -6.145  ,  3.594  ,  1.028  , -1.661  ,
          6.906  , -2.645  ],
        [-7.03   , -4.332  , -8.016  , -7.777  , -4.094  , -9.     ,
         -8.22   , -3.262  ]],

       [[-6.195  ,  4.824  ,  7.066  ,  7.848  , -7.79   ,  4.484  ,
          7.62   , -1.582  ],
        [ 8.805  , -6.734  ,  5.906  , -1.081  , -2.945  ,  8.92   ,
         -8.92   , -6.715  ],
        [ 8.07   ,  5.703  , -7.496  , -1.116  , -8.89   ,  1.468  ,
          5.633  ,  5.23   ],
        [-6.188  ,  2.795  ,  5.38   , -6.117  , -2.11   , -2.18   ,
          8.52   , -1.002  ],
        [ 8.08   ,  4.824  ,  8.914  ,  3.674  ,  5.316  ,  0.826  ,
          7.17   ,  5.098  ],
        [-6.215  ,  8.39   , -6.934  ,  8.305  , -6.074  ,  7.77   ,
         -5.703  ,  4.023  ]]], dtype=float16)}
Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[4,3,8] input_0, float16[4,6,8] input_1, float16[4,6,8] input_2) => (float16[4,3,8] _val_4) 
   <float16 _val_3>
{
   _val_3 = pkg.onnxscript.torch_lib._attention_scale (input_0)
   _val_4 = pkg.onnxscript.torch_lib._aten_scaled_dot_product_attention_no_mask_onnx <dropout_p: float = 0> (input_0, input_1, input_2, _val_3)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_attention_scale (query) => (scale)
{
   tmp = Shape (query)
   int64_m1 = Constant <value: tensor = int64 int64_m1 {-1}> ()
   tmp_subscripted = Gather <axis: int = 0> (tmp, int64_m1)
   embedding_size = CastLike (tmp_subscripted, query)
   const = Constant <value: tensor = float const {1}> ()
   tmp_0 = Sqrt (embedding_size)
   const_cast = CastLike (const, tmp_0)
   scale = Div (const_cast, tmp_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_aten_scaled_dot_product_attention_no_mask_onnx <dropout_p>(query, key, value, scale) => (return_val)
{
   key_shape = Shape (key)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   int64_9223372036854775807_1d = Constant <value: tensor = int64[1] int64_9223372036854775807_1d {9223372036854775807}> ()
   key_last_dim = Slice (key_shape, int64_m1_1d, int64_9223372036854775807_1d, int64_0_1d, int64_1_1d)
   int64_0_1d_0 = Constant <value: tensor = int64[1] int64_0_1d_0 {0}> ()
   int64_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
   key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
   int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
   int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
   int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
   key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
   tmp = Constant <value_ints: ints = [-1]> ()
   key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
   key_squeezed = Reshape (key, key_squeezed_shape)
   key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
   key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
   key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
   tmp_6 = Sqrt (scale)
   query_scaled = Mul (query, tmp_6)
   tmp_7 = Sqrt (scale)
   key_transposed_scaled = Mul (key_transposed, tmp_7)
   tmp_8 = MatMul (query_scaled, key_transposed_scaled)
   attn_weight = Softmax <axis: int = -1> (tmp_8)
   dropout_p = Constant <value_float: float = @dropout_p> ()
   attn_weight_9, _ = Dropout (attn_weight, dropout_p)
   return_val = MatMul (attn_weight_9, value)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[4,4,3,8] input_0, float16[4,4,6,8] input_1, float16[4,4,6,8] input_2) => (float16[4,4,3,8] _val_5) 
   <float16 _val_3, float[3,6] _val_4>
{
   _val_3 = pkg.onnxscript.torch_lib._attention_scale (input_0)
   _val_4 = pkg.onnxscript.torch_lib._causal_attention_mask (input_0, input_1)
   _val_5 = pkg.onnxscript.torch_lib._aten_scaled_dot_product_attention_float_mask_onnx <dropout_p: float = 0> (input_0, input_1, input_2, _val_4, _val_3)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_attention_scale (query) => (scale)
{
   tmp = Shape (query)
   int64_m1 = Constant <value: tensor = int64 int64_m1 {-1}> ()
   tmp_subscripted = Gather <axis: int = 0> (tmp, int64_m1)
   embedding_size = CastLike (tmp_subscripted, query)
   const = Constant <value: tensor = float const {1}> ()
   tmp_0 = Sqrt (embedding_size)
   const_cast = CastLike (const, tmp_0)
   scale = Div (const_cast, tmp_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_causal_attention_mask (query, key) => (attn_mask_10)
{
   tmp = Shape (query)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   target_length = Slice (tmp, int64_m2_1d, int64_m1_1d, int64_0_1d, int64_1_1d)
   tmp_0 = Shape (key)
   int64_0_1d_1 = Constant <value: tensor = int64[1] int64_0_1d_1 {0}> ()
   int64_1_1d_2 = Constant <value: tensor = int64[1] int64_1_1d_2 {1}> ()
   int64_m2_1d_3 = Constant <value: tensor = int64[1] int64_m2_1d_3 {-2}> ()
   int64_m1_1d_4 = Constant <value: tensor = int64[1] int64_m1_1d_4 {-1}> ()
   source_length = Slice (tmp_0, int64_m2_1d_3, int64_m1_1d_4, int64_0_1d_1, int64_1_1d_2)
   size = Concat <axis: int = 0> (target_length, source_length)
   const = Constant <value: tensor = float const {1}> ()
   attn_mask = Expand (const, size)
   attn_mask_5 = Trilu <upper: int = 0> (attn_mask)
   const_6 = Constant <value: tensor = float const_6 {0}> ()
   const_6_cast = CastLike (const_6, attn_mask_5)
   tmp_7 = Equal (attn_mask_5, const_6_cast)
   tmp_8 = Constant <value_float: float = -inf> ()
   const_9 = Constant <value: tensor = float const_9 {0}> ()
   const_9_cast = CastLike (const_9, tmp_8)
   attn_mask_10 = Where (tmp_7, tmp_8, const_9_cast)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_aten_scaled_dot_product_attention_float_mask_onnx <dropout_p>(query, key, value, attn_mask, scale) => (return_val)
{
   key_shape = Shape (key)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   int64_9223372036854775807_1d = Constant <value: tensor = int64[1] int64_9223372036854775807_1d {9223372036854775807}> ()
   key_last_dim = Slice (key_shape, int64_m1_1d, int64_9223372036854775807_1d, int64_0_1d, int64_1_1d)
   int64_0_1d_0 = Constant <value: tensor = int64[1] int64_0_1d_0 {0}> ()
   int64_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
   key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
   int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
   int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
   int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
   key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
   tmp = Constant <value_ints: ints = [-1]> ()
   key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
   key_squeezed = Reshape (key, key_squeezed_shape)
   key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
   key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
   key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
   tmp_6 = Sqrt (scale)
   query_scaled = Mul (query, tmp_6)
   tmp_7 = Sqrt (scale)
   key_transposed_scaled = Mul (key_transposed, tmp_7)
   tmp_8 = MatMul (query_scaled, key_transposed_scaled)
   tmp_9 = Add (tmp_8, attn_mask)
   attn_weight = Softmax <axis: int = -1> (tmp_9)
   dropout_p = Constant <value_float: float = @dropout_p> ()
   attn_weight_10, _ = Dropout (attn_weight, dropout_p)
   return_val = MatMul (attn_weight_10, value)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
RuntimeError: ONNX Runtime failed to evaluate:
Inputs:
{'input_0': array([[[[ 1.283  , -8.02   , -3.604  ,  4.895  , -0.8877 ,  4.332  ,
           4.43   , -4.836  ],
         [-1.661  ,  7.812  , -6.625  ,  8.734  , -5.31   ,  7.875  ,
          -2.795  ,  3.217  ],
         [ 2.453  ,  2.479  , -8.66   ,  8.42   , -7.348  ,  1.433  ,
          -0.2725 ,  8.69   ]],

        [[ 4.703  , -4.297  ,  8.8    ,  0.02637,  7.33   , -6.46   ,
           8.37   , -5.203  ],
         [-4.867  ,  1.582  ,  2.172  , -3.488  , -6.555  , -1.301  ,
          -3.902  , -1.424  ],
         [-6.777  , -7.418  ,  7.285  , -3.023  , -1.758  ,  4.465  ,
           6.68   , -2.855  ]],

        [[-4.035  ,  2.855  ,  3.129  ,  7.242  ,  5.703  , -3.031  ,
          -5.57   ,  5.4    ],
         [ 8.2    ,  0.7383 ,  2.777  , -7.145  ,  4.516  , -5.633  ,
           6.062  , -6.004  ],
         [ 7.156  ,  8.46   ,  8.67   , -1.591  ,  0.3252 ,  8.875  ,
           4.484  , -5.316  ]],

        [[-2.408  ,  1.477  ,  8.47   ,  7.98   ,  3.006  ,  7.25   ,
          -6.32   ,  4.754  ],
         [-2.84   , -1.371  , -4.29   ,  0.9756 ,  3.604  ,  8.31   ,
          -7.2    ,  1.617  ],
         [ 3.348  , -4.65   ,  3.322  ,  0.4043 ,  7.777  ,  6.496  ,
           4.836  ,  4.633  ]]],


       [[[-4.58   ,  0.1846 , -7.137  , -6.285  , -3.2    ,  8.5    ,
          -2.39   , -6.617  ],
         [ 5.008  ,  8.914  ,  7.016  , -0.7646 ,  1.767  , -8.73   ,
          -5.117  , -7.586  ],
         [-2.268  , -6.777  , -4.43   , -0.4219 ,  5.71   ,  4.21   ,
          -8.92   , -2.629  ]],

        [[-4.57   , -2.11   ,  7.34   ,  4.914  , -5.176  ,  0.967  ,
          -7.664  ,  5.57   ],
         [-0.949  , -1.371  ,  0.8877 , -2.39   ,  7.312  ,  1.67   ,
           6.4    , -4.062  ],
         [ 7.06   ,  0.703  , -4.71   , -1.143  ,  0.7646 ,  1.696  ,
          -8.09   ,  6.875  ]],

        [[ 0.835  ,  3.709  ,  7.82   ,  1.731  ,  0.     , -0.1582 ,
          -5.43   ,  3.312  ],
         [-7.54   , -4.535  ,  5.598  , -6.258  ,  5.203  ,  3.664  ,
          -2.303  , -3.023  ],
         [-4.816  , -8.37   ,  1.23   , -3.895  , -6.707  ,  2.98   ,
           2.7    , -6.855  ]],

        [[ 4.516  ,  2.953  ,  5.047  , -4.08   ,  0.659  ,  7.727  ,
          -7.47   , -8.05   ],
         [-7.4    , -7.32   , -8.44   ,  7.453  , -0.545  ,  4.156  ,
           6.875  ,  0.87   ],
         [ 4.773  , -1.415  ,  1.116  ,  3.965  ,  8.02   , -5.766  ,
          -1.529  , -8.63   ]]],


       [[[ 7.76   ,  3.674  , -4.72   , -8.32   , -1.749  ,  7.03   ,
           2.363  , -3.086  ],
         [-2.848  , -2.338  ,  7.902  , -2.61   ,  2.76   , -0.879  ,
           7.47   , -1.081  ],
         [-1.195  , -0.2812 ,  8.3    ,  1.468  , -6.03   ,  8.89   ,
          -7.312  ,  3.973  ]],

        [[-6.523  ,  2.945  , -5.582  , -0.2197 , -4.395  ,  2.102  ,
           7.305  ,  8.414  ],
         [ 4.234  , -3.217  , -2.715  ,  5.195  , -1.178  ,  4.867  ,
           4.016  ,  0.7734 ],
         [ 5.793  ,  0.826  , -5.035  , -5.246  ,  1.318  ,  4.508  ,
           3.297  ,  0.1846 ]],

        [[-1.248  , -6.258  ,  8.75   , -1.626  , -4.445  ,  1.802  ,
           8.36   , -5.71   ],
         [-5.047  ,  5.492  ,  6.883  , -0.0879 , -3.78   ,  1.564  ,
           1.837  , -4.613  ],
         [-4.93   ,  7.375  ,  1.081  ,  8.72   , -8.016  ,  0.0967 ,
           1.099  ,  4.957  ]],

        [[-6.1    ,  7.91   ,  2.479  , -7.777  ,  3.516  , -1.081  ,
           0.8438 , -4.465  ],
         [-4.008  ,  8.11   , -1.573  ,  0.5977 , -7.973  , -1.204  ,
           0.51   ,  0.2812 ],
         [ 2.785  , -8.57   ,  7.727  ,  4.29   , -8.84   , -2.629  ,
          -7.277  ,  7.82   ]]],


       [[[ 3.945  ,  8.695  , -4.094  ,  5.96   , -5.035  , -6.47   ,
           1.23   ,  0.7295 ],
         [ 6.09   , -5.57   ,  5.188  , -7.117  ,  4.613  , -7.117  ,
           3.533  , -7.883  ],
         [ 7.215  ,  4.184  , -2.328  , -5.457  ,  0.2461 ,  6.953  ,
          -6.04   , -1.705  ]],

        [[-2.734  , -5.836  , -4.008  ,  3.438  , -7.094  ,  5.035  ,
           5.87   , -7.234  ],
         [-8.86   , -6.18   , -4.457  ,  5.     ,  2.848  ,  3.613  ,
           2.785  , -3.023  ],
         [ 8.47   ,  0.712  ,  4.156  ,  4.105  , -5.273  ,  8.3    ,
           6.414  ,  6.047  ]],

        [[ 1.784  ,  5.117  , -0.05273, -5.61   , -2.172  , -8.15   ,
           3.023  ,  7.047  ],
         [-7.18   ,  4.508  ,  5.582  ,  6.953  , -3.86   , -7.55   ,
          -8.81   , -7.656  ],
         [ 8.24   ,  3.85   ,  2.584  , -7.086  , -3.129  ,  4.344  ,
          -6.99   , -8.836  ]],

        [[ 8.664  , -4.15   , -0.659  , -7.707  ,  0.9404 , -5.47   ,
          -3.77   ,  4.234  ],
         [-5.78   ,  7.32   ,  3.629  ,  2.707  , -1.96   , -0.9404 ,
           7.33   ,  1.169  ],
         [ 6.312  ,  2.479  ,  6.83   , -8.37   , -4.78   ,  3.086  ,
          -4.086  ,  2.855  ]]]], dtype=float16),
 'input_1': array([[[[-2.4609e+00,  3.8848e+00, -8.1328e+00,  5.0977e-01,
          -4.5430e+00, -6.7422e+00, -5.3789e+00,  3.9648e+00],
         [ 5.6250e-01, -3.7793e+00,  1.3447e+00,  8.6484e+00,
          -6.6719e+00, -1.7930e+00,  6.8555e-01,  2.7598e+00],
         [-3.1914e+00, -6.8555e-01, -4.0859e+00, -9.4922e-01,
          -1.1777e+00,  2.1719e+00,  6.9336e+00, -1.3799e+00],
         [-3.6484e+00, -5.3711e+00, -8.7891e+00,  8.8281e+00,
          -6.5117e+00,  3.9375e+00, -1.2656e+00, -6.3633e+00],
         [ 5.8887e-01,  5.2734e-02, -1.8281e+00,  1.1953e+00,
           1.4326e+00, -8.2812e+00,  7.8750e+00,  5.7031e+00],
         [ 3.6836e+00,  6.3281e-01,  2.0742e+00, -8.6016e+00,
          -3.5781e+00, -8.5254e-01,  7.0234e+00, -6.7070e+00]],

        [[-7.4609e+00,  1.4502e+00, -3.2344e+00, -1.6084e+00,
          -5.7578e+00,  6.9766e+00, -8.5312e+00, -2.9453e+00],
         [ 3.6738e+00,  6.6367e+00,  4.4453e+00,  2.9883e+00,
           1.1074e+00,  3.5859e+00,  8.1094e+00, -5.7812e+00],
         [ 2.3730e+00, -3.3477e+00,  5.3086e+00, -5.6797e+00,
          -8.3672e+00,  8.1016e+00,  8.0938e+00, -9.6680e-01],
         [ 2.1719e+00, -1.4502e+00,  6.8906e+00, -8.6328e+00,
          -5.6953e+00,  3.5156e-01, -7.5156e+00,  8.3047e+00],
         [-7.1445e+00,  4.3242e+00,  5.9688e+00, -8.6641e+00,
          -5.7656e+00, -2.3555e+00, -7.6797e+00,  9.6680e-01],
         [ 4.2109e+00,  3.8242e+00,  4.0430e-01, -8.7891e-03,
           5.0469e+00, -2.5312e+00,  8.9297e+00,  3.2070e+00]],

        [[-8.4062e+00, -6.3828e+00, -7.1191e-01, -1.9600e+00,
          -6.4062e+00, -7.2266e+00, -8.4688e+00, -6.9434e-01],
         [-5.4492e-01,  7.2852e+00,  2.7500e+00,  1.4062e+00,
          -6.1016e+00,  4.6328e+00, -6.0391e+00,  5.3164e+00],
         [ 2.6641e+00,  4.4141e+00,  5.7031e+00,  2.9609e+00,
           4.8242e+00, -3.3926e+00, -6.5938e+00,  1.4326e+00],
         [ 3.8848e+00, -6.2031e+00, -3.8320e+00, -4.5781e+00,
           6.0195e+00, -5.6094e+00,  8.5156e+00, -1.3623e+00],
         [-1.6875e+00, -8.7891e-02,  7.7969e+00, -3.7090e+00,
           6.5820e+00,  6.8125e+00, -2.9355e+00, -4.6680e+00],
         [-4.7031e+00, -2.6719e+00,  6.3281e-01, -4.5352e+00,
          -5.5820e+00,  5.4297e+00,  7.5234e+00, -6.4141e+00]],

        [[ 3.2344e+00,  8.3906e+00, -2.0117e+00, -1.4062e+00,
           6.0898e+00, -4.1836e+00, -3.3047e+00,  7.4609e+00],
         [-4.0156e+00,  1.6348e+00, -1.5117e+00, -2.2422e+00,
          -5.0977e+00,  5.0000e+00, -5.8203e+00, -7.9297e+00],
         [ 1.6436e+00, -2.4883e+00,  8.9375e+00, -3.0312e+00,
          -5.3164e+00,  5.4922e+00,  2.2070e+00,  5.4297e+00],
         [ 4.8164e+00,  2.9355e+00, -8.2031e+00,  4.6484e+00,
           7.6016e+00, -8.4531e+00,  5.8086e+00, -4.5078e+00],
         [ 1.2129e+00,  6.1250e+00,  1.6172e+00,  1.7930e+00,
          -2.2227e+00,  1.8721e+00, -6.7578e+00, -7.8203e+00],
         [ 5.0273e+00, -1.3184e+00, -1.7842e+00, -8.2344e+00,
           7.3398e+00, -3.2520e-01,  1.8105e+00,  1.1250e+00]]],


       [[[-1.7754e+00,  1.7578e-02, -4.4824e-01, -7.9980e-01,
           6.6172e+00, -7.9453e+00,  3.3750e+00,  5.3867e+00],
         [ 3.2520e-01,  2.9883e+00, -1.4941e-01, -8.7891e-01,
          -3.3398e+00,  8.8594e+00, -2.7344e+00, -1.4502e+00],
         [-8.8281e+00, -6.8828e+00, -5.0078e+00, -5.4492e-01,
          -3.5508e+00, -6.3438e+00,  5.2305e+00,  5.2188e+00],
         [-1.9512e+00, -5.3613e-01, -8.4062e+00,  1.1250e+00,
          -7.2852e+00, -7.7773e+00,  3.8945e+00, -6.5234e+00],
         [-6.2656e+00, -2.4531e+00,  4.6953e+00,  5.8359e+00,
          -6.5742e+00, -8.4688e+00,  2.7949e+00,  7.8125e+00],
         [-8.5547e+00,  7.5156e+00, -7.6641e+00,  6.5742e+00,
           2.7246e+00,  7.1445e+00, -4.2266e+00, -4.0234e+00]],

        [[ 7.5859e+00,  3.4102e+00,  4.8086e+00, -3.5430e+00,
           8.7969e+00,  1.5469e+00,  3.1719e+00, -6.2500e+00],
         [-1.9160e+00, -1.5557e+00, -8.1562e+00, -1.2656e+00,
           1.8633e+00, -1.7227e+00, -7.8047e+00,  4.3945e-02],
         [-1.2305e-01, -8.0469e+00, -4.2031e+00,  3.3398e-01,
          -3.1914e+00, -5.2734e+00, -3.4727e+00, -6.7148e+00],
         [ 3.7344e+00, -8.2422e+00, -4.9219e+00, -8.3438e+00,
          -4.8672e+00,  8.5703e+00,  8.4531e+00, -5.3711e+00],
         [-1.7578e+00, -1.1426e+00, -2.6289e+00, -4.9922e+00,
           2.4961e+00,  2.0918e+00,  5.9414e+00,  5.2578e+00],
         [-3.2695e+00, -3.1016e+00,  4.8945e+00,  3.2617e+00,
          -4.6562e+00,  5.4766e+00,  8.0703e+00,  3.8672e-01]],

        [[ 3.5156e+00,  2.1621e+00,  5.4062e+00, -3.7617e+00,
          -5.0547e+00, -4.7461e+00, -3.3320e+00, -3.5586e+00],
         [ 1.4941e-01,  5.3164e+00, -3.1914e+00, -2.3477e+00,
          -6.4688e+00,  3.6289e+00, -2.6719e+00, -3.4023e+00],
         [-6.9453e+00, -5.3516e+00, -7.6465e-01, -4.6250e+00,
          -4.4824e-01, -3.4375e+00,  5.7031e+00,  8.8438e+00],
         [-4.3945e-02, -8.0000e+00, -8.1738e-01, -3.0859e+00,
           6.2578e+00,  2.0469e+00,  4.7383e+00,  8.7891e-02],
         [-7.1992e+00,  5.9609e+00, -6.8359e+00,  4.9062e+00,
          -2.3477e+00,  1.1074e+00, -7.7188e+00, -7.9727e+00],
         [ 8.0000e+00,  3.3477e+00, -5.3867e+00,  5.8281e+00,
           5.1250e+00, -5.8203e+00,  4.4648e+00, -7.8047e+00]],

        [[ 1.5293e+00,  2.5312e+00, -5.8887e-01,  4.2188e+00,
          -8.8438e+00, -7.2969e+00, -6.1094e+00,  8.2188e+00],
         [-7.4688e+00, -2.0215e-01, -5.0625e+00,  8.1250e+00,
          -6.9434e-01,  3.1016e+00,  4.3750e+00, -7.7695e+00],
         [-2.3477e+00,  7.8750e+00, -8.3496e-01, -8.1875e+00,
           8.2891e+00, -6.9062e+00, -7.2070e+00, -4.0859e+00],
         [ 3.7969e+00, -6.8555e+00, -1.6963e+00,  2.7773e+00,
          -8.7891e-01, -6.4141e+00, -3.3828e+00, -1.4854e+00],
         [ 2.4688e+00, -8.8594e+00, -8.3828e+00,  2.3555e+00,
           2.8477e+00,  6.9688e+00, -6.3281e+00,  3.9551e+00],
         [ 5.2734e-02,  2.7500e+00, -1.8281e+00,  8.9648e-01,
          -1.7930e+00, -7.3125e+00,  6.6094e+00,  5.0352e+00]]],


       [[[-4.3594e+00, -7.5078e+00,  4.7109e+00, -6.8047e+00,
           6.7148e+00,  3.3926e+00,  1.8105e+00, -4.2539e+00],
         [ 5.5195e+00,  8.8047e+00, -5.3516e+00, -2.0312e+00,
           3.3320e+00,  1.2568e+00, -8.7891e+00,  7.2148e+00],
         [-3.1367e+00,  6.3281e-01, -3.4531e+00,  7.2344e+00,
           7.3125e+00,  8.0859e-01, -4.5703e+00,  5.2461e+00],
         [-6.0547e+00, -5.2734e+00, -4.6250e+00,  3.1914e+00,
          -6.8555e-01, -3.4727e+00,  6.5391e+00,  1.7402e+00],
         [-2.6992e+00,  3.6211e+00, -6.0312e+00, -3.2168e+00,
          -5.5371e-01,  8.2266e+00, -5.2383e+00,  6.8750e+00],
         [-1.1074e+00, -7.7…4_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
E      int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
E      int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
E      key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
E      int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
E      int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
E      int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
E      key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
E      tmp = Constant <value_ints: ints = [-1]> ()
E      key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
E      key_squeezed = Reshape (key, key_squeezed_shape)
E      key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
E      key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
E      key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
E      tmp_6 = Sqrt (scale)
E      query_scaled = Mul (query, tmp_6)
E      tmp_7 = Sqrt (scale)
E      key_transposed_scaled = Mul (key_transposed, tmp_7)
E      tmp_8 = MatMul (query_scaled, key_transposed_scaled)
E      tmp_9 = Add (tmp_8, attn_mask)
E      attn_weight = Softmax <axis: int = -1> (tmp_9)
E      dropout_p = Constant <value_float: float = @dropout_p> ()
E      attn_weight_10, _ = Dropout (attn_weight, dropout_p)
E      return_val = MatMul (attn_weight_10, value)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:534: in _capture_graph_and_evaluate_torch_script_evaluator
    return _safe_ort_session_run(onnx_model.SerializeToString(), ort_inputs)
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:349: in _safe_ort_session_run
    raise return_dict["error"]
E   onnxruntime.capi.onnxruntime_pybind11_state.InvalidGraph: [ONNXRuntimeError] : 10 : INVALID_GRAPH : This is an invalid model. In Node, ("", ReduceMax, "", -1) : ("_inline__aten_scaled_dot_product_attention_no_mask_onnxtmp_8": tensor(float16),) -> ("_inline_SoftmaxX_ReduceMax",) , Error Unrecognized attribute: axes for operator ReduceMax
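
The error reported above appears to come from the `ReduceMax` node produced when the `Softmax` in `_aten_scaled_dot_product_attention_no_mask_onnx` is inlined: from opset 18 onward `ReduceMax` takes `axes` as an optional input rather than an attribute, so a node that still carries the `axes` attribute fails ONNX Runtime's graph validation. A minimal, hypothetical sketch of the invalid and valid forms at opset 18 (names and shapes are illustrative, not taken from this test):

```python
import onnx
from onnx import TensorProto, helper

# At opset 18, ReduceMax expects `axes` as an optional input; the pre-18
# attribute form triggers "Unrecognized attribute: axes for operator ReduceMax".
x = helper.make_tensor_value_info("x", TensorProto.FLOAT16, [4, 3, 6])
y = helper.make_tensor_value_info("y", TensorProto.FLOAT16, [4, 3, 1])

# Invalid at opset 18: `axes` passed as an attribute.
bad_node = helper.make_node("ReduceMax", ["x"], ["y"], axes=[-1], keepdims=1)

# Valid at opset 18: `axes` supplied as a tensor input (here an initializer).
axes = helper.make_tensor("axes", TensorProto.INT64, [1], [-1])
good_node = helper.make_node("ReduceMax", ["x", "axes"], ["y"], keepdims=1)

graph = helper.make_graph(
    [good_node], "reduce_max_opset18", [x], [y], initializer=[axes]
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])
onnx.checker.check_model(model)  # passes; the same graph built with bad_node should be rejected
```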

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:550: in _capture_graph_and_evaluate_torch_script_evaluator
    raise RuntimeError(
E   RuntimeError: ONNX Runtime failed to evaluate:
E   Inputs:
E   {'input_0': array([[[-2.89   ,  5.555  , -1.591  ,  4.395  , -3.7    , -3.895  ,
E             8.945  ,  8.04   ],
E           [-7.84   , -2.057  , -3.305  ,  2.75   ,  6.133  , -7.066  ,
E            -0.4307 ,  3.84   ],
E           [ 0.3955 ,  6.934  ,  1.679  ,  4.562  ,  3.322  , -2.082  ,
E            -2.936  ,  2.926  ]],
E   
E          [[-8.664  ,  2.566  , -2.945  , -7.586  ,  2.9    ,  7.777  ,
E             0.9053 , -7.594  ],
E           [ 7.016  ,  5.57   ,  0.4658 ,  8.25   ,  0.29   , -6.883  ,
E            -0.51   , -1.187  ],
E           [-8.016  , -0.0967 , -6.215  , -8.805  ,  4.895  , -1.767  ,
E             5.68   ,  1.028  ]],
E   
E          [[ 8.65   , -2.848  ,  5.64   , -0.05273,  8.2    ,  1.8545 ,
E             3.664  ,  0.9844 ],
E           [ 0.378  , -7.883  , -1.31   , -7.62   ,  6.89   , -1.74   ,
E             3.453  ,  7.61   ],
E           [-1.5205 , -4.508  ,  8.02   , -8.61   ,  0.9404 , -7.086  ,
E            -5.117  ,  7.285  ]],
E   
E          [[-6.242  , -7.453  , -7.004  ,  6.17   ,  8.63   ,  5.9    ,
E             7.953  , -6.664  ],
E           [-1.362  , -1.644  ,  3.322  , -4.71   , -2.84   , -2.97   ,
E             2.54   , -0.255  ],
E           [-5.87   ,  5.766  ,  6.53   , -1.345  ,  8.34   , -7.215  ,
E             1.863  , -4.156  ]]], dtype=float16),
E    'input_1': array([[[[ 6.906  , -6.17   , -3.79   , -7.418  , -4.625  ,  2.805  ,
E             -8.28   , -6.344  ],
E            [-6.707  , -2.117  ,  1.749  ,  2.102  , -0.9756 , -2.926  ,
E             -0.712  , -3.006  ],
E            [ 2.584  ,  4.492  , -2.953  ,  7.734  , -5.527  ,  6.574  ,
E              1.204  , -1.635  ],
E            [-1.292  , -1.775  ,  1.125  , -8.58   ,  5.934  , -2.074  ,
E             -6.434  ,  5.51   ],
E            [-4.297  , -6.273  , -4.29   ,  7.93   , -3.393  , -6.953  ,
E              1.828  , -3.691  ],
E            [-8.18   , -6.133  , -7.03   , -0.51   , -1.187  ,  6.953  ,
E             -8.914  ,  2.883  ]],
E   
E           [[ 0.0967 , -8.7    , -5.08   ,  3.965  ,  6.496  ,  1.925  ,
E             -5.133  , -3.27   ],
E            [ 4.695  , -3.78   ,  8.8    , -6.996  ,  8.21   , -5.984  ,
E              0.6943 ,  3.094  ],
E            [ 3.473  ,  2.54   ,  3.99   ,  7.4    ,  5.89   , -0.3164 ,
E              1.116  ,  6.16   ],
E            [-1.02   ,  4.766  ,  7.883  ,  3.146  , -4.375  ,  2.742  ,
E             -8.35   ,  4.79   ],
E            [ 7.65   , -7.285  ,  7.4    , -8.75   , -2.805  , -0.03516,
E             -8.87   ,  3.709  ],
E            [ 5.168  , -1.934  ,  5.203  ,  3.55   , -0.747  ,  5.582  ,
E              4.656  , -0.51   ]],
E   
E           [[ 5.387  ,  7.938  ,  0.668  , -6.996  , -0.5537 ,  2.602  ,
E              4.473  , -2.725  ],
E            [-0.3604 ,  8.484  , -3.973  ,  4.72   ,  5.117  ,  8.93   ,
E             -3.604  ,  8.59   ],
E            [-6.145  ,  3.182  ,  5.87   , -1.318  , -5.85   ,  6.195  ,
E              6.906  , -6.16   ],
E            [ 6.125  ,  2.855  ,  5.484  ,  7.242  , -7.56   ,  5.027  ,
E              5.3    , -1.995  ],
E            [-5.008  ,  8.51   ,  1.336  ,  6.47   ,  1.107  , -7.312  ,
E             -3.498  ,  6.793  ],
E            [-4.35   ,  0.0791 , -8.2    , -7.79   , -4.78   , -5.062  ,
E              2.918  , -1.371  ]],
E   
E           [[-6.02   ,  8.44   ,  0.4482 ,  6.203  , -4.914  ,  8.79   ,
E             -2.7    , -1.986  ],
E            [-4.836  , -5.484  ,  3.85   ,  7.707  , -3.691  , -0.8174 ,
E             -0.2461 , -0.9053 ],
E            [-1.793  , -3.234  , -3.754  ,  4.465  ,  1.336  , -4.492  ,
E              4.414  ,  7.496  ],
E            [ 7.91   ,  6.777  ,  5.934  ,  0.04395, -4.613  ,  5.8    ,
E             -2.707  ,  1.028  ],
E            [ 4.254  ,  8.914  , -3.727  , -3.156  ,  8.35   ,  5.96   ,
E             -4.895  , -7.594  ],
E            [-6.637  ,  0.4834 , -7.004  , -1.547  , -7.7    ,  0.3955 ,
E              1.257  ,  6.582  ]]],
E   
E   
E          [[[ 6.883  , -3.453  ,  2.98   , -5.38   ,  6.426  , -4.438  ,
E              3.016  , -5.61   ],
E            [ 4.15   , -6.594  , -5.906  ,  0.5713 ,  0.6855 , -3.754  ,
E              5.582  ,  1.767  ],
E            [ 8.586  ,  5.81   , -4.105  , -8.016  ,  0.791  , -7.77   ,
E              2.531  ,  7.91   ],
E            [-2.594  , -3.832  , -5.4    , -5.695  ,  3.367  ,  7.426  ,
E              1.327  ,  7.33   ],
E            [-2.408  , -3.498  ,  2.795  ,  3.234  ,  5.316  , -0.2637 ,
E              4.914  , -2.83   ],
E            [-4.95   , -8.91   ,  1.362  ,  6.004  , -4.816  , -0.756  ,
E             -5.484  , -1.907  ]],
E   
E           [[-7.453  ,  2.988  , -8.51   ,  7.508  ,  7.48   , -5.07   ,
E             -7.227  ,  6.117  ],
E            [ 5.555  , -2.523  , -7.234  ,  2.89   , -5.82   , -1.169  ,
E             -5.02   , -5.88   ],
E            [-7.11   , -0.4658 , -4.133  , -1.213  , -8.805  , -7.65   ,
E              1.626  , -6.82   ],
E            [-2.426  , -1.934  , -7.832  , -6.188  , -5.582  , -5.195  ,
E              0.1846 , -1.336  ],
E            [ 5.266  ,  5.836  ,  1.09   ,  0.6855 , -1.468  ,  8.5    ,
E             -8.56   ,  6.39   ],
E            [-7.46   , -1.362  , -6.785  , -0.5186 ,  2.152  , -2.514  ,
E              0.923  ,  1.6875 ]],
E   
E           [[ 2.77   , -6.355  , -4.19   ,  8.375  , -2.715  ,  8.2    ,
E             -2.162  , -6.652  ],
E            [ 1.485  , -0.1318 , -4.164  ,  3.99   ,  5.71   ,  2.559  ,
E              4.95   ,  7.016  ],
E            [ 7.03   ,  1.23   ,  1.274  ,  3.727  , -6.25   , -7.39   ,
E              5.83   , -2.742  ],
E            [-7.883  ,  2.514  ,  7.227  ,  2.215  ,  8.79   ,  6.83   ,
E             -2.855  , -5.617  ],
E            [ 2.012  , -6.625  ,  6.766  , -1.881  , -5.36   , -7.27   ,
E              2.83   ,  0.4834 ],
E            [-1.705  ,  1.775  ,  6.875  ,  5.45   , -3.488  , -3.7    ,
E              1.204  ,  3.7    ]],
E   
E           [[ 7.156  ,  0.4922 ,  4.93   , -0.1318 , -8.38   , -1.002  ,
E              8.02   , -3.79   ],
E            [ 3.262  ,  5.414  , -3.734  , -4.492  ,  7.656  , -8.27   ,
E             -0.2725 ,  0.3691 ],
E            [ 5.844  , -1.635  ,  8.2    , -3.684  , -1.89   , -7.84   ,
E              3.463  ,  0.5625 ],
E            [-6.46   , -5.703  , -2.25   , -4.906  ,  7.883  ,  1.863  ,
E              1.292  ,  5.027  ],
E            [-4.867  , -7.7    , -8.234  ,  2.84   ,  7.86   , -5.527  ,
E             -2.066  ,  3.543  ],
E            [ 0.712  ,  5.625  ,  2.504  ,  3.348  , -1.406  ,  6.953  ,
E              2.363  ,  2.559  ]]],
E   
E   
E          [[[ 3.973  ,  6.273  , -8.8    , -0.6064 ,  7.58   , -6.273  ,
E              4.043  ,  1.819  ],
E            [ 6.953  ,  6.293  , -3.172  , -5.16   ,  4.72   , -2.805  ,
E              2.945  , -5.3    ],
E            [ 6.582  , -6.188  ,  4.36   , -3.85   , -0.6855 ,  1.424  ,
E              7.117  ,  4.746  ],
E            [ 1.441  , -1.547  ,  6.125  ,  4.965  ,  2.584  , -8.64   ,
E              8.11   ,  2.496  ],
E            [-0.756  , -5.61   ,  8.88   ,  1.204  ,  1.635  ,  8.8    ,
E             -1.011  , -2.715  ],
E            [-5.22   , -4.14   ,  7.566  ,  3.648  , -1.978  , -5.266  ,
E              3.648  , -8.78   ]],
E   
E           [[ 3.252  , -5.625  , -8.516  ,  2.215  , -3.438  , -8.08   ,
E              1.134  , -1.547  ],
E            [-3.121  , -5.703  , -2.479  , -0.51   , -6.3    , -8.21   ,
E              3.445  ,  4.684  ],
E            [-1.96   , -8.04   ,  4.92   ,  5.625  , -3.867  , -6.02   ,
E              4.957  , -0.4043 ],
E            [-5.88   ,  7.832  , -7.53   , -4.113  ,  3.965  , -1.055  ,
E             -8.086  ,  8.99   ],
E            [-7.69   , -7.727  , -2.715  , -1.327  , -6.383  ,  1.459  ,
E              6.258  , -3.332  ],
E            [-2.654  ,  0.413  , -3.402  , -6.906  ,  0.05273, -4.93   ,
E             -5.027  , -2.117  ]],
E   
E           [[-1.125  , -5.75   ,  4.816  , -4.727  ,  1.846  ,  8.91   ,
E             -7.56   ,  1.09   ],
E            [ 2.953  , -7.32   , -2.594  , -7.13   , -3.965  ,  2.875  ,
E             -0.5977 , -5.66   ],
E            [-3.094  ,  5.07   , -1.995  , -6.906  ,  4.668  , -2.855  ,
E              0.712  ,  3.182  ],
E            [-4.57   , -7.234  , -0.923  ,  4.42   ,  5.652  , -8.69   ,
E             -3.348  ,  5.168  ],
E            [ 5.273  ,  8.55   ,  3.523  , -2.285  ,  8.945  ,  7.516  ,
E             -2.805  ,  1.538  ],
E            [-4.22   ,  5.     ,  4.094  ,  1.758  , -4.26   ,  4.797  ,
E             -0.0703 ,  8.77   ]],
E   
E           [[ 5.59   , -5.547  , -1.6875 ,  0.3604 , -3.762  , -5.555  ,
E             -2.855  ,  5.47   ],
E            [ 6.875  ,  8.734  , -5.133  ,  7.348  ,  5.414  ,  4.438  ,
E             -0.9404 , -6.195  ],
E            [-4.15   ,  1.881  ,  0.615  , -4.016  , -0.9316 , -6.934  ,
E              2.46   , -0.334  ],
E            [ 5.414  ,  7.918  ,  7.79   ,  1.415  ,  1.67   ,  0.0703 ,
E             -8.484  , -1.16   ],
E            [ 7.17   , -7.03   ,  7.188  , -2.855  , -3.895  ,  7.066  ,
E              2.514  , -0.2725 ],
E            [-7.523  ,  0.536  ,  3.059  ,  3.875  , -8.88   , -7.566  ,
E              3.684  , -2.355  ]]],
E   
E   
E          [[[ 7.438  ,  4.562  ,  5.844  ,  4.81   ,  4.914  ,  5.24   ,
E              0.2373 ,  6.215  ],
E            [-5.09   ,  1.819  ,  4.12   , -2.715  , -0.4307 , -1.811  ,
E             -8.09   , -2.488  ],
E            [-5.266  ,  0.879  ,  4.746  , -3.393  , -1.538  ,  4.58   ,
E              0.923  ,  5.96   ],
E            [ 8.586  , -2.25   ,  7.086  , -6.574  ,  0.5625 ,  6.875  ,
E             -1.099  , -7.355  ],
E            [ 2.777  ,  8.04   ,  1.204  ,  3.006  , -8.086  , -4.21   ,
E             -5.668  , -8.04   ],
E            [ 8.29   , -4.22   , -5.9    , -2.479  , -6.223  ,  2.328  ,
E              6.09   ,  7.48   ]],
E   
E           [[ 0.5625 , -6.082  ,  4.5    ,  1.16   , -3.805  , -2.127  ,
E              6.61   , -4.26   ],
E            [-2.549  ,  4.695  ,  4.35   , -3.55   , -3.568  ,  5.766  ,
E              3.234  ,  7.6    ],
E            [-1.02   ,  7.99   ,  5.71   ,  6.99   ,  7.11   , -0.29   ,
E             -8.14   , -6.223  ],
E            [-7.93   ,  2.805  , -1.986  ,  5.203  ,  4.344  , -4.992  ,
E             -4.42   , -4.438  ],
E            [-0.949  , -2.707  , -6.242  ,  8.305  , -2.777  , -2.77   ,
E              4.754  ,  4.203  ],
E            [ 8.016  ,  8.52   ,  0.167  , -4.086  , -0.4922 ,  1.134  ,
E              2.566  ,  7.875  ]],
E   
E           [[ 1.151  , -5.88   ,  0.4658 , -1.187  , -2.04   ,  3.393  ,
E             -6.68   , -0.378  ],
E            [ 1.881  ,  4.035  ,  4.016  , -1.96   , -8.63   ,  5.14   ,
E             -1.433  ,  2.434  ],
E            [ 7.355  ,  6.617  ,  1.134  , -5.934  ,  6.195  ,  4.773  ,
E              0.923  , -0.993  ],
E            [ 6.285  , -9.     ,  6.645  , -3.875  , -5.21   ,  2.68   ,
E              7.98   ,  4.625  ],
E            [-8.55   , -0.747  , -4.438  ,  2.89   ,  4.895  ,  2.848  ,
E              0.993  , -7.72   ],
E            [ 8.84   ,  1.45   ,  3.322  ,  1.213  , -3.48   , -6.383  ,
E              2.418  ,  8.04   ]],
E   
E           [[-2.074  , -1.538  ,  7.242  , -7.445  , -1.608  ,  3.102  ,
E              1.345  ,  8.13   ],
E            [ 1.758  , -1.292  ,  1.002  ,  5.89   ,  4.86   , -2.338  ,
E              0.589  ,  3.902  ],
E            [-5.098  ,  2.855  ,  7.426  , -2.785  , -3.719  ,  5.47   ,
E              7.727  ,  3.146  ],
E            [-7.734  , -6.594  , -4.535  ,  1.564  , -2.39   ,  7.355  ,
E              7.297  , -4.816  ],
E            [ 7.4    ,  8.57   ,  3.629  ,  1.336  ,  0.9053 ,  0.6504 ,
E              1.116  ,  6.242  ],
E            [-3.156  , -4.016  ,  6.934  ,  3.023  , -4.344  , -7.61   ,
E              4.72   ,  3.734  ]]]], dtype=float16),
E    'input_2': array([[[[-3.0234e+00,  8.7422e+00, -8.8047e+00,  4.0430e-01,
E             -4.3945e+00, -2.6016e+00,  7.3203e+00,  6.1953e+00],
E            [-7.7422e+00,  5.1328e+00, -3.1367e+00,  5.1680e+00,
E              3.6738e+00, -3.0234e+00, -4.5703e-01, -1.9863e+00],
E            [-8.2656e+00, -5.3516e+00, -2.9102e+00, -4.7734e+00,
E             -5.0000e+00, -1.7402e+00, -6.3984e+00,  9.7559e-01],
E            [ 8.4453e+00,  2.0117e+00,  8.3496e-01, -6.5918e-01,
E             -3.0312e+00, -5.6875e+00,  1.3623e+00, -5.3086e+00],
E            [-8.0859e+00,  6.1875e+00,  8.7891e+00, -6.1094e+00,
E             -5.1484e+00,  1.1514e+00,  7.6094e+00, -5.6523e+00],
E            [-7.7969e+00, -7.2070e-01, -1.5996e+00, -6.6367e+00,
E              4.8242e+00, -5.5273e+00,  4.8867e+00, -9.8438e-01]],
E   
E           [[-4.9297e+00,  5.9219e+00, -5.0547e+00,  2.9004e+00,
E              2.2148e+00,  5.9766e+00,  8.4531e+00,  2.4961e+00],
E            [ 2.3984e+00, -8.8359e+00, -6.9062e+00,  2.3633e+00,
E             -5.3359e+00,  8.2969e+00,  2.7148e+00,  2.6367e-01],
E            [ 7.2500e+00,  9.0527e-01,  5.9844e+00, -7.3750e+00,
E              1.1162e+00,  1.8369e+00,  3.6289e+00,  2.4609e+00],
E            [ 8.4219e+00, -6.7852e+00, -8.8438e+00, -2.7500e+00,
E              6.4258e+00,  1.1865e+00, -7.0469e+00, -8.9844e+00],
E            [ 8.4531e+00,  2.0664e+00,  8.9844e+00, -1.5029e+00,
E              4.0781e+00, -7.0039e+00, -9.8438e-01,  5.7383e+00],
E            [-4.6250e+00, -4.4297e+00, -2.8398e+00,  7.5664e+00,
E             -9.4922e-01,  8.5312e+00,  5.9414e+00,  8.7109e+00]],
E   
E           [[ 1.2129e+00,  2.4180e+00, -6.4531e+00,  4.5703e-01,
E              6.0312e+00,  9.5801e-01, -2.7246e+00, -6.7676e-01],
E            [-3.3125e+00,  2.6367e-02,  5.8516e+00, -2.9961e+00,
E              8.2188e+00, -1.3623e+00,  4.0430e-01, -8.1797e+00],
E            [-7.4707e-01, -4.0859e+00,  6.6016e+00,  7.5391e+00,
E             -7.1367e+00, -5.1758e+00, -1.5205e+00, -4.5430e+00],
E            [-1.6611e+00, -3.2871e+00, -7.2070e+00, -5.6602e+00,
E              6.3281e-01,  4.5273e+00,  6.5918e-01,  7.3828e+00],
E            [-8.3281e+00, -1.8369e+00, -2.2578e+00, -5.4062e+00,
E              5.3867e+00, -8.0781e+00,  7.4180e+00, -4.1641e+00],
E            [ 4.1758e+00, -1.7490e+00,  2.4082e+00, -8.2344e+00,
E              6.3438e+00, -1.2656e+00,  6.6797e-01,  6.6797e-01]],
E   
E           [[ 3.8750e+00, -4.2617e+00, -6.8555e+00, -3.9297e+00,
E              6.8281e+00,  3.3477e+00, -3.7793e-01, -2.7070e+00],
E            [ 5.0000e+00,  6.2578e+00,  6.4688e+00, -7.7617e+00,
E             -1.1602e+00,  3.2266e+00,  7.2949e-01,  1.2480e+00],
E            [-5.9141e+00, -4.2188e+00, -9.8438e-01, -7.6719e+00,
E              1.3799e+00,  3.9102e+00, -3.2695e+00, -5.8887e-01],
E            [-2.3027e+00,  2.6543e+00, -3.5234e+00, -3.2422e+00,
E             -5.0547e+00, -2.6719e+00,  1.9600e+00, -8.6328e+00],
E            [-6.6875e+00,  6.6367e+00,  1.7578e-02,  5.5898e+00,
E              2.3477e+00, -3.7695e+00,  1.3184e-01,  2.9258e+00],
E            [ 4.0703e+00,  5.0977e-01, -1.0547e-01, -7.4531e+00,
E             -8.4531e+00, -5.8887e-01, -1.9336e-01, -4.0781e+00]]],
E   
E   
E          [[[ 2.6895e+00,  2.1523e+00,  2.5312e+00,  7.0039e+00,
E             -6.1523e-01,  2.9102e+00,  4.6250e+00, -1.5381e+00],
E            [ 2.2324e+00, -3.6992e+00,  4.4727e+00,  6.9453e+00,
E              6.3008e+00, -3.6836e+00,  6.4688e+00,  7.1367e+00],
E            [-1.7139e+00,  4.0625e+00,  1.7402e+00,  2.3730e+00,
E              8.8828e+00, -2.4531e+00, -4.1055e+00, -8.4453e+00],
E            [ 2.0117e+00, -7.2949e-01,  8.3594e+00, -6.8203e+00,
E              2.1523e+00, -7.4707e-01,  5.1484e+00, -7.6289e+00],
E            [-2.5488e+00,  6.1602e+00, -3.5156e-01,  4.9492e+00,
E              8.3438e+00,  7.6465e-01,  2.7422e+00,  7.4688e+00],
E            [ 5.7129e-01, -3.1914e+00,  1.6699e-01, -8.7188e+00,
E              7.3750e+00,  6.9609e+00, -4.1055e+00, -8.3672e+00]],
E   
E           [[ 7.6016e+00, -1.7578e+00,  3.0059e+00,  1.1602e+00,
E              4.7461e-01, -8.4688e+00,  7.5234e+00,  4.5625e+00],
E            [-6.6172e+00, -6.8477e+00,  1.5381e+00,  8.8281e+00,
E             -3.4883e+00,  2.7773e+00,  6.8750e+00, -2.8125e+00],
E            [-3.1914e+00,  1.1426e+00,  8.8770e-01, -6.2852e+00,
E              6.8633e+00,  2.5586e+00,  6.3359e+00, -4.6562e+00],
E            [ 3.1641e+00, -6.0391e+00, -3.9199e+00, -8.9219e+00,
E             -8.5781e+00,  4.2734e+00, -3.5332e+00,  2.8125e+00],
E            [-2.5840e+00,  4.9922e+00, -2.0215e+00, -4.7461e-01,
E              5.4844e+00, -7.8398e+00,  5.1172e+00, -8.2812e+00],
E            [-8.5078e+00, -5.7031e+00, -5.9414e+00, -6.6250e+00,
E              4.2344e+00, -8.0312e+00, -1.3711e+00, -1.2129e+00]],
E   
E           [[-8.3438e+00,  1.3799e+00,  4.6680e+00,  8.1875e+00,
E              5.6406e+00, -6.1172e+00, -7.9375e+00, -8.6484e+00],
E            [ 1.4590e+00, -9.0527e-01, -3.3672e+00,  1.9336e-01,
E             -6.5547e+00, -2.3027e+00, -4.8867e+00,  3.5078e+00],
E            [-8.9297e+00, -8.8906e+00, -8.8770e-01, -8.9648e-01,
E              7.1875e+00,  7.3047e+00,  4.4824e-01,  1.8281e+00],
E            [ 4.8242e+00,  5.3711e+00,  6.1875e+00,  3.5234e+00,
E              1.6699e+00,  4.3945e-01,  3.4453e+00, -3.2695e+00],
E            [ 5.9688e+00,  7.2266e+00, -3.5508e+00,  6.3125e+00,
E              8.2422e+00,  2.6289e+00,  5.4570e+00, -7.2852e+00],
E            [-5.3438e+00, -3.1719e+00,  3.6836e+00,  6.6250e+00,
E              5.6172e+00,  5.4141e+00,  4.5781e+00,  3.1211e+00]],
E   
E           [[-7.7070e+00,  8.9766e+00, -2.1094e-01,  3.1992e+00,
E             -5.8086e+00,  8.5234e+00,  5.7227e+00,  6.1602e+00],
E            [-5.4766e+00, -3.6836e+00,  4.3164e+00,  8.8516e+00,
E             -6.9961e+00,  5.0469e+00,  7.4805e+00, -4.2891e+00],
E            [-6.6523e+00,  3.5508e+00,  6.9609e+00,  8.7891e-02,
E              2.8047e+00,  6.2578e+00, -7.9023e+00, -6.3203e+00],
E            [ 6.1602e+00,  4.4844e+00,  7.7344e-01,  3.1113e+00,
E             -3.4883e+00,  3.0859e+00, -2.9531e+00,  1.1426e-01],
E            [ 2.2500e+00,  1.8281e+00, -6.5391e+00, -5.1172e+00,
E              7.3750e+00, -1.6523e+00, -5.5898e+00, -3.5859e+00],
E            [ 4.2344e+00, -7.7188e+00,  2.4434e+00, -4.4297e+00,
E             -3.4727e+00,  5.3164e+00, -5.1953e+00, -7.7422e+00]]],
E   
E   
E          [[[-6.4688e+00,  3.3828e+00, -8.8828e+00,  8.8359e+00,
E             -6.7500e+00,  3.0312e+00,  6.1250e+00,  3.0234e+00],
E            [ 3.5078e+00,  1.8105e+00, -1.4678e+00, -7.2070e-01,
E              1.5029e+00, -4.9297e+00, -6.6367e+00,  1.9336e+00],
E            [ 1.3271e+00, -3.5332e+00,  1.5996e+00,  3.4980e+00,
E              7.4688e+00, -8.3750e+00,  3.6641e+00,  7.8828e+00],
E            [ 2.4785e+00, -8.3594e+00, -5.8906e+00,  6.3828e+00,
E             -4.6406e+00,  2.8750e+00, -3.2520e-01,  4.6953e+00],
E            [-3.9102e+00, -1.0107e+00,  3.9199e+00, -5.7305e+00,
E             -5.8281e+00, -8.1875e+00,  2.8125e+00, -6.1602e+00],
E            [-5.6328e+00, -3.9551e+00, -2.5391e+00, -2.5312e+00,
E              7.7344e+00, -5.9688e+00,  7.4531e+00,  8.9648e-01]],
E   
E           [[ 1.8809e+00,  1.6787e+00,  5.8984e+00,  7.7617e+00,
E             -2.6895e+00, -2.1445e+00, -4.3945e-02,  1.6963e+00],
E            [ 3.3320e+00,  3.5332e+00, -3.1719e+00,  6.5742e+00,
E             -3.7344e+00, -8.9141e+00, -7.3672e+00, -2.6289e+00],
E            [ 1.2305e-01, -2.1719e+00,  2.6016e+00, -5.3984e+00,
E             -6.0645e-01, -3.8672e+00,  4.4824e-01,  5.6172e+00],
E            [ 2.3730e-01, -3.9375e+00,  1.4062e-01,  3.0762e+00,
E              3.7539e+00,  5.7031e+00,  7.2969e+00,  2.0039e+00],
E            [ 6.7227e+00, -3.1641e-01,  1.8369e+00,  3.7793e+00,
E              2.5742e+00, -2.4785e+00,  6.6094e+00,  5.1055e+00],
E            [ 9.0527e-01, -7.2148e+00,  4.5703e-01, -4.4824e-01,
E             -7.4707e-01,  3.2871e+00,  7.3906e+00,  4.6582e-01]],
E   
E           [[ 2.4180e+00,  2.9355e+00,  2.6367e-01,  1.1777e+00,
E              5.0098e-01, -8.0859e+00,  2.3281e+00,  4.5781e+00],
E            [ 4.3516e+00, -6.7852e+00, -6.4141e+00, -3.1367e+00,
E              1.3799e+00, -4.1836e+00, -3.4629e+00,  5.8008e-01],
E            [ 6.6172e+00, -6.3828e+00,  4.0625e+00,  6.3633e+00,
E              7.1992e+00, -8.1250e+00,  4.8340e-01, -6.0898e+00],
E            [-1.6611e+00,  6.5938e+00, -5.5547e+00,  6.5234e+00,
E             -1.3271e+00, -5.6602e+00, -8.2031e+00,  6.9453e+00],
E            [-5.0781e+00, -8.0469e+00, -6.2227e+00,  1.4678e+00,
E              7.1289e+00,  8.6250e+00,  8.0781e+00,  3.8145e+00],
E            [-5.1250e+00, -7.0938e+00,  1.9424e+00, -3.0938e+00,
E              4.6328e+00, -6.5664e+00, -1.4062e-01, -4.0078e+00]],
E   
E           [[ 5.0352e+00, -1.3184e-01, -1.3184e+00, -4.9141e+00,
E             -1.6348e+00, -2.3828e+00,  5.2812e+00,  5.1250e+00],
E            [-1.6436e+00, -8.7422e+00, -5.8516e+00,  8.0703e+00,
E              2.5234e+00, -8.2500e+00,  2.0469e+00, -2.4434e+00],
E            [-6.2578e+00, -5.8008e+00,  6.8203e+00,  7.0156e+00,
E              7.3672e+00, -1.0547e+00,  2.3555e+00, -2.8750e+00],
E            [-7.8594e+00, -1.9512e+00,  8.7188e+00, -2.0742e+00,
E              7.3047e+00, -5.9219e+00, -7.4258e+00,  7.0391e+00],
E            [ 4.2109e+00,  7.8047e+00,  1.4238e+00,  6.8750e+00,
E              7.7969e+00,  8.3496e-01, -2.2070e+00, -4.5273e+00],
E            [-5.8008e+00, -3.8672e-01, -5.9062e+00, -1.9775e+00,
E              3.8242e+00,  7.2070e-01,  1.9688e+00, -5.3867e+00]]],
E   
E   
E          [[[-2.5488e+00, -3.8672e-01, -7.9727e+00, -4.7734e+00,
E             -7.3203e+00, -3.8145e+00, -3.8242e+00, -3.2168e+00],
E            [-8.0703e+00, -5.6523e+00, -2.1270e+00, -5.5820e+00,
E             -6.8633e+00, -3.8672e+00, -1.3711e+00, -4.8945e+00],
E            [-1.3975e+00, -7.9980e-01,  4.7188e+00,  1.8369e+00,
E             -3.6992e+00,  5.5898e+00, -8.7891e-02,  8.3125e+00],
E            [ 6.3281e-01,  8.7109e+00,  5.5703e+00,  2.7070e+00,
E              6.1094e+00,  6.1328e+00,  6.2305e+00,  5.1758e+00],
E            [-1.4502e+00,  1.6260e+00,  7.5156e+00,  2.2148e+00,
E             -8.8906e+00, -6.3828e+00,  6.5918e-01,  4.3945e-01],
E            [-2.0918e+00, -7.6992e+00,  6.4141e+00, -7.9375e+00,
E              1.8457e+00,  2.4531e+00, -4.2812e+00,  6.0117e+00]],
E   
E           [[ 1.3623e+00,  2.0820e+00, -5.7031e+00, -8.2891e+00,
E             -4.6836e+00, -5.8789e+00,  8.7969e+00, -8.7891e-01],
E            [ 2.3906e+00,  5.0898e+00,  3.7793e-01, -1.7578e-01,
E              5.0273e+00, -7.9297e+00,  3.0586e+00, -2.7852e+00],
E            [-6.1094e+00,  3.5859e+00, -2.9883e-01, -6.1719e+00,
E              9.6680e-01, -2.2422e+00,  8.9297e+00,  7.7344e-01],
E            [-5.2031e+00, -2.7695e+00, -2.0312e+00, -2.7500e+00,
E              8.6133e-01, -4.2734e+00, -3.7266e+00,  1.2744e+00],
E            [ 6.0898e+00, -6.7500e+00,  1.6523e+00,  2.1016e+00,
E             -1.3535e+00, -1.1074e+00,  4.4141e+00,  6.2148e+00],
E            [-1.8984e+00, -3.0586e+00,  7.1875e+00, -6.6250e+00,
E              4.5625e+00, -4.1758e+00,  9.9316e-01,  2.9707e+00]],
E   
E           [[-4.0156e+00,  8.2812e+00, -3.4277e-01, -5.8516e+00,
E              5.7031e+00,  5.3867e+00,  2.9258e+00,  6.0469e+00],
E            [ 1.4766e+00,  2.9707e+00, -1.9951e+00,  2.2852e+00,
E             -1.2129e+00,  6.4531e+00,  1.7402e+00, -7.1562e+00],
E            [-6.4844e+00,  6.0742e+00,  6.6875e+00, -2.4785e+00,
E              4.1406e+00,  4.2266e+00,  8.9297e+00,  1.9336e-01],
E            [ 4.9141e+00,  9.4922e-01, -6.4062e+00, -8.9531e+00,
E             -4.1758e+00, -3.0234e+00,  7.6211e+00, -2.0215e+00],
E            [-7.2266e+00, -6.1523e+00, -1.7578e+00,  6.8906e+00,
E             -6.8633e+00, -6.1328e+00, -3.3398e-01,  4.0430e-01],
E            [-2.3203e+00,  5.1875e+00,  5.4570e+00,  1.0547e-01,
E             -1.0371e+00,  3.3320e+00, -5.8008e+00,  5.2812e+00]],
E   
E           [[ 2.9961e+00,  7.5508e+00,  6.9531e+00,  1.9336e+00,
E             -8.3672e+00,  6.5469e+00,  6.7773e+00,  5.4844e+00],
E            [ 3.9551e-01,  4.4219e+00, -6.6016e+00,  4.8672e+00,
E             -7.1797e+00,  8.1562e+00,  7.5586e+00,  5.3984e+00],
E            [-7.9180e+00,  7.9102e-02,  2.8828e+00,  1.6348e+00,
E             -4.2344e+00,  5.8633e+00, -8.4844e+00,  6.6797e+00],
E            [-1.1074e+00, -1.2217e+00,  8.3516e+00, -2.1367e+00,
E             -5.1250e+00,  1.8984e+00, -1.0020e+00,  8.7891e-03],
E            [ 4.7031e+00,  6.3008e+00, -1.6963e+00, -1.4941e-01,
E              6.2422e+00,  3.1211e+00, -3.5078e+00,  6.9531e+00],
E            [ 5.8516e+00,  1.4414e+00, -2.3730e-01, -2.5586e+00,
E             -8.2109e+00, -7.5391e+00, -5.8008e-01, -7.1875e+00]]]],
E         dtype=float16)}
E   Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[4,3,8] input_0, float16[4,4,6,8] input_1, float16[4,4,6,8] input_2) => (float16[4,4,3,8] _val_4) 
E      <float16 _val_3>
E   {
E      _val_3 = pkg.onnxscript.torch_lib._attention_scale (input_0)
E      _val_4 = pkg.onnxscript.torch_lib._aten_scaled_dot_product_attention_no_mask_onnx <dropout_p: float = 0> (input_0, input_1, input_2, _val_3)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["" : 18]
E   >
E   _attention_scale (query) => (scale)
E   {
E      tmp = Shape (query)
E      int64_m1 = Constant <value: tensor = int64 int64_m1 {-1}> ()
E      tmp_subscripted = Gather <axis: int = 0> (tmp, int64_m1)
E      embedding_size = CastLike (tmp_subscripted, query)
E      const = Constant <value: tensor = float const {1}> ()
E      tmp_0 = Sqrt (embedding_size)
E      const_cast = CastLike (const, tmp_0)
E      scale = Div (const_cast, tmp_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["" : 18]
E   >
E   _aten_scaled_dot_product_attention_no_mask_onnx <dropout_p>(query, key, value, scale) => (return_val)
E   {
E      key_shape = Shape (key)
E      int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
E      int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
E      int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
E      int64_9223372036854775807_1d = Constant <value: tensor = int64[1] int64_9223372036854775807_1d {9223372036854775807}> ()
E      key_last_dim = Slice (key_shape, int64_m1_1d, int64_9223372036854775807_1d, int64_0_1d, int64_1_1d)
E      int64_0_1d_0 = Constant <value: tensor = int64[1] int64_0_1d_0 {0}> ()
E      int64_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
E      int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
E      int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
E      key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
E      int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
E      int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
E      int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
E      key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
E      tmp = Constant <value_ints: ints = [-1]> ()
E      key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
E      key_squeezed = Reshape (key, key_squeezed_shape)
E      key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
E      key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
E      key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
E      tmp_6 = Sqrt (scale)
E      query_scaled = Mul (query, tmp_6)
E      tmp_7 = Sqrt (scale)
E      key_transposed_scaled = Mul (key_transposed, tmp_7)
E      tmp_8 = MatMul (query_scaled, key_transposed_scaled)
E      attn_weight = Softmax <axis: int = -1> (tmp_8)
E      dropout_p = Constant <value_float: float = @dropout_p> ()
E      attn_weight_9, _ = Dropout (attn_weight, dropout_p)
E      return_val = MatMul (attn_weight_9, value)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
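
Each "ONNX model is invalid" failure in this run is raised by the test harness when onnx.checker.check_model(onnx_model, full_check=True) rejects the captured graph (the call is visible in the tracebacks below). A minimal sketch for re-running that same full check outside the test suite, assuming the failing graph has been saved to a hypothetical model.onnx:

import onnx
from onnx import checker

# "model.onnx" is a placeholder path used only for illustration; the test
# harness builds the model in memory instead of saving it to disk.
model = onnx.load("model.onnx")

# full_check=True additionally runs ONNX shape/type inference; that pass is
# what produces the InferenceError messages quoted in these failures.
checker.check_model(model, full_check=True)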

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

All 3 runs failed: test_output_match_opinfo__logaddexp2_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,5] input_0, float16[5,5] input_1) => (float16[5,5] _val_2) {
   _val_2 = pkg.onnxscript.torch_lib.aten_logaddexp2 (input_0, input_1)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
aten_logaddexp2 (self, other) => (return_val)
{
   const = Constant <value: tensor = float const {2}> ()
   tmp = Pow (const, self)
   const_0 = Constant <value: tensor = float const_0 {2}> ()
   tmp_1 = Pow (const_0, other)
   summation = Add (tmp, tmp_1)
   tmp_2 = Log (summation)
   const_3 = Constant <value: tensor = float const_3 {2}> ()
   tmp_4 = Log (const_3)
   tmp_5 = CastLike (tmp_4, self)
   return_val = Div (tmp_2, tmp_5)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_logaddexp2, node name: aten_logaddexp2_0): [ShapeInferenceError] (op_type:Div, node name: n9): B has inconsistent type tensor(float16)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,5] input_0, float16[5,5] input_1) => (float16[5,5] _val_2) {
E      _val_2 = pkg.onnxscript.torch_lib.aten_logaddexp2 (input_0, input_1)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["" : 18]
E   >
E   aten_logaddexp2 (self, other) => (return_val)
E   {
E      const = Constant <value: tensor = float const {2}> ()
E      tmp = Pow (const, self)
E      const_0 = Constant <value: tensor = float const_0 {2}> ()
E      tmp_1 = Pow (const_0, other)
E      summation = Add (tmp, tmp_1)
E      tmp_2 = Log (summation)
E      const_3 = Constant <value: tensor = float const_3 {2}> ()
E      tmp_4 = Log (const_3)
E      tmp_5 = CastLike (tmp_4, self)
E      return_val = Div (tmp_2, tmp_5)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
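
The shape-inference error above points at the final Div in aten_logaddexp2: the float32 constant 2 is the base of each Pow, so Pow, Add and Log all produce float32 outputs, while the CastLike'd log(2) is float16, and Div then sees mismatched operand types. A hedged sketch, not the torchlib source, of how such a mismatch can be avoided by casting the scalar constants to the input dtype before they are used (shown without the @script decorator for brevity):

from onnxscript import opset18 as op

def logaddexp2_sketch(self, other):
    # Cast the scalar 2 to the input dtype up front so Pow, Add, Log and Div
    # all stay in float16 when the inputs are float16.
    two = op.CastLike(op.Constant(value_float=2.0), self)
    summation = op.Add(op.Pow(two, self), op.Pow(two, other))
    log2 = op.CastLike(op.Log(op.Constant(value_float=2.0)), self)
    return op.Div(op.Log(summation), log2)

The actual fix in torchlib may look different; the point is only that every value feeding the Div ends up in the same dtype as self.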

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

3 out of 9 runs failed: test_output_match_opinfo__all_dim_cpu_int32 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 1s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32 input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,5] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,5] input_0) => (bool[1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,2,1,2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,2,1,2] input_0) => (bool[3,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32 input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,5] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,5] input_0) => (bool[1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,2,1,2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,2,1,2] input_0) => (bool[3,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
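
Every aten_all_dim model above fails for the same reason, spelled out in the inference errors: the else branch emits dim = Constant <value_int: int = @dim>, but the function attribute dim is declared as a list of ints, and value_int accepts only a single integer, so the Constant has no output type and the following Reshape sees a null input. A hedged sketch, not the torchlib source, of what the reduction could look like if the axes were materialized with value_ints instead; the names and the plain-Python attribute handling here are illustrative only:

from onnxscript import opset18 as op

def all_over_dims_sketch(self, dims_to_reduce):
    # dims_to_reduce stands in for the function's ints-typed dim attribute;
    # value_ints yields a 1-D INT64 tensor, whereas value_int (as in the
    # failing function body) expects a single integer.
    self_int = op.Cast(op.Cast(self, to=9), to=7)   # values -> BOOL -> INT64
    dims = op.Constant(value_ints=dims_to_reduce)
    all_true = op.ReduceMin(self_int, dims, keepdims=0)
    return op.Cast(all_true, to=9)                  # reduced result back to BOOL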

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

3 out of 9 runs failed: test_output_match_opinfo__all_dim_cpu_int64 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 1s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64 input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64[2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64[3,5] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64[3,5] input_0) => (bool[1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64[3,2,1,2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64[3,2,1,2] input_0) => (bool[3,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64[2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64[3,5] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64[3,5] input_0) => (bool[1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64[3,2,1,2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64[3,2,1,2] input_0) => (bool[3,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
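
Editor's note: the shape-inference failures above all point at the same node inside `aten_all_dim`: the function attribute `dim` is declared as a list of ints, but it is bound to `Constant <value_int: int = @dim>`, and the checker requires `value_int` to be a single integer, so the constant (and the `Reshape` that consumes it) never gets a type. The snippet below is only a minimal sketch of that distinction using the public `onnx.helper` API; it is not the torch_lib function or its fix, and the graph name, shapes, and axes are made up for illustration.

    import onnx
    from onnx import TensorProto, helper

    def make_dims_model(use_value_ints: bool) -> onnx.ModelProto:
        # A Constant producing the reduction axes, consumed by ReduceMin
        # (mirroring the else-branch pattern in the dumps above).
        if use_value_ints:
            axes = helper.make_node("Constant", [], ["dims"], value_ints=[1, 3])  # ints attribute: accepted
        else:
            axes = helper.make_node("Constant", [], ["dims"], value_int=[1, 3])   # list bound to value_int: rejected
        reduce_min = helper.make_node("ReduceMin", ["x", "dims"], ["y"], keepdims=0)
        graph = helper.make_graph(
            [axes, reduce_min],
            "all_dim_sketch",  # hypothetical graph name, not from the test
            [helper.make_tensor_value_info("x", TensorProto.INT64, [3, 2, 1, 2])],
            [helper.make_tensor_value_info("y", TensorProto.INT64, None)],
        )
        return helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])

    onnx.checker.check_model(make_dims_model(True), full_check=True)   # passes
    onnx.checker.check_model(make_dims_model(False), full_check=True)  # fails with a 'value_int' attribute error

With an ints-typed attribute, the repair presumably routes `@dim` through `value_ints` (or passes the axes as a tensor input) so the downstream `Reshape` has a typed input; that change belongs to the torch_lib function itself, not to this sketch.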

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

@github-actions github-actions / Test Results

3 out of 9 runs failed: test_output_match_opinfo__any_dim_cpu_int32 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32 input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,5] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,5] input_0) => (bool[1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,2,1,2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int32[3,2,1,2] input_0) => (bool[3,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32 input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,5] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,5] input_0) => (bool[1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,2,1,2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int32[3,2,1,2] input_0) => (bool[3,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__linspace_tensor_overload_cpu_int64 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 9s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 2s]
Raw output
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.3333333432674408 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.3333333432674408 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.3333333432674408 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 2 at index (25,)
Greatest relative difference: inf at index (25,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 2 at index (25,)
Greatest relative difference: inf at index (25,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 2 at index (25,)
Greatest relative difference: inf at index (25,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 6 at index (49,)
Greatest relative difference: inf at index (9,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 6 at index (49,)
Greatest relative difference: inf at index (9,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 6 at index (49,)
Greatest relative difference: inf at index (9,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 34 / 50 (68.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (1,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 34 / 50 (68.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (1,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 34 / 50 (68.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (1,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 33 / 50 (66.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: 1.0 at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 33 / 50 (66.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: 1.0 at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 33 / 50 (66.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: 1.0 at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 1.0 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 1.0 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 1.0 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 37 / 50 (74.0%)
Greatest absolute difference: 4 at index (49,)
Greatest relative difference: 1.0 at index (13,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 37 / 50 (74.0%)
Greatest absolute difference: 4 at index (49,)
Greatest relative difference: 1.0 at index (13,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 37 / 50 (74.0%)
Greatest absolute difference: 4 at index (49,)
Greatest relative difference: 1.0 at index (13,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.019999999552965164 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.019999999552965164 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.019999999552965164 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 7 at index (49,)
Greatest relative difference: inf at index (22,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 7 at index (49,)
Greatest relative difference: inf at index (22,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 7 at index (49,)
Greatest relative difference: inf at index (22,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (37,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (37,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (37,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (33,)
Greatest relative difference: 3.0 at index (33,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (33,)
Greatest relative difference: 3.0 at index (33,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (33,)
Greatest relative difference: 3.0 at index (33,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 48 / 50 (96.0%)
Greatest absolute difference: 46 at index (49,)
Greatest relative difference: 0.9200000166893005 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 48 / 50 (96.0%)
Greatest absolute difference: 46 at index (49,)
Greatest relative difference: 0.9200000166893005 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 48 / 50 (96.0%)
Greatest absolute difference: 46 at index (49,)
Greatest relative difference: 0.9200000166893005 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (46,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (46,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (46,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 1 at index (1,)
Greatest relative difference: inf at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 1 at index (1,)
Greatest relative difference: inf at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 1 at index (1,)
Greatest relative difference: inf at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 46 at index (48,)
Greatest relative difference: 11.5 at index (48,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 46 at index (48,)
Greatest relative difference: 11.5 at index (48,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 46 at index (48,)
Greatest relative difference: 11.5 at index (48,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 43 / 50 (86.0%)
Greatest absolute difference: 7 at index (49,)
Greatest relative difference: 1.0 at index (7,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.3333333432674408 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.3333333432674408 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.3333333432674408 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 2 at index (25,)
E   Greatest relative difference: inf at index (25,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 2 at index (25,)
E   Greatest relative difference: inf at index (25,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 2 at index (25,)
E   Greatest relative difference: inf at index (25,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 6 at index (49,)
E   Greatest relative difference: inf at index (9,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 6 at index (49,)
E   Greatest relative difference: inf at index (9,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 6 at index (49,)
E   Greatest relative difference: inf at index (9,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 34 / 50 (68.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (1,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 34 / 50 (68.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (1,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 34 / 50 (68.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (1,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 33 / 50 (66.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: 1.0 at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 33 / 50 (66.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: 1.0 at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 33 / 50 (66.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: 1.0 at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 1.0 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 1.0 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 1.0 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 37 / 50 (74.0%)
E   Greatest absolute difference: 4 at index (49,)
E   Greatest relative difference: 1.0 at index (13,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 37 / 50 (74.0%)
E   Greatest absolute difference: 4 at index (49,)
E   Greatest relative difference: 1.0 at index (13,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 37 / 50 (74.0%)
E   Greatest absolute difference: 4 at index (49,)
E   Greatest relative difference: 1.0 at index (13,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.019999999552965164 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.019999999552965164 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.019999999552965164 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 7 at index (49,)
E   Greatest relative difference: inf at index (22,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 7 at index (49,)
E   Greatest relative difference: inf at index (22,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 7 at index (49,)
E   Greatest relative difference: inf at index (22,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (37,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (37,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (37,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (33,)
E   Greatest relative difference: 3.0 at index (33,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (33,)
E   Greatest relative difference: 3.0 at index (33,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (33,)
E   Greatest relative difference: 3.0 at index (33,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 48 / 50 (96.0%)
E   Greatest absolute difference: 46 at index (49,)
E   Greatest relative difference: 0.9200000166893005 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 48 / 50 (96.0%)
E   Greatest absolute difference: 46 at index (49,)
E   Greatest relative difference: 0.9200000166893005 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 48 / 50 (96.0%)
E   Greatest absolute difference: 46 at index (49,)
E   Greatest relative difference: 0.9200000166893005 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (46,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (46,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (46,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 1 at index (1,)
E   Greatest relative difference: inf at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 1 at index (1,)
E   Greatest relative difference: inf at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 1 at index (1,)
E   Greatest relative difference: inf at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 46 at index (48,)
E   Greatest relative difference: 11.5 at index (48,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 46 at index (48,)
E   Greatest relative difference: 11.5 at index (48,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 46 at index (48,)
E   Greatest relative difference: 11.5 at index (48,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 43 / 50 (86.0%)
E   Greatest absolute difference: 7 at index (49,)
E   Greatest relative difference: 1.0 at index (7,)
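
Note on the failure messages above: per the traceback, the harness compares the exported graph's outputs against the PyTorch reference with torch.testing.assert_close (ops_test.py:266). When both tolerances are zero the comparison is exact, and torch reports "Tensor-likes are not equal!" together with the greatest absolute/relative difference and its flat index (which is also why these reports lack the usual "(up to ... allowed)" suffix). A minimal sketch of how such a report is produced; the tensors below are illustrative, not the test's actual inputs:

import torch

# Hypothetical data: a 50-element float16 tensor with one perturbed entry,
# mirroring the "Mismatched elements: 1 / 50" reports above.
expected = torch.arange(50, dtype=torch.float16)
actual = expected.clone()
actual[49] += 1

# With rtol=0 and atol=0 the check is exact, so the raised AssertionError
# reads "Tensor-likes are not equal!" and points at index (49,).
torch.testing.assert_close(actual, expected, rtol=0, atol=0)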

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

@github-actions github-actions / Test Results

3 out of 9 runs failed: test_output_match_opinfo__all_dim_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 1s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16 input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[3,5] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[3,5] input_0) => (bool[1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[3,2,1,2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[3,2,1,2] input_0) => (bool[3,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16 input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[3,5] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[3,5] input_0) => (bool[1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[3,2,1,2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[3,2,1,2] input_0) => (bool[3,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
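
Note on the repeated ShapeInferenceError above: the aten_all_dim function body materializes its reduction axes with Constant <value_int: int = @dim>, but every call site supplies dim as a list attribute (e.g. dim: ints = [0, 1]), so the attribute reference cannot resolve to a single integer, the Constant output has no type, and shape inference inside the If branches fails; onnx.checker.check_model(..., full_check=True) then surfaces the error (ops_test_common.py:522). A small, self-contained sketch of the attribute distinction involved (node and output names are illustrative):

from onnx import helper

# Constant's value_int attribute carries exactly one integer ...
single_dim = helper.make_node("Constant", [], ["dim"], value_int=1)

# ... whereas a list of reduction axes needs value_ints, which is what the
# call sites above pass (e.g. dim: ints = [0, 1]).
all_dims = helper.make_node("Constant", [], ["dims"], value_ints=[0, 1])

print(helper.printable_node(single_dim))
print(helper.printable_node(all_dims))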

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__logit_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: Tensor-likes are not close!

Mismatched elements: 16 / 125 (12.8%)
Greatest absolute difference: 0.000732421875 at index (1, 2, 2) (up to 1e-05 allowed)
Greatest relative difference: 0.10821533203125 at index (3, 3, 3) (up to 0.001 allowed)
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,5,5] input_0) => (float16[5,5,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib._aten_logit_clamp_onnx <eps: float = 0.2> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_aten_logit_clamp_onnx <eps>(self) => (return_val)
{
   const = Constant <value: tensor = float const {1}> ()
   eps = Constant <value_float: float = @eps> ()
   tmp = Sub (const, eps)
   tmp_0 = LessOrEqual (self, tmp)
   const_1 = Constant <value: tensor = float const_1 {1}> ()
   eps_2 = Constant <value_float: float = @eps> ()
   tmp_3 = Sub (const_1, eps_2)
   temporary_self = Where (tmp_0, self, tmp_3)
   eps_4 = Constant <value_float: float = @eps> ()
   eps_4_cast = CastLike (eps_4, temporary_self)
   tmp_5 = Less (temporary_self, eps_4_cast)
   eps_6 = Constant <value_float: float = @eps> ()
   eps_6_cast = CastLike (eps_6, temporary_self)
   z = Where (tmp_5, eps_6_cast, temporary_self)
   const_7 = Constant <value: tensor = float const_7 {1}> ()
   const_7_cast = CastLike (const_7, z)
   tmp_8 = Sub (const_7_cast, z)
   tmp_9 = Div (z, tmp_8)
   return_val = Log (tmp_9)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16 input_0) => (float16 _val_1) {
   _val_1 = pkg.onnxscript.torch_lib._aten_logit_clamp_onnx <eps: float = 0.2> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_aten_logit_clamp_onnx <eps>(self) => (return_val)
{
   const = Constant <value: tensor = float const {1}> ()
   eps = Constant <value_float: float = @eps> ()
   tmp = Sub (const, eps)
   tmp_0 = LessOrEqual (self, tmp)
   const_1 = Constant <value: tensor = float const_1 {1}> ()
   eps_2 = Constant <value_float: float = @eps> ()
   tmp_3 = Sub (const_1, eps_2)
   temporary_self = Where (tmp_0, self, tmp_3)
   eps_4 = Constant <value_float: float = @eps> ()
   eps_4_cast = CastLike (eps_4, temporary_self)
   tmp_5 = Less (temporary_self, eps_4_cast)
   eps_6 = Constant <value_float: float = @eps> ()
   eps_6_cast = CastLike (eps_6, temporary_self)
   z = Where (tmp_5, eps_6_cast, temporary_self)
   const_7 = Constant <value: tensor = float const_7 {1}> ()
   const_7_cast = CastLike (const_7, z)
   tmp_8 = Sub (const_7_cast, z)
   tmp_9 = Div (z, tmp_8)
   return_val = Log (tmp_9)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 16 / 125 (12.8%)
E   Greatest absolute difference: 0.000732421875 at index (1, 2, 2) (up to 1e-05 allowed)
E   Greatest relative difference: 0.10821533203125 at index (3, 3, 3) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:_aten_logit_clamp_onnx, node name: _aten_logit_clamp_onnx_0): [ShapeInferenceError] (op_type:Where, node name: n7): Y has inconsistent type tensor(float)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,5,5] input_0) => (float16[5,5,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib._aten_logit_clamp_onnx <eps: float = 0.2> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["" : 18]
E   >
E   _aten_logit_clamp_onnx <eps>(self) => (return_val)
E   {
E      const = Constant <value: tensor = float const {1}> ()
E      eps = Constant <value_float: float = @eps> ()
E      tmp = Sub (const, eps)
E      tmp_0 = LessOrEqual (self, tmp)
E      const_1 = Constant <value: tensor = float const_1 {1}> ()
E      eps_2 = Constant <value_float: float = @eps> ()
E      tmp_3 = Sub (const_1, eps_2)
E      temporary_self = Where (tmp_0, self, tmp_3)
E      eps_4 = Constant <value_float: float = @eps> ()
E      eps_4_cast = CastLike (eps_4, temporary_self)
E      tmp_5 = Less (temporary_self, eps_4_cast)
E      eps_6 = Constant <value_float: float = @eps> ()
E      eps_6_cast = CastLike (eps_6, temporary_self)
E      z = Where (tmp_5, eps_6_cast, temporary_self)
E      const_7 = Constant <value: tensor = float const_7 {1}> ()
E      const_7_cast = CastLike (const_7, z)
E      tmp_8 = Sub (const_7_cast, z)
E      tmp_9 = Div (z, tmp_8)
E      return_val = Log (tmp_9)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:_aten_logit_clamp_onnx, node name: _aten_logit_clamp_onnx_0): [ShapeInferenceError] (op_type:Where, node name: n7): Y has inconsistent type tensor(float)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16 input_0) => (float16 _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib._aten_logit_clamp_onnx <eps: float = 0.2> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["" : 18]
E   >
E   _aten_logit_clamp_onnx <eps>(self) => (return_val)
E   {
E      const = Constant <value: tensor = float const {1}> ()
E      eps = Constant <value_float: float = @eps> ()
E      tmp = Sub (const, eps)
E      tmp_0 = LessOrEqual (self, tmp)
E      const_1 = Constant <value: tensor = float const_1 {1}> ()
E      eps_2 = Constant <value_float: float = @eps> ()
E      tmp_3 = Sub (const_1, eps_2)
E      temporary_self = Where (tmp_0, self, tmp_3)
E      eps_4 = Constant <value_float: float = @eps> ()
E      eps_4_cast = CastLike (eps_4, temporary_self)
E      tmp_5 = Less (temporary_self, eps_4_cast)
E      eps_6 = Constant <value_float: float = @eps> ()
E      eps_6_cast = CastLike (eps_6, temporary_self)
E      z = Where (tmp_5, eps_6_cast, temporary_self)
E      const_7 = Constant <value: tensor = float const_7 {1}> ()
E      const_7_cast = CastLike (const_7, z)
E      tmp_8 = Sub (const_7_cast, z)
E      tmp_9 = Div (z, tmp_8)
E      return_val = Log (tmp_9)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
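The shape-inference error above pinpoints node n7, the first Where in _aten_logit_clamp_onnx: its "1 - eps" branch is built from plain float constants that are never cast, so for float16 inputs the Where receives a float16 X and a float32 Y. Below is a minimal onnxscript sketch of the dtype-consistent pattern, assuming the public @script() decorator and opset18; the function name and annotations are illustrative only and this is not the torchlib source. The idea is simply that every scalar constant is routed through CastLike against self before it reaches Where/Sub, exactly as the later nodes in the dump (eps_4_cast, const_7_cast) already do.

from onnxscript import FLOAT, script
from onnxscript import opset18 as op

@script()
def logit_clamp_sketch(self: FLOAT, eps: float) -> FLOAT:
    # Promote every scalar constant to self's dtype up front, so float16 inputs
    # never meet raw float32 constants inside Where/Sub.
    one = op.CastLike(op.Constant(value_float=1.0), self)
    eps_cast = op.CastLike(eps, self)
    upper = op.Sub(one, eps_cast)
    clamped = op.Where(op.LessOrEqual(self, upper), self, upper)
    clamped = op.Where(op.Less(clamped, eps_cast), eps_cast, clamped)
    return op.Log(op.Div(clamped, op.Sub(one, clamped)))

With both Where branches derived from CastLike'd values, the checker's "Y has inconsistent type tensor(float)" complaint no longer applies, because every operand shares self's dtype.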

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

3 out of 9 runs failed: test_output_match_opinfo__any_dim_cpu_bool (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,5] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,5] input_0) => (bool[1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,2,1,2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,2,1,2] input_0) => (bool[3,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,5] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,5] input_0) => (bool[1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,2,1,2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,2,1,2] input_0) => (bool[3,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
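All of these cases fail identically: aten_any_dim is called with dim as an INTS attribute (dim: ints = [], [0], [0, 1], ...) yet the else branch materializes it with Constant <value_int: int = @dim>, so the checker rejects the attribute and the downstream Reshape input is left untyped. A hedged onnxscript sketch of the list-of-dims pattern follows, assuming the public @script() decorator, opset18, and a Sequence[int] attribute annotation; the name any_dims_sketch is illustrative, this is not the torchlib source, and the rank-0 If branch of the original function is omitted. The point is only that an ints attribute has to be emitted with value_ints before it can be reshaped and fed to ReduceMax.

from typing import Sequence

from onnxscript import BOOL, script
from onnxscript import opset18 as op

@script()
def any_dims_sketch(self: BOOL, dim: Sequence[int], keepdim: int) -> BOOL:
    # Materialize the ints attribute as a 1-D INT64 tensor: value_ints, not value_int.
    dims = op.Reshape(op.Constant(value_ints=dim), op.Constant(value_ints=[-1]))
    self_int = op.Cast(self, to=7)    # 7 = INT64; ReduceMax is not defined for bool input
    any_true = op.ReduceMax(self_int, dims, keepdims=keepdim)
    return op.Cast(any_true, to=9)    # 9 = BOOL

With value_ints the Constant yields a typed 1-D int64 tensor, so the paired "Attribute 'value_int' expect an integer" and "Input 0 expected to have type but instead is null" errors no longer arise.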

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

3 out of 9 runs failed: test_output_match_opinfo__any_dim_cpu_float32 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float[2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float[3,5] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float[3,5] input_0) => (bool[1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float[3,2,1,2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float[3,2,1,2] input_0) => (bool[3,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_any_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (any_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float[2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float[3,5] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float[3,5] input_0) => (bool[1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float[3,2,1,2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_any_dim, node name: aten_any_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float[3,2,1,2] input_0) => (bool[3,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_any_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_any_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         any_true = ReduceMax <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (any_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
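
Note on this group of failures: every nested error points at the same node, Constant <value_int: int = @dim> inside aten_any_dim, whose referenced dim attribute is a list such as [0, -1]. In ONNX, Constant's value_int attribute takes a single integer; lists are carried by value_ints. A minimal sketch (Python, not the torch_lib source) of how the two attribute forms are typed, and why shape inference rejects the mix:

from onnx import AttributeProto, helper

# value_int is typed as a single INT ...
print(helper.make_attribute("value_int", 0).type == AttributeProto.INT)          # True
# ... while a list such as dim = [0, -1] is typed as INTS, the form value_ints expects.
print(helper.make_attribute("value_ints", [0, -1]).type == AttributeProto.INTS)  # True

# Binding the INTS-typed dim to the INT-typed value_int is what trips
# "Attribute 'value_int' expect an integer."; the Constant output is then left
# untyped, which is the follow-on TypeInferenceError on the Reshape input.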

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__addbmm_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: Tensor-likes are not close!

Mismatched elements: 9 / 50 (18.0%)
Greatest absolute difference: 0.09375 at index (2, 3) (up to 1e-05 allowed)
Greatest relative difference: 0.0338134765625 at index (4, 1) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 5 / 50 (10.0%)
Greatest absolute difference: 0.0625 at index (2, 7) (up to 1e-05 allowed)
Greatest relative difference: 0.01500701904296875 at index (1, 2) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 6 / 50 (12.0%)
Greatest absolute difference: 0.0078125 at index (3, 7) (up to 1e-05 allowed)
Greatest relative difference: 0.00559234619140625 at index (0, 8) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 5 / 50 (10.0%)
Greatest absolute difference: 0.015625 at index (4, 2) (up to 1e-05 allowed)
Greatest relative difference: 0.01526641845703125 at index (1, 2) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 12 / 50 (24.0%)
Greatest absolute difference: 0.0625 at index (3, 2) (up to 1e-05 allowed)
Greatest relative difference: 0.08935546875 at index (4, 7) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 7 / 50 (14.0%)
Greatest absolute difference: 0.013671875 at index (4, 5) (up to 1e-05 allowed)
Greatest relative difference: 0.00673675537109375 at index (2, 6) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 9 / 50 (18.0%)
E   Greatest absolute difference: 0.09375 at index (2, 3) (up to 1e-05 allowed)
E   Greatest relative difference: 0.0338134765625 at index (4, 1) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 5 / 50 (10.0%)
E   Greatest absolute difference: 0.0625 at index (2, 7) (up to 1e-05 allowed)
E   Greatest relative difference: 0.01500701904296875 at index (1, 2) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 6 / 50 (12.0%)
E   Greatest absolute difference: 0.0078125 at index (3, 7) (up to 1e-05 allowed)
E   Greatest relative difference: 0.00559234619140625 at index (0, 8) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 5 / 50 (10.0%)
E   Greatest absolute difference: 0.015625 at index (4, 2) (up to 1e-05 allowed)
E   Greatest relative difference: 0.01526641845703125 at index (1, 2) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 12 / 50 (24.0%)
E   Greatest absolute difference: 0.0625 at index (3, 2) (up to 1e-05 allowed)
E   Greatest relative difference: 0.08935546875 at index (4, 7) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 7 / 50 (14.0%)
E   Greatest absolute difference: 0.013671875 at index (4, 5) (up to 1e-05 allowed)
E   Greatest relative difference: 0.00673675537109375 at index (2, 6) (up to 0.001 allowed)
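
The addbmm float16 outputs differ from the eager results by up to about 0.1 absolutely and 9% relatively, which the tolerances quoted in the messages (atol 1e-05, rtol 0.001) do not allow; a common remedy for this kind of failure is to widen the float16 tolerances for the affected op rather than change the function. A small sketch (assuming the usual |actual - expected| <= atol + rtol * |expected| rule behind torch.testing.assert_close) that reproduces the report format above for any two tensors:

import torch

def summarize(actual: torch.Tensor, expected: torch.Tensor, rtol: float = 1e-3, atol: float = 1e-5) -> None:
    # Element-wise closeness check mirroring the report lines above.
    diff = (actual.double() - expected.double()).abs()
    mismatched = diff > atol + rtol * expected.double().abs()
    rel = diff / expected.double().abs().clamp_min(torch.finfo(torch.float64).tiny)
    total = diff.numel()
    print(f"Mismatched elements: {int(mismatched.sum())} / {total} ({100.0 * int(mismatched.sum()) / total:.1f}%)")
    print(f"Greatest absolute difference: {diff.max().item()} (up to {atol} allowed)")
    print(f"Greatest relative difference: {rel.max().item()} (up to {rtol} allowed)")

# e.g. summarize(onnx_output, torch_output) for the float16 pairs compared above.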

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__log_softmax_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5] input_0) => (float16[5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 0, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_special_log_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = LogSoftmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 0, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_special_log_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = LogSoftmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 1, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_special_log_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = LogSoftmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = -1, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_special_log_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = LogSoftmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,10,5] input_0) => (float16[5,10,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 2, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_special_log_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = LogSoftmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,0,0] input_0) => (float16[5,0,0] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = -1, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_special_log_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = LogSoftmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16 input_0) => (float16 _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 0, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_special_log_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = LogSoftmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_special_log_softmax, node name: aten_special_log_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5] input_0) => (float16[5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 0, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_special_log_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = LogSoftmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_special_log_softmax, node name: aten_special_log_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 0, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_special_log_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = LogSoftmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_special_log_softmax, node name: aten_special_log_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 1, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_special_log_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = LogSoftmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_special_log_softmax, node name: aten_special_log_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = -1, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_special_log_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = LogSoftmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_special_log_softmax, node name: aten_special_log_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,10,5] input_0) => (float16[5,10,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 2, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_special_log_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = LogSoftmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_special_log_softmax, node name: aten_special_log_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,0,0] input_0) => (float16[5,0,0] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = -1, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_special_log_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = LogSoftmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_special_log_softmax, node name: aten_special_log_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16 input_0) => (float16 _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_special_log_softmax <dim: int = 0, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_special_log_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = LogSoftmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
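
All of the log_softmax models in this group fail identically: aten_special_log_softmax is instantiated with dtype: int = 1 and ends with Cast <to: int = @dtype>, while main_graph declares the output value (_val_1) as float16. The "(1) vs (10)" codes in the TypeInferenceError are ONNX element types, which can be decoded directly:

from onnx import TensorProto

print(TensorProto.FLOAT)    # 1  -- the type Cast <to: int = @dtype> produces inside the function
print(TensorProto.FLOAT16)  # 10 -- the type main_graph declares for _val_1

So the float result inferred through the Cast conflicts with the declared float16 output, which is exactly the mismatch the checker reports.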

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__cross_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: Tensor-likes are not close!

Mismatched elements: 1 / 15 (6.7%)
Greatest absolute difference: 0.00390625 at index (3, 0) (up to 1e-05 allowed)
Greatest relative difference: 0.0014905929565429688 at index (3, 0) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 5 / 75 (6.7%)
Greatest absolute difference: 0.015625 at index (1, 0, 4) (up to 1e-05 allowed)
Greatest relative difference: 0.00612640380859375 at index (3, 2, 1) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 2 / 15 (13.3%)
Greatest absolute difference: 0.0013427734375 at index (1, 0) (up to 1e-05 allowed)
Greatest relative difference: 0.006610870361328125 at index (1, 0) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 1 / 15 (6.7%)
E   Greatest absolute difference: 0.00390625 at index (3, 0) (up to 1e-05 allowed)
E   Greatest relative difference: 0.0014905929565429688 at index (3, 0) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 5 / 75 (6.7%)
E   Greatest absolute difference: 0.015625 at index (1, 0, 4) (up to 1e-05 allowed)
E   Greatest relative difference: 0.00612640380859375 at index (3, 2, 1) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 2 / 15 (13.3%)
E   Greatest absolute difference: 0.0013427734375 at index (1, 0) (up to 1e-05 allowed)
E   Greatest relative difference: 0.006610870361328125 at index (1, 0) (up to 0.001 allowed)

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__matmul_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: Tensor-likes are not close!

Mismatched elements: 1 / 5 (20.0%)
Greatest absolute difference: 0.01171875 at index (0,) (up to 1e-05 allowed)
Greatest relative difference: 0.002384185791015625 at index (0,) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 2 / 25 (8.0%)
Greatest absolute difference: 0.03125 at index (2, 3) (up to 1e-05 allowed)
Greatest relative difference: 0.004360198974609375 at index (2, 3) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 38 / 250 (15.2%)
Greatest absolute difference: 0.0625 at index (0, 3, 9) (up to 1e-05 allowed)
Greatest relative difference: 0.0582275390625 at index (3, 4, 6) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 1 / 5 (20.0%)
E   Greatest absolute difference: 0.01171875 at index (0,) (up to 1e-05 allowed)
E   Greatest relative difference: 0.002384185791015625 at index (0,) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 2 / 25 (8.0%)
E   Greatest absolute difference: 0.03125 at index (2, 3) (up to 1e-05 allowed)
E   Greatest relative difference: 0.004360198974609375 at index (2, 3) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 38 / 250 (15.2%)
E   Greatest absolute difference: 0.0625 at index (0, 3, 9) (up to 1e-05 allowed)
E   Greatest relative difference: 0.0582275390625 at index (3, 4, 6) (up to 0.001 allowed)
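
Like the addbmm and cross float16 failures above, these matmul mismatches (up to about 0.06 absolute) are consistent with the two sides accumulating the same float16 products in a different order or at a different intermediate precision. A small illustration (not the test code) of how much a dot product accumulated in float16 can drift from one accumulated in float32:

import torch

torch.manual_seed(0)
a = torch.randn(256)
b = torch.randn(256)

# Accumulate in float16: every partial sum is rounded to half precision.
acc16 = torch.zeros((), dtype=torch.float16)
for x, y in zip(a.half(), b.half()):
    acc16 = acc16 + x * y

# Accumulate in float32 and only cast the final result.
acc32 = (a @ b).half()

print(acc16.item(), acc32.item())  # nearby, but generally not bit-identical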

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__nn_functional_scaled_dot_product_attention_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 1s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[4,3,8] input_0, float16[4,6,8] input_1, float16[4,6,8] input_2) => (float16[4,3,8] _val_5) 
   <float16 _val_3, float[3,6] _val_4>
{
   _val_3 = pkg.onnxscript.torch_lib._attention_scale (input_0)
   _val_4 = pkg.onnxscript.torch_lib._causal_attention_mask (input_0, input_1)
   _val_5 = pkg.onnxscript.torch_lib._aten_scaled_dot_product_attention_float_mask_onnx <dropout_p: float = 0> (input_0, input_1, input_2, _val_4, _val_3)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_attention_scale (query) => (scale)
{
   tmp = Shape (query)
   int64_m1 = Constant <value: tensor = int64 int64_m1 {-1}> ()
   tmp_subscripted = Gather <axis: int = 0> (tmp, int64_m1)
   embedding_size = CastLike (tmp_subscripted, query)
   const = Constant <value: tensor = float const {1}> ()
   tmp_0 = Sqrt (embedding_size)
   const_cast = CastLike (const, tmp_0)
   scale = Div (const_cast, tmp_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_causal_attention_mask (query, key) => (attn_mask_10)
{
   tmp = Shape (query)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   target_length = Slice (tmp, int64_m2_1d, int64_m1_1d, int64_0_1d, int64_1_1d)
   tmp_0 = Shape (key)
   int64_0_1d_1 = Constant <value: tensor = int64[1] int64_0_1d_1 {0}> ()
   int64_1_1d_2 = Constant <value: tensor = int64[1] int64_1_1d_2 {1}> ()
   int64_m2_1d_3 = Constant <value: tensor = int64[1] int64_m2_1d_3 {-2}> ()
   int64_m1_1d_4 = Constant <value: tensor = int64[1] int64_m1_1d_4 {-1}> ()
   source_length = Slice (tmp_0, int64_m2_1d_3, int64_m1_1d_4, int64_0_1d_1, int64_1_1d_2)
   size = Concat <axis: int = 0> (target_length, source_length)
   const = Constant <value: tensor = float const {1}> ()
   attn_mask = Expand (const, size)
   attn_mask_5 = Trilu <upper: int = 0> (attn_mask)
   const_6 = Constant <value: tensor = float const_6 {0}> ()
   const_6_cast = CastLike (const_6, attn_mask_5)
   tmp_7 = Equal (attn_mask_5, const_6_cast)
   tmp_8 = Constant <value_float: float = -inf> ()
   const_9 = Constant <value: tensor = float const_9 {0}> ()
   const_9_cast = CastLike (const_9, tmp_8)
   attn_mask_10 = Where (tmp_7, tmp_8, const_9_cast)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_aten_scaled_dot_product_attention_float_mask_onnx <dropout_p>(query, key, value, attn_mask, scale) => (return_val)
{
   key_shape = Shape (key)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   int64_9223372036854775807_1d = Constant <value: tensor = int64[1] int64_9223372036854775807_1d {9223372036854775807}> ()
   key_last_dim = Slice (key_shape, int64_m1_1d, int64_9223372036854775807_1d, int64_0_1d, int64_1_1d)
   int64_0_1d_0 = Constant <value: tensor = int64[1] int64_0_1d_0 {0}> ()
   int64_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
   key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
   int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
   int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
   int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
   key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
   tmp = Constant <value_ints: ints = [-1]> ()
   key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
   key_squeezed = Reshape (key, key_squeezed_shape)
   key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
   key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
   key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
   tmp_6 = Sqrt (scale)
   query_scaled = Mul (query, tmp_6)
   tmp_7 = Sqrt (scale)
   key_transposed_scaled = Mul (key_transposed, tmp_7)
   tmp_8 = MatMul (query_scaled, key_transposed_scaled)
   tmp_9 = Add (tmp_8, attn_mask)
   attn_weight = Softmax <axis: int = -1> (tmp_9)
   dropout_p = Constant <value_float: float = @dropout_p> ()
   attn_weight_10, _ = Dropout (attn_weight, dropout_p)
   return_val = MatMul (attn_weight_10, value)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
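
Two details of this model are worth noting before the runtime failure that follows. First, _attention_scale returns 1 / sqrt(embedding size), which for the float16[4,3,8] queries in this test is easy to check by hand:

import math

# _attention_scale above computes 1 / sqrt(last query dimension); the queries
# in this test have embedding size 8.
print(1 / math.sqrt(8))  # ~0.3536

Second, _causal_attention_mask produces a float32 mask (float[3,6] _val_4) while query, key and value are float16, so Add (tmp_8, attn_mask) mixes float16 and float inputs; Add requires both inputs to share one element type, which is the likely reason ONNX Runtime refuses to evaluate the graph with the inputs below.
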
RuntimeError: ONNX Runtime failed to evaluate:
Inputs:
{'input_0': array([[[ 2.04   ,  2.672  ,  4.613  ,  1.248  ,  3.639  ,  4.5    ,
          3.27   ,  4.965  ],
        [ 7.777  ,  2.268  , -3.242  , -4.867  ,  8.37   , -0.4746 ,
          0.0967 ,  2.89   ],
        [-2.54   , -2.117  , -0.501  , -5.863  , -0.4658 ,  3.578  ,
         -3.137  , -5.73   ]],

       [[ 0.06152,  1.063  ,  3.05   , -3.066  , -8.22   ,  5.688  ,
         -2.875  ,  2.77   ],
        [ 1.556  , -4.26   , -2.574  , -1.371  , -8.65   , -2.504  ,
          6.18   ,  8.1    ],
        [-4.598  , -0.58   ,  1.266  ,  1.758  , -0.8438 ,  8.89   ,
          6.355  , -4.957  ]],

       [[-2.574  , -2.398  , -0.879  , -1.02   ,  4.895  ,  4.797  ,
          2.215  , -5.582  ],
        [ 5.21   , -1.248  , -6.758  , -2.777  ,  7.156  ,  8.086  ,
         -5.055  , -8.92   ],
        [-1.767  , -1.995  ,  2.117  ,  2.197  , -1.301  ,  0.03516,
          1.037  , -0.0791 ]],

       [[ 7.03   , -5.723  ,  0.5625 , -7.727  , -7.04   ,  2.092  ,
         -7.453  ,  6.836  ],
        [-1.512  ,  2.469  , -8.45   ,  1.898  ,  7.496  , -1.74   ,
         -2.021  , -2.953  ],
        [ 4.043  ,  8.16   ,  5.35   , -8.086  ,  0.8613 , -4.516  ,
         -5.625  , -5.45   ]]], dtype=float16),
 'input_1': array([[[ 8.85   , -1.775  , -4.457  , -4.824  ,  8.58   , -2.777  ,
          7.58   ,  5.66   ],
        [ 7.637  , -2.232  ,  3.832  ,  0.1934 , -0.2461 ,  4.957  ,
         -3.059  , -2.734  ],
        [ 4.016  , -8.28   ,  1.266  ,  0.7383 ,  0.677  , -4.992  ,
          7.707  , -9.     ],
        [-8.56   , -2.988  , -2.707  ,  6.777  ,  3.91   , -5.062  ,
         -1.266  , -4.72   ],
        [ 7.023  , -8.71   ,  3.05   , -8.17   ,  0.624  ,  4.836  ,
         -7.656  , -6.812  ],
        [-3.086  , -5.16   , -7.973  , -2.232  ,  7.82   ,  2.68   ,
         -6.652  ,  8.44   ]],

       [[ 0.4658 , -6.934  , -5.59   , -0.3076 ,  6.44   , -2.303  ,
          7.242  , -5.484  ],
        [-3.523  , -2.268  ,  2.654  , -0.9316 ,  1.811  ,  2.004  ,
         -1.512  ,  7.99   ],
        [-3.93   , -8.35   , -5.188  , -8.1    ,  3.7    ,  6.18   ,
         -2.293  , -2.523  ],
        [-1.925  ,  2.68   , -8.15   ,  7.46   , -1.995  ,  2.936  ,
         -1.459  , -5.188  ],
        [-5.08   ,  8.73   ,  2.7    , -6.82   , -7.55   ,  4.22   ,
         -0.3604 ,  2.936  ],
        [-0.04395, -4.246  , -2.338  ,  0.923  ,  4.938  , -8.3    ,
         -7.84   , -2.004  ]],

       [[-1.099  , -7.797  , -7.39   ,  3.516  ,  2.89   , -2.11   ,
          4.457  ,  7.48   ],
        [-0.3604 , -8.41   , -4.21   ,  6.793  , -8.55   ,  3.945  ,
         -7.207  , -7.902  ],
        [ 6.555  , -8.63   ,  6.6    ,  8.52   ,  7.75   , -8.03   ,
         -2.32   ,  5.82   ],
        [ 1.6    , -1.556  , -8.17   ,  8.52   ,  3.277  ,  8.01   ,
          4.562  , -1.099  ],
        [-5.844  , -1.099  ,  6.11   , -6.54   ,  1.705  ,  7.586  ,
          1.705  , -3.146  ],
        [-8.19   , -3.102  ,  8.305  , -8.47   , -3.438  ,  0.4395 ,
          3.533  ,  6.926  ]],

       [[ 0.03516,  4.086  , -3.7    , -3.016  ,  7.277  , -4.316  ,
          3.55   , -1.644  ],
        [ 4.5    , -3.34   , -6.96   , -4.402  , -5.97   ,  0.3955 ,
         -4.21   ,  8.3    ],
        [ 0.677  ,  6.406  ,  7.137  ,  8.1    ,  0.633  , -2.031  ,
         -6.82   , -8.59   ],
        [ 1.055  , -7.13   , -6.906  ,  0.4834 , -5.934  , -8.07   ,
         -1.705  , -8.586  ],
        [ 5.027  , -6.047  ,  0.2197 , -1.942  ,  2.25   , -8.94   ,
         -3.516  ,  7.61   ],
        [ 2.215  ,  6.074  , -2.69   , -6.344  , -3.393  , -8.516  ,
         -2.629  , -4.387  ]]], dtype=float16),
 'input_2': array([[[-4.844  , -8.766  ,  8.63   , -8.32   ,  1.89   ,  3.383  ,
         -5.8    , -3.156  ],
        [-4.387  , -2.865  ,  2.734  , -1.248  ,  0.05273,  0.01758,
          5.47   , -0.9316 ],
        [-2.418  , -5.82   ,  6.594  ,  4.457  ,  8.83   ,  2.398  ,
          4.438  , -1.925  ],
        [-2.514  ,  7.75   ,  0.12305,  1.679  ,  8.65   ,  5.54   ,
         -4.746  , -8.766  ],
        [-2.734  ,  0.334  ,  8.37   ,  2.39   ,  2.021  , -8.25   ,
          4.156  , -7.902  ],
        [-1.872  , -4.29   , -7.734  ,  4.605  ,  1.8545 , -8.79   ,
          5.09   ,  3.453  ]],

       [[-8.42   , -6.96   , -8.05   ,  1.274  , -8.03   , -7.004  ,
         -8.03   ,  4.12   ],
        [-8.71   , -3.533  ,  6.812  ,  8.22   ,  3.234  , -2.434  ,
         -3.78   ,  4.86   ],
        [-5.273  , -3.621  ,  4.543  , -2.926  ,  2.469  ,  2.805  ,
          6.477  ,  3.885  ],
        [ 8.36   , -6.242  , -1.301  ,  8.484  ,  6.504  ,  3.305  ,
          2.531  ,  3.832  ],
        [-3.191  , -6.574  ,  6.23   ,  5.105  ,  4.414  , -3.523  ,
         -4.473  ,  3.066  ],
        [-1.6    ,  3.91   ,  7.285  , -5.934  ,  5.33   ,  5.83   ,
         -1.775  ,  1.195  ]],

       [[-2.338  , -1.107  , -6.875  , -4.234  ,  0.3428 , -6.996  ,
         -4.19   , -0.923  ],
        [ 1.951  , -8.95   ,  2.82   , -4.895  ,  6.426  , -8.35   ,
         -8.98   ,  7.438  ],
        [-3.332  , -7.973  , -1.266  ,  5.316  , -4.58   ,  8.766  ,
         -0.6855 , -3.965  ],
        [ 3.867  , -7.305  , -1.564  , -2.725  ,  3.438  ,  0.2197 ,
          3.814  , -7.49   ],
        [ 2.629  ,  5.66   , -6.145  ,  3.594  ,  1.028  , -1.661  ,
          6.906  , -2.645  ],
        [-7.03   , -4.332  , -8.016  , -7.777  , -4.094  , -9.     ,
         -8.22   , -3.262  ]],

       [[-6.195  ,  4.824  ,  7.066  ,  7.848  , -7.79   ,  4.484  ,
          7.62   , -1.582  ],
        [ 8.805  , -6.734  ,  5.906  , -1.081  , -2.945  ,  8.92   ,
         -8.92   , -6.715  ],
        [ 8.07   ,  5.703  , -7.496  , -1.116  , -8.89   ,  1.468  ,
          5.633  ,  5.23   ],
        [-6.188  ,  2.795  ,  5.38   , -6.117  , -2.11   , -2.18   ,
          8.52   , -1.002  ],
        [ 8.08   ,  4.824  ,  8.914  ,  3.674  ,  5.316  ,  0.826  ,
          7.17   ,  5.098  ],
        [-6.215  ,  8.39   , -6.934  ,  8.305  , -6.074  ,  7.77   ,
         -5.703  ,  4.023  ]]], dtype=float16)}
Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[4,3,8] input_0, float16[4,6,8] input_1, float16[4,6,8] input_2) => (float16[4,3,8] _val_4) 
   <float16 _val_3>
{
   _val_3 = pkg.onnxscript.torch_lib._attention_scale (input_0)
   _val_4 = pkg.onnxscript.torch_lib._aten_scaled_dot_product_attention_no_mask_onnx <dropout_p: float = 0> (input_0, input_1, input_2, _val_3)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_attention_scale (query) => (scale)
{
   tmp = Shape (query)
   int64_m1 = Constant <value: tensor = int64 int64_m1 {-1}> ()
   tmp_subscripted = Gather <axis: int = 0> (tmp, int64_m1)
   embedding_size = CastLike (tmp_subscripted, query)
   const = Constant <value: tensor = float const {1}> ()
   tmp_0 = Sqrt (embedding_size)
   const_cast = CastLike (const, tmp_0)
   scale = Div (const_cast, tmp_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_aten_scaled_dot_product_attention_no_mask_onnx <dropout_p>(query, key, value, scale) => (return_val)
{
   key_shape = Shape (key)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   int64_9223372036854775807_1d = Constant <value: tensor = int64[1] int64_9223372036854775807_1d {9223372036854775807}> ()
   key_last_dim = Slice (key_shape, int64_m1_1d, int64_9223372036854775807_1d, int64_0_1d, int64_1_1d)
   int64_0_1d_0 = Constant <value: tensor = int64[1] int64_0_1d_0 {0}> ()
   int64_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
   key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
   int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
   int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
   int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
   key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
   tmp = Constant <value_ints: ints = [-1]> ()
   key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
   key_squeezed = Reshape (key, key_squeezed_shape)
   key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
   key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
   key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
   tmp_6 = Sqrt (scale)
   query_scaled = Mul (query, tmp_6)
   tmp_7 = Sqrt (scale)
   key_transposed_scaled = Mul (key_transposed, tmp_7)
   tmp_8 = MatMul (query_scaled, key_transposed_scaled)
   attn_weight = Softmax <axis: int = -1> (tmp_8)
   dropout_p = Constant <value_float: float = @dropout_p> ()
   attn_weight_9, _ = Dropout (attn_weight, dropout_p)
   return_val = MatMul (attn_weight_9, value)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[4,4,3,8] input_0, float16[4,4,6,8] input_1, float16[4,4,6,8] input_2) => (float16[4,4,3,8] _val_5) 
   <float16 _val_3, float[3,6] _val_4>
{
   _val_3 = pkg.onnxscript.torch_lib._attention_scale (input_0)
   _val_4 = pkg.onnxscript.torch_lib._causal_attention_mask (input_0, input_1)
   _val_5 = pkg.onnxscript.torch_lib._aten_scaled_dot_product_attention_float_mask_onnx <dropout_p: float = 0> (input_0, input_1, input_2, _val_4, _val_3)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_attention_scale (query) => (scale)
{
   tmp = Shape (query)
   int64_m1 = Constant <value: tensor = int64 int64_m1 {-1}> ()
   tmp_subscripted = Gather <axis: int = 0> (tmp, int64_m1)
   embedding_size = CastLike (tmp_subscripted, query)
   const = Constant <value: tensor = float const {1}> ()
   tmp_0 = Sqrt (embedding_size)
   const_cast = CastLike (const, tmp_0)
   scale = Div (const_cast, tmp_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_causal_attention_mask (query, key) => (attn_mask_10)
{
   tmp = Shape (query)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   target_length = Slice (tmp, int64_m2_1d, int64_m1_1d, int64_0_1d, int64_1_1d)
   tmp_0 = Shape (key)
   int64_0_1d_1 = Constant <value: tensor = int64[1] int64_0_1d_1 {0}> ()
   int64_1_1d_2 = Constant <value: tensor = int64[1] int64_1_1d_2 {1}> ()
   int64_m2_1d_3 = Constant <value: tensor = int64[1] int64_m2_1d_3 {-2}> ()
   int64_m1_1d_4 = Constant <value: tensor = int64[1] int64_m1_1d_4 {-1}> ()
   source_length = Slice (tmp_0, int64_m2_1d_3, int64_m1_1d_4, int64_0_1d_1, int64_1_1d_2)
   size = Concat <axis: int = 0> (target_length, source_length)
   const = Constant <value: tensor = float const {1}> ()
   attn_mask = Expand (const, size)
   attn_mask_5 = Trilu <upper: int = 0> (attn_mask)
   const_6 = Constant <value: tensor = float const_6 {0}> ()
   const_6_cast = CastLike (const_6, attn_mask_5)
   tmp_7 = Equal (attn_mask_5, const_6_cast)
   tmp_8 = Constant <value_float: float = -inf> ()
   const_9 = Constant <value: tensor = float const_9 {0}> ()
   const_9_cast = CastLike (const_9, tmp_8)
   attn_mask_10 = Where (tmp_7, tmp_8, const_9_cast)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["" : 18]
>
_aten_scaled_dot_product_attention_float_mask_onnx <dropout_p>(query, key, value, attn_mask, scale) => (return_val)
{
   key_shape = Shape (key)
   int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
   int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
   int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
   int64_9223372036854775807_1d = Constant <value: tensor = int64[1] int64_9223372036854775807_1d {9223372036854775807}> ()
   key_last_dim = Slice (key_shape, int64_m1_1d, int64_9223372036854775807_1d, int64_0_1d, int64_1_1d)
   int64_0_1d_0 = Constant <value: tensor = int64[1] int64_0_1d_0 {0}> ()
   int64_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
   int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
   int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
   key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
   int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
   int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
   int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
   key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
   tmp = Constant <value_ints: ints = [-1]> ()
   key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
   key_squeezed = Reshape (key, key_squeezed_shape)
   key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
   key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
   key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
   tmp_6 = Sqrt (scale)
   query_scaled = Mul (query, tmp_6)
   tmp_7 = Sqrt (scale)
   key_transposed_scaled = Mul (key_transposed, tmp_7)
   tmp_8 = MatMul (query_scaled, key_transposed_scaled)
   tmp_9 = Add (tmp_8, attn_mask)
   attn_weight = Softmax <axis: int = -1> (tmp_9)
   dropout_p = Constant <value_float: float = @dropout_p> ()
   attn_weight_10, _ = Dropout (attn_weight, dropout_p)
   return_val = MatMul (attn_weight_10, value)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
RuntimeError: ONNX Runtime failed to evaluate:
Inputs:
{'input_0': array([[[[ 1.283  , -8.02   , -3.604  ,  4.895  , -0.8877 ,  4.332  ,
           4.43   , -4.836  ],
         [-1.661  ,  7.812  , -6.625  ,  8.734  , -5.31   ,  7.875  ,
          -2.795  ,  3.217  ],
         [ 2.453  ,  2.479  , -8.66   ,  8.42   , -7.348  ,  1.433  ,
          -0.2725 ,  8.69   ]],

        [[ 4.703  , -4.297  ,  8.8    ,  0.02637,  7.33   , -6.46   ,
           8.37   , -5.203  ],
         [-4.867  ,  1.582  ,  2.172  , -3.488  , -6.555  , -1.301  ,
          -3.902  , -1.424  ],
         [-6.777  , -7.418  ,  7.285  , -3.023  , -1.758  ,  4.465  ,
           6.68   , -2.855  ]],

        [[-4.035  ,  2.855  ,  3.129  ,  7.242  ,  5.703  , -3.031  ,
          -5.57   ,  5.4    ],
         [ 8.2    ,  0.7383 ,  2.777  , -7.145  ,  4.516  , -5.633  ,
           6.062  , -6.004  ],
         [ 7.156  ,  8.46   ,  8.67   , -1.591  ,  0.3252 ,  8.875  ,
           4.484  , -5.316  ]],

        [[-2.408  ,  1.477  ,  8.47   ,  7.98   ,  3.006  ,  7.25   ,
          -6.32   ,  4.754  ],
         [-2.84   , -1.371  , -4.29   ,  0.9756 ,  3.604  ,  8.31   ,
          -7.2    ,  1.617  ],
         [ 3.348  , -4.65   ,  3.322  ,  0.4043 ,  7.777  ,  6.496  ,
           4.836  ,  4.633  ]]],


       [[[-4.58   ,  0.1846 , -7.137  , -6.285  , -3.2    ,  8.5    ,
          -2.39   , -6.617  ],
         [ 5.008  ,  8.914  ,  7.016  , -0.7646 ,  1.767  , -8.73   ,
          -5.117  , -7.586  ],
         [-2.268  , -6.777  , -4.43   , -0.4219 ,  5.71   ,  4.21   ,
          -8.92   , -2.629  ]],

        [[-4.57   , -2.11   ,  7.34   ,  4.914  , -5.176  ,  0.967  ,
          -7.664  ,  5.57   ],
         [-0.949  , -1.371  ,  0.8877 , -2.39   ,  7.312  ,  1.67   ,
           6.4    , -4.062  ],
         [ 7.06   ,  0.703  , -4.71   , -1.143  ,  0.7646 ,  1.696  ,
          -8.09   ,  6.875  ]],

        [[ 0.835  ,  3.709  ,  7.82   ,  1.731  ,  0.     , -0.1582 ,
          -5.43   ,  3.312  ],
         [-7.54   , -4.535  ,  5.598  , -6.258  ,  5.203  ,  3.664  ,
          -2.303  , -3.023  ],
         [-4.816  , -8.37   ,  1.23   , -3.895  , -6.707  ,  2.98   ,
           2.7    , -6.855  ]],

        [[ 4.516  ,  2.953  ,  5.047  , -4.08   ,  0.659  ,  7.727  ,
          -7.47   , -8.05   ],
         [-7.4    , -7.32   , -8.44   ,  7.453  , -0.545  ,  4.156  ,
           6.875  ,  0.87   ],
         [ 4.773  , -1.415  ,  1.116  ,  3.965  ,  8.02   , -5.766  ,
          -1.529  , -8.63   ]]],


       [[[ 7.76   ,  3.674  , -4.72   , -8.32   , -1.749  ,  7.03   ,
           2.363  , -3.086  ],
         [-2.848  , -2.338  ,  7.902  , -2.61   ,  2.76   , -0.879  ,
           7.47   , -1.081  ],
         [-1.195  , -0.2812 ,  8.3    ,  1.468  , -6.03   ,  8.89   ,
          -7.312  ,  3.973  ]],

        [[-6.523  ,  2.945  , -5.582  , -0.2197 , -4.395  ,  2.102  ,
           7.305  ,  8.414  ],
         [ 4.234  , -3.217  , -2.715  ,  5.195  , -1.178  ,  4.867  ,
           4.016  ,  0.7734 ],
         [ 5.793  ,  0.826  , -5.035  , -5.246  ,  1.318  ,  4.508  ,
           3.297  ,  0.1846 ]],

        [[-1.248  , -6.258  ,  8.75   , -1.626  , -4.445  ,  1.802  ,
           8.36   , -5.71   ],
         [-5.047  ,  5.492  ,  6.883  , -0.0879 , -3.78   ,  1.564  ,
           1.837  , -4.613  ],
         [-4.93   ,  7.375  ,  1.081  ,  8.72   , -8.016  ,  0.0967 ,
           1.099  ,  4.957  ]],

        [[-6.1    ,  7.91   ,  2.479  , -7.777  ,  3.516  , -1.081  ,
           0.8438 , -4.465  ],
         [-4.008  ,  8.11   , -1.573  ,  0.5977 , -7.973  , -1.204  ,
           0.51   ,  0.2812 ],
         [ 2.785  , -8.57   ,  7.727  ,  4.29   , -8.84   , -2.629  ,
          -7.277  ,  7.82   ]]],


       [[[ 3.945  ,  8.695  , -4.094  ,  5.96   , -5.035  , -6.47   ,
           1.23   ,  0.7295 ],
         [ 6.09   , -5.57   ,  5.188  , -7.117  ,  4.613  , -7.117  ,
           3.533  , -7.883  ],
         [ 7.215  ,  4.184  , -2.328  , -5.457  ,  0.2461 ,  6.953  ,
          -6.04   , -1.705  ]],

        [[-2.734  , -5.836  , -4.008  ,  3.438  , -7.094  ,  5.035  ,
           5.87   , -7.234  ],
         [-8.86   , -6.18   , -4.457  ,  5.     ,  2.848  ,  3.613  ,
           2.785  , -3.023  ],
         [ 8.47   ,  0.712  ,  4.156  ,  4.105  , -5.273  ,  8.3    ,
           6.414  ,  6.047  ]],

        [[ 1.784  ,  5.117  , -0.05273, -5.61   , -2.172  , -8.15   ,
           3.023  ,  7.047  ],
         [-7.18   ,  4.508  ,  5.582  ,  6.953  , -3.86   , -7.55   ,
          -8.81   , -7.656  ],
         [ 8.24   ,  3.85   ,  2.584  , -7.086  , -3.129  ,  4.344  ,
          -6.99   , -8.836  ]],

        [[ 8.664  , -4.15   , -0.659  , -7.707  ,  0.9404 , -5.47   ,
          -3.77   ,  4.234  ],
         [-5.78   ,  7.32   ,  3.629  ,  2.707  , -1.96   , -0.9404 ,
           7.33   ,  1.169  ],
         [ 6.312  ,  2.479  ,  6.83   , -8.37   , -4.78   ,  3.086  ,
          -4.086  ,  2.855  ]]]], dtype=float16),
 'input_1': array([[[[-2.4609e+00,  3.8848e+00, -8.1328e+00,  5.0977e-01,
          -4.5430e+00, -6.7422e+00, -5.3789e+00,  3.9648e+00],
         [ 5.6250e-01, -3.7793e+00,  1.3447e+00,  8.6484e+00,
          -6.6719e+00, -1.7930e+00,  6.8555e-01,  2.7598e+00],
         [-3.1914e+00, -6.8555e-01, -4.0859e+00, -9.4922e-01,
          -1.1777e+00,  2.1719e+00,  6.9336e+00, -1.3799e+00],
         [-3.6484e+00, -5.3711e+00, -8.7891e+00,  8.8281e+00,
          -6.5117e+00,  3.9375e+00, -1.2656e+00, -6.3633e+00],
         [ 5.8887e-01,  5.2734e-02, -1.8281e+00,  1.1953e+00,
           1.4326e+00, -8.2812e+00,  7.8750e+00,  5.7031e+00],
         [ 3.6836e+00,  6.3281e-01,  2.0742e+00, -8.6016e+00,
          -3.5781e+00, -8.5254e-01,  7.0234e+00, -6.7070e+00]],

        [[-7.4609e+00,  1.4502e+00, -3.2344e+00, -1.6084e+00,
          -5.7578e+00,  6.9766e+00, -8.5312e+00, -2.9453e+00],
         [ 3.6738e+00,  6.6367e+00,  4.4453e+00,  2.9883e+00,
           1.1074e+00,  3.5859e+00,  8.1094e+00, -5.7812e+00],
         [ 2.3730e+00, -3.3477e+00,  5.3086e+00, -5.6797e+00,
          -8.3672e+00,  8.1016e+00,  8.0938e+00, -9.6680e-01],
         [ 2.1719e+00, -1.4502e+00,  6.8906e+00, -8.6328e+00,
          -5.6953e+00,  3.5156e-01, -7.5156e+00,  8.3047e+00],
         [-7.1445e+00,  4.3242e+00,  5.9688e+00, -8.6641e+00,
          -5.7656e+00, -2.3555e+00, -7.6797e+00,  9.6680e-01],
         [ 4.2109e+00,  3.8242e+00,  4.0430e-01, -8.7891e-03,
           5.0469e+00, -2.5312e+00,  8.9297e+00,  3.2070e+00]],

        [[-8.4062e+00, -6.3828e+00, -7.1191e-01, -1.9600e+00,
          -6.4062e+00, -7.2266e+00, -8.4688e+00, -6.9434e-01],
         [-5.4492e-01,  7.2852e+00,  2.7500e+00,  1.4062e+00,
          -6.1016e+00,  4.6328e+00, -6.0391e+00,  5.3164e+00],
         [ 2.6641e+00,  4.4141e+00,  5.7031e+00,  2.9609e+00,
           4.8242e+00, -3.3926e+00, -6.5938e+00,  1.4326e+00],
         [ 3.8848e+00, -6.2031e+00, -3.8320e+00, -4.5781e+00,
           6.0195e+00, -5.6094e+00,  8.5156e+00, -1.3623e+00],
         [-1.6875e+00, -8.7891e-02,  7.7969e+00, -3.7090e+00,
           6.5820e+00,  6.8125e+00, -2.9355e+00, -4.6680e+00],
         [-4.7031e+00, -2.6719e+00,  6.3281e-01, -4.5352e+00,
          -5.5820e+00,  5.4297e+00,  7.5234e+00, -6.4141e+00]],

        [[ 3.2344e+00,  8.3906e+00, -2.0117e+00, -1.4062e+00,
           6.0898e+00, -4.1836e+00, -3.3047e+00,  7.4609e+00],
         [-4.0156e+00,  1.6348e+00, -1.5117e+00, -2.2422e+00,
          -5.0977e+00,  5.0000e+00, -5.8203e+00, -7.9297e+00],
         [ 1.6436e+00, -2.4883e+00,  8.9375e+00, -3.0312e+00,
          -5.3164e+00,  5.4922e+00,  2.2070e+00,  5.4297e+00],
         [ 4.8164e+00,  2.9355e+00, -8.2031e+00,  4.6484e+00,
           7.6016e+00, -8.4531e+00,  5.8086e+00, -4.5078e+00],
         [ 1.2129e+00,  6.1250e+00,  1.6172e+00,  1.7930e+00,
          -2.2227e+00,  1.8721e+00, -6.7578e+00, -7.8203e+00],
         [ 5.0273e+00, -1.3184e+00, -1.7842e+00, -8.2344e+00,
           7.3398e+00, -3.2520e-01,  1.8105e+00,  1.1250e+00]]],


       [[[-1.7754e+00,  1.7578e-02, -4.4824e-01, -7.9980e-01,
           6.6172e+00, -7.9453e+00,  3.3750e+00,  5.3867e+00],
         [ 3.2520e-01,  2.9883e+00, -1.4941e-01, -8.7891e-01,
          -3.3398e+00,  8.8594e+00, -2.7344e+00, -1.4502e+00],
         [-8.8281e+00, -6.8828e+00, -5.0078e+00, -5.4492e-01,
          -3.5508e+00, -6.3438e+00,  5.2305e+00,  5.2188e+00],
         [-1.9512e+00, -5.3613e-01, -8.4062e+00,  1.1250e+00,
          -7.2852e+00, -7.7773e+00,  3.8945e+00, -6.5234e+00],
         [-6.2656e+00, -2.4531e+00,  4.6953e+00,  5.8359e+00,
          -6.5742e+00, -8.4688e+00,  2.7949e+00,  7.8125e+00],
         [-8.5547e+00,  7.5156e+00, -7.6641e+00,  6.5742e+00,
           2.7246e+00,  7.1445e+00, -4.2266e+00, -4.0234e+00]],

        [[ 7.5859e+00,  3.4102e+00,  4.8086e+00, -3.5430e+00,
           8.7969e+00,  1.5469e+00,  3.1719e+00, -6.2500e+00],
         [-1.9160e+00, -1.5557e+00, -8.1562e+00, -1.2656e+00,
           1.8633e+00, -1.7227e+00, -7.8047e+00,  4.3945e-02],
         [-1.2305e-01, -8.0469e+00, -4.2031e+00,  3.3398e-01,
          -3.1914e+00, -5.2734e+00, -3.4727e+00, -6.7148e+00],
         [ 3.7344e+00, -8.2422e+00, -4.9219e+00, -8.3438e+00,
          -4.8672e+00,  8.5703e+00,  8.4531e+00, -5.3711e+00],
         [-1.7578e+00, -1.1426e+00, -2.6289e+00, -4.9922e+00,
           2.4961e+00,  2.0918e+00,  5.9414e+00,  5.2578e+00],
         [-3.2695e+00, -3.1016e+00,  4.8945e+00,  3.2617e+00,
          -4.6562e+00,  5.4766e+00,  8.0703e+00,  3.8672e-01]],

        [[ 3.5156e+00,  2.1621e+00,  5.4062e+00, -3.7617e+00,
          -5.0547e+00, -4.7461e+00, -3.3320e+00, -3.5586e+00],
         [ 1.4941e-01,  5.3164e+00, -3.1914e+00, -2.3477e+00,
          -6.4688e+00,  3.6289e+00, -2.6719e+00, -3.4023e+00],
         [-6.9453e+00, -5.3516e+00, -7.6465e-01, -4.6250e+00,
          -4.4824e-01, -3.4375e+00,  5.7031e+00,  8.8438e+00],
         [-4.3945e-02, -8.0000e+00, -8.1738e-01, -3.0859e+00,
           6.2578e+00,  2.0469e+00,  4.7383e+00,  8.7891e-02],
         [-7.1992e+00,  5.9609e+00, -6.8359e+00,  4.9062e+00,
          -2.3477e+00,  1.1074e+00, -7.7188e+00, -7.9727e+00],
         [ 8.0000e+00,  3.3477e+00, -5.3867e+00,  5.8281e+00,
           5.1250e+00, -5.8203e+00,  4.4648e+00, -7.8047e+00]],

        [[ 1.5293e+00,  2.5312e+00, -5.8887e-01,  4.2188e+00,
          -8.8438e+00, -7.2969e+00, -6.1094e+00,  8.2188e+00],
         [-7.4688e+00, -2.0215e-01, -5.0625e+00,  8.1250e+00,
          -6.9434e-01,  3.1016e+00,  4.3750e+00, -7.7695e+00],
         [-2.3477e+00,  7.8750e+00, -8.3496e-01, -8.1875e+00,
           8.2891e+00, -6.9062e+00, -7.2070e+00, -4.0859e+00],
         [ 3.7969e+00, -6.8555e+00, -1.6963e+00,  2.7773e+00,
          -8.7891e-01, -6.4141e+00, -3.3828e+00, -1.4854e+00],
         [ 2.4688e+00, -8.8594e+00, -8.3828e+00,  2.3555e+00,
           2.8477e+00,  6.9688e+00, -6.3281e+00,  3.9551e+00],
         [ 5.2734e-02,  2.7500e+00, -1.8281e+00,  8.9648e-01,
          -1.7930e+00, -7.3125e+00,  6.6094e+00,  5.0352e+00]]],


       [[[-4.3594e+00, -7.5078e+00,  4.7109e+00, -6.8047e+00,
           6.7148e+00,  3.3926e+00,  1.8105e+00, -4.2539e+00],
         [ 5.5195e+00,  8.8047e+00, -5.3516e+00, -2.0312e+00,
           3.3320e+00,  1.2568e+00, -8.7891e+00,  7.2148e+00],
         [-3.1367e+00,  6.3281e-01, -3.4531e+00,  7.2344e+00,
           7.3125e+00,  8.0859e-01, -4.5703e+00,  5.2461e+00],
         [-6.0547e+00, -5.2734e+00, -4.6250e+00,  3.1914e+00,
          -6.8555e-01, -3.4727e+00,  6.5391e+00,  1.7402e+00],
         [-2.6992e+00,  3.6211e+00, -6.0312e+00, -3.2168e+00,
          -5.5371e-01,  8.2266e+00, -5.2383e+00,  6.8750e+00],
         [-1.1074e+00, -7.7…-2.83   , -1.573  , -0.712  ,
E             -6.555  , -5.652  ]],
E   
E           [[ 7.48   , -8.89   , -6.344  ,  4.88   ,  5.414  , -7.6    ,
E             -7.656  , -2.383  ],
E            [-7.418  ,  4.613  ,  1.573  , -5.695  ,  7.06   , -2.188  ,
E              1.468  ,  1.872  ],
E            [ 1.31   , -8.87   ,  0.791  ,  1.046  ,  8.39   , -6.785  ,
E              0.5186 ,  2.328  ]]],
E   
E   
E          [[[ 3.586  , -8.734  ,  0.1758 , -2.584  ,  5.203  , -6.47   ,
E              4.766  , -4.633  ],
E            [ 6.547  ,  4.156  ,  3.55   , -4.19   , -0.5625 , -7.848  ,
E              2.84   , -1.468  ],
E            [ 1.151  ,  8.86   , -4.43   , -8.74   , -3.498  ,  2.637  ,
E             -8.98   ,  2.848  ]],
E   
E           [[-4.535  ,  2.074  ,  4.695  , -8.87   ,  4.684  , -0.2461 ,
E              5.125  ,  7.164  ],
E            [ 3.91   ,  8.72   ,  4.414  , -0.6064 ,  2.012  ,  0.9316 ,
E              6.32   ,  3.41   ],
E            [ 6.32   , -6.695  ,  4.938  ,  0.5625 , -6.68   ,  2.96   ,
E              2.637  , -5.688  ]],
E   
E           [[ 1.529  , -0.545  ,  1.327  , -4.824  ,  2.328  , -6.117  ,
E              8.55   ,  7.164  ],
E            [ 1.424  , -5.16   ,  3.006  ,  3.217  ,  4.08   ,  6.582  ,
E              1.74   ,  4.387  ],
E            [-8.87   ,  7.207  ,  2.594  , -1.38   , -4.516  ,  6.918  ,
E             -0.3604 ,  2.092  ]],
E   
E           [[-3.902  ,  2.637  , -8.95   ,  1.925  , -0.05273,  6.723  ,
E              3.973  ,  3.797  ],
E            [-1.916  ,  7.637  ,  3.383  , -3.568  ,  2.584  ,  1.028  ,
E             -5.45   ,  5.97   ],
E            [-0.2461 ,  6.695  , -3.79   , -4.035  , -2.398  ,  7.707  ,
E             -8.91   ,  1.828  ]]],
E   
E   
E          [[[-7.56   , -1.292  ,  2.574  ,  8.     ,  4.555  , -8.32   ,
E              3.805  ,  5.062  ],
E            [-3.613  ,  8.45   , -1.371  , -1.872  ,  7.016  ,  5.992  ,
E              0.668  ,  0.413  ],
E            [-2.268  , -8.29   ,  7.86   ,  6.33   , -7.17   ,  3.902  ,
E              8.52   ,  5.23   ]],
E   
E           [[-4.42   ,  0.2461 ,  2.664  , -0.2725 , -4.92   ,  4.402  ,
E              3.91   , -3.488  ],
E            [ 4.297  , -2.145  ,  3.754  , -4.094  ,  3.146  , -0.5713 ,
E             -8.21   ,  8.91   ],
E            [ 0.457  ,  6.406  , -1.45   ,  5.168  ,  1.784  , -4.12   ,
E             -7.637  , -8.99   ]],
E   
E           [[ 3.129  , -1.758  ,  4.992  , -1.96   , -0.3691 ,  0.501  ,
E              2.223  ,  1.573  ],
E            [-4.676  , -3.129  ,  0.2988 , -8.35   , -0.712  ,  2.531  ,
E             -5.75   , -3.85   ],
E            [-6.117  , -3.824  ,  6.734  , -2.18   ,  8.77   , -6.336  ,
E             -3.92   , -7.066  ]],
E   
E           [[-0.4043 ,  1.433  ,  4.21   ,  3.99   , -5.71   ,  1.925  ,
E              6.004  ,  2.223  ],
E            [ 6.336  , -7.355  ,  1.67   ,  3.41   ,  1.881  ,  3.287  ,
E              0.747  , -4.844  ],
E            [ 5.42   , -1.591  ,  7.094  , -4.344  ,  0.4043 ,  0.5625 ,
E             -0.545  , -4.836  ]]],
E   
E   
E          [[[ 1.274  , -0.6943 ,  7.004  ,  3.348  ,  0.5713 , -4.465  ,
E              0.8086 ,  7.594  ],
E            [-5.266  ,  4.105  ,  0.8174 ,  5.42   , -7.84   ,  3.805  ,
E              2.988  , -6.25   ],
E            [-3.814  ,  1.8545 ,  4.562  , -0.1406 , -7.61   ,  2.348  ,
E             -3.2    ,  7.566  ]],
E   
E           [[ 8.414  ,  5.29   , -2.434  ,  4.156  , -1.969  , -5.406  ,
E              4.156  ,  3.121  ],
E            [-3.031  ,  1.6875 ,  6.133  ,  2.373  , -8.01   , -1.125  ,
E              4.586  ,  0.4746 ],
E            [ 6.758  , -8.44   ,  8.7    , -2.996  ,  2.383  ,  4.402  ,
E              5.81   ,  3.172  ]],
E   
E           [[-7.727  , -0.7207 ,  1.828  ,  2.504  ,  0.0791 , -2.514  ,
E             -7.375  ,  5.035  ],
E            [ 5.24   ,  4.07   ,  1.169  , -1.67   , -1.898  , -7.508  ,
E             -7.137  ,  8.55   ],
E            [ 4.65   ,  7.25   ,  4.402  ,  5.02   ,  5.266  , -8.77   ,
E             -0.0967 ,  0.589  ]],
E   
E           [[-2.066  , -5.42   ,  4.844  ,  0.949  , -8.36   , -3.93   ,
E             -3.031  ,  2.293  ],
E            [ 3.227  , -2.102  , -1.547  , -1.362  , -6.926  , -2.62   ,
E              3.207  ,  2.504  ],
E            [-5.688  , -3.2    , -1.714  , -1.046  , -5.703  , -3.568  ,
E             -3.27   ,  0.659  ]]]], dtype=float16),
E    'input_1': array([[[[ 7.0312e-02, -7.2852e+00, -1.9160e+00,  6.9531e+00,
E             -1.1602e+00,  5.5273e+00,  2.5391e+00,  8.7734e+00],
E            [ 5.2734e-01, -7.7344e-01,  3.4102e+00, -1.5645e+00,
E             -1.8193e+00,  7.9023e+00,  4.9375e+00, -4.0508e+00],
E            [-3.4375e+00, -3.0410e+00, -6.1523e-02, -8.9375e+00,
E              3.2344e+00,  3.8672e-01, -5.9844e+00,  6.6445e+00],
E            [-2.0117e+00,  3.2871e+00,  5.4297e+00,  2.2852e-01,
E              1.6875e+00,  8.1641e+00, -1.5469e+00,  3.1289e+00],
E            [-8.4609e+00,  5.3613e-01, -5.3008e+00,  6.9688e+00,
E             -3.4453e+00, -2.0469e+00, -4.1641e+00,  3.3320e+00],
E            [ 2.4258e+00, -3.6836e+00,  3.7793e+00,  1.0020e+00,
E              6.0391e+00,  1.0547e+00, -4.3945e+00,  8.7500e+00]],
E   
E           [[-1.1074e+00,  3.6484e+00, -7.5781e+00, -2.0918e+00,
E             -1.0107e+00,  6.0391e+00, -3.7793e-01,  2.9531e+00],
E            [-5.6797e+00,  2.8398e+00, -7.4883e+00, -2.1973e-01,
E              6.9062e+00, -8.4453e+00,  8.7891e-01,  2.8203e+00],
E            [-4.3945e-01, -8.4297e+00,  7.3984e+00,  4.8867e+00,
E              5.2578e+00, -4.9844e+00,  5.0781e+00, -5.0273e+00],
E            [ 2.2676e+00,  8.2031e+00,  6.2930e+00,  3.7344e+00,
E             -4.0430e-01,  8.2188e+00, -7.4453e+00,  8.1719e+00],
E            [-6.5469e+00, -5.9844e+00,  1.4062e-01, -6.4062e+00,
E             -7.2344e+00, -5.5371e-01, -2.9961e+00,  8.0703e+00],
E            [ 6.7930e+00,  1.6963e+00,  1.3008e+00, -5.7109e+00,
E              7.7969e+00, -8.7031e+00,  3.2344e+00, -8.7500e+00]],
E   
E           [[-2.1973e+00, -7.0664e+00, -8.5469e+00,  8.1875e+00,
E             -2.0566e+00,  3.0938e+00,  3.4375e+00, -8.3438e+00],
E            [-4.6680e+00,  6.0391e+00,  4.5781e+00,  8.5625e+00,
E             -7.9805e+00, -6.5469e+00,  6.4062e+00,  1.5293e+00],
E            [-2.4531e+00, -6.6797e-01, -2.2930e+00, -6.4141e+00,
E             -6.1523e-02,  3.9551e+00,  7.3203e+00,  4.5430e+00],
E            [ 1.7578e-01,  6.3281e-01, -1.6084e+00,  3.0762e-01,
E              4.4453e+00,  7.8906e+00,  2.0391e+00,  6.3906e+00],
E            [ 8.6250e+00, -4.6406e+00, -7.1992e+00, -5.2656e+00,
E              8.2656e+00, -8.6250e+00,  7.9102e-01,  8.1562e+00],
E            [ 7.2695e+00,  4.1211e+00, -3.6914e+00,  4.0938e+00,
E              7.4688e+00, -5.0195e+00,  8.8281e+00, -3.7793e+00]],
E   
E           [[ 2.9180e+00, -4.5547e+00, -2.0312e+00,  4.1211e+00,
E             -8.8281e+00,  1.9951e+00,  3.2969e+00,  1.1338e+00],
E            [ 6.0312e+00, -3.5586e+00, -5.3711e+00,  6.9766e+00,
E              6.1523e-01,  3.5430e+00,  4.9297e+00,  5.3281e+00],
E            [ 6.9453e+00,  3.4805e+00, -6.0469e+00,  5.6523e+00,
E              4.4297e+00, -1.0107e+00, -6.5820e+00, -1.1689e+00],
E            [-8.8906e+00,  1.7578e-02,  8.6562e+00,  8.6562e+00,
E             -4.1309e-01,  1.9600e+00, -4.4824e-01, -7.4688e+00],
E            [ 7.8906e+00,  3.3750e+00,  5.2812e+00,  8.0781e+00,
E             -6.8906e+00, -4.2031e+00, -8.8828e+00,  1.7051e+00],
E            [ 3.4805e+00,  3.7793e+00,  2.6797e+00,  2.9258e+00,
E             -2.6367e+00,  3.5938e+00, -8.9844e+00, -2.9355e+00]]],
E   
E   
E          [[[ 4.8438e+00,  3.9648e+00,  5.9336e+00,  7.1875e+00,
E             -2.4434e+00, -2.2500e+00, -1.3447e+00, -4.6562e+00],
E            [-5.3438e+00,  3.4277e+00,  3.2695e+00, -3.4883e+00,
E              4.4824e-01,  3.2969e+00,  3.4629e+00,  8.1016e+00],
E            [-3.5156e-02, -7.7617e+00,  7.4707e-01,  4.0703e+00,
E             -8.6016e+00, -1.5908e+00, -1.4062e-01,  2.0820e+00],
E            [-8.9219e+00, -3.1719e+00, -3.9727e+00,  8.8359e+00,
E              4.6484e+00,  5.2461e+00, -3.3047e+00, -9.8438e-01],
E            [ 6.5117e+00, -3.3828e+00, -3.5938e+00, -2.8750e+00,
E             -1.0195e+00, -7.4453e+00, -1.3711e+00,  4.3945e-02],
E            [-1.5117e+00, -6.3125e+00, -4.1914e+00, -6.1328e+00,
E              3.1211e+00,  6.9609e+00, -5.5391e+00, -6.3711e+00]],
E   
E           [[ 4.6133e+00, -2.4258e+00, -6.5039e+00, -6.1094e+00,
E             -8.5703e+00,  8.8516e+00, -2.2148e+00,  8.5000e+00],
E            [ 5.4570e+00,  6.2227e+00, -5.6875e+00,  6.7070e+00,
E              5.2656e+00, -2.1973e-01,  7.2422e+00,  4.7188e+00],
E            [-2.2070e+00,  1.2568e+00,  5.1855e-01, -1.3799e+00,
E              6.8477e+00,  6.5039e-01, -8.9844e+00,  5.0469e+00],
E            [-2.4336e+00,  3.9297e+00,  8.4141e+00,  6.5039e+00,
E              7.9180e+00, -6.5938e+00,  4.6484e+00,  5.1953e+00],
E            [-7.1172e+00,  5.9922e+00,  4.2539e+00, -6.1172e+00,
E             -6.7656e+00,  3.3926e+00, -7.9023e+00, -2.2676e+00],
E            [ 5.1250e+00,  4.8242e+00,  2.8398e+00,  1.7842e+00,
E              1.5029e+00,  6.4531e+00, -7.1016e+00,  2.1445e+00]],
E   
E           [[ 2.9453e+00,  6.1719e+00,  2.6094e+00, -1.6963e+00,
E             -7.4102e+00, -6.0742e+00, -4.5156e+00, -5.6406e+00],
E            [ 6.2930e+00,  7.6641e+00,  1.7402e+00, -6.0898e+00,
E              1.4414e+00,  2.1172e+00, -1.0986e+00, -3.3672e+00],
E            [ 1.9863e+00, -3.4375e+00,  4.8789e+00, -5.0469e+00,
E             -7.2852e+00,  7.6367e+00,  1.7842e+00,  8.5938e+00],
E            [-4.9844e+00, -1.7139e+00, -6.7500e+00,  4.6406e+00,
E              2.3828e+00, -8.9297e+00, -2.3203e+00, -6.6797e-01],
E            [ 1.7578e+00,  5.0977e-01, -1.2129e+00, -5.5547e+00,
E              5.7227e+00,  1.0371e+00,  4.8516e+00, -5.5000e+00],
E            [ 7.3203e+00, -6.1016e+00,  2.3379e+00,  4.5625e+00,
E              6.1016e+00, -4.7891e+00, -7.8320e+00,  6.0820e+00]],
E   
E           [[-9.5801e-01, -7.9102e-01, -5.8906e+00, -3.8750e+00,
E              4.3672e+00, -3.5156e+00,  9.3164e-01, -6.4609e+00],
E            [-1.7578e+00, -3.4805e+00, -5.2734e-02, -3.4531e+00,
E              1.2832e+00, -6.1328e+00,  3.6387e+00, -7.1641e+00],
E            [ 2.5938e+00,  3.4883e+00,  8.9648e-01, -3.4180e+00,
E              6.6445e+00,  4.4570e+00,  7.8203e+00, -1.3799e+00],
E            [ 1.8281e+00,  8.1875e+00, -5.2461e+00, -5.5547e+00,
E             -1.7490e+00, -9.5801e-01,  9.4922e-01,  7.8047e+00],
E            [-5.9492e+00, -8.5703e+00, -9.5801e-01,  9.1406e-01,
E              3.6836e+00,  3.2695e+00,  2.2852e-01, -5.0098e-01],
E            [-1.9775e+00,  6.7500e+00,  2.3906e+00, -7.0312e-02,
E             -3.3047e+00,  2.9805e+00, -6.3633e+00,  4.2109e+00]]],
E   
E   
E          [[[ 7.3203e+00,  4.7734e+00,  4.8867e+00,  2.3203e+00,
E              4.7539e+00,  5.2305e+00, -7.5508e+00, -6.2305e+00],
E            [-9.0000e+00, -4.3945e-02,  6.6797e+00,  2.2852e+00,
E             -8.4531e+00, -4.7656e+00,  5.9336e+00,  2.8125e+00],
E            [ 7.4258e+00,  1.7578e+00, -7.0586e+00, -7.6719e+00,
E             -3.3477e+00, -3.5078e+00,  4.0508e+00, -7.1016e+00],
E            [-7.2852e+00,  6.9609e+00,  1.7666e+00, -2.6367e-01,
E             -8.7891e-02, -3.3223e+00,  2.0469e+00,  1.2305e+00],
E            [-2.7852e+00,  4.3672e+00,  4.8672e+00,  4.9219e-01,
E              8.1797e+00,  4.4219e+00,  1.2393e+00, -2.6641e+00],
E            [ 5.1250e+00,  6.4609e+00,  4.7266e+00, -3.3828e+00,
E             -8.0781e+00, -7.5586e+00, -7.1172e+00,  6.1523e-01]],
E   
E           [[ 6.2852e+00,  5.8359e+00,  5.0781e+00, -8.2188e+00,
E              6.7773e+00, -1.3271e+00,  1.9951e+00,  6.8555e-01],
E            [-3.6641e+00, -5.9414e+00,  7.3906e+00,  8.6094e+00,
E             -1.1514e+00, -6.8477e+00, -6.6172e+00,  8.8438e+00],
E            [-5.4688e+00,  5.8711e+00,  1.0107e+00,  3.8145e+00,
E              4.6484e+00,  3.8750e+00,  1.5029e+00,  8.9297e+00],
E            [ 1.3184e-01, -5.6250e-01,  6.6016e+00, -1.1162e+00,
E              4.8340e-01,  5.1875e+00,  8.9648e-01, -6.5117e+00],
E            [ 6.1875e+00, -4.4648e+00, -3.5781e+00,  2.4336e+00,
E              7.7695e+00, -3.6289e+00, -2.4180e+00,  2.3555e+00],
E            [ 7.3125e+00, -5.4922e+00,  4.4141e+00,  8.7891e+00,
E             -8.2500e+00, -6.1953e+00,  2.7246e-01, -6.0547e+00]],
E   
E           [[-8.5625e+00,  6.8555e-01,  6.1250e+00, -7.4805e+00,
E              8.3203e+00,  5.4219e+00, -4.6055e+00,  5.0469e+00],
E            [-8.3438e+00, -6.2305e+00,  7.3125e+00,  3.2695e+00,
E              7.0312e-01,  7.6641e+00,  5.6172e+00,  6.1016e+00],
E            [ 6.6250e+00, -2.8828e+00, -1.3184e-01, -2.5742e+00,
E              6.1523e-01, -4.9219e-01, -1.5029e+00, -3.1562e+00],
E            [ 6.6445e+00, -3.2773e+00,  6.1523e-01, -8.6016e+00,
E              4.0430e-01,  5.1875e+00,  3.6992e+00, -7.6992e+00],
E            [-1.0107e+00,  1.1338e+00, -5.0000e+00,  5.6094e+00,
E             -1.3008e+00,  5.5391e+00, -5.1855e-01,  8.7500e+00],
E            [-1.3711e+00,  4.4297e+00, -8.4375e-01, -8.7891e-03,
E             -2.1094e-01, -2.2070e+00, -6.8633e+00, -4.8438e+00]],
E   
E           [[-5.6523e+00,  8.9648e-01, -4.7109e+00,  3.1641e-01,
E              5.7383e+00,  4.8164e+00,  4.5625e+00,  6.5234e+00],
E            [ 2.9180e+00, -5.5195e+00, -6.4062e+00, -8.0703e+00,
E             -1.4502e+00, -6.2402e-01, -4.3516e+00,  7.2422e+00],
E            [ 7.2070e-01, -7.9980e-01, -6.4766e+00,  8.6250e+00,
E             -2.8047e+00,  8.9531e+00,  4.0430e+00, -7.1172e+00],
E            [ 2.4609e+00, -2.5742e+00,  8.3750e+00,  4.0234e+00,
E              7.0312e+00, -5.9844e+00, -7.3047e+00, -7.6211e+00],
E            [-4.9219e+00, -8.0938e+00, -8.7109e+00,  8.8770e-01,
E             -3.3926e+00,  6.3711e+00, -6.1523e+00,  8.2891e+00],
E            [ 7.0938e+00, -2.9355e+00, -3.8047e+00, -7.7695e+00,
E             -4.8438e+00,  2.6367e-01, -1.2393e+00,  8.2109e+00]]],
E   
E   
E          [[[-8.4531e+00,  4.9141e+00,  7.5586e-01, -6.3359e+00,
E              5.2734e-01, -5.5273e+00, -6.0742e+00,  8.3594e+00],
E            [-2.1016e+00, -5.3984e+00, -6.5117e+00, -4.9141e+00,
E             -8.3047e+00,  6.0742e+00, -2.1172e+00,  6.6797e-01],
E            [ 8.3750e+00, -2.5664e+00,  7.9453e+00,  6.9688e+00,
E              4.7383e+00,  3.1641e+00, -8.3281e+00, -4.5352e+00],
E            [ 1.4326e+00,  7.0312e-02,  1.5293e+00, -4.9492e+00,
E              2.6895e+00,  5.1602e+00, -6.8555e+00, -7.9375e+00],
E            [-7.3828e+00, -8.7266e+00, -2.3730e-01,  8.4531e+00,
E             -2.3125e+00, -4.0078e+00, -6.2852e+00,  3.2617e+00],
E            [ 5.7578e+00, -1.7139e+00,  5.2305e+00, -2.9004e-01,
E              4.8340e-01, -2.0566e+00,  2.3906e+00, -4.9297e+00]],
E   
E           [[-3.0508e+00,  4.0000e+00, -4.0430e-01, -3.1211e+00,
E              2.1797e+00, -2.8750e+00, -2.7246e-01, -8.3203e+00],
E            [-4.3164e+00, -8.0625e+00,  2.3906e+00, -2.2930e+00,
E              8.9453e+00, -8.8750e+00, -5.7578e+00,  2.6641e+00],
E            [ 5.8984e+00, -7.2070e+00,  8.6094e+00,  9.8438e-01,
E              5.9688e+00, -1.4062e+00,  2.2148e+00, -5.1328e+00],
E            [ 1.6260e+00, -1.4590e+00, -7.0156e+00, -4.3672e+00,
E              5.8633e+00, -5.8984e+00, -3.5332e+00,  0.0000e+00],
E            [ 4.8516e+00,  4.9141e+00, -8.6133e-01,  3.6836e+00,
E              5.4414e+00,  1.4062e+00, -7.5938e+00, -1.3535e+00],
E            [ 1.1162e+00,  6.3359e+00,  1.3184e+00, -5.1055e+00,
E              6.7344e+00,  4.1328e+00, -4.9375e+00,  6.4062e+00]],
E   
E           [[ 2.5488e-01, -4.8516e+00, -4.1133e+00, -8.3047e+00,
E              3.3398e-01, -6.8984e+00, -4.1133e+00, -3.8242e+00],
E            [ 2.9531e+00,  7.9727e+00,  7.5586e+00,  7.0234e+00,
E              8.3594e+00,  2.4531e+00,  1.0107e+00, -6.6445e+00],
E            [ 8.9219e+00,  8.0391e+00, -6.0625e+00, -6.2656e+00,
E              4.5703e-01,  5.8887e-01, -2.9102e+00, -8.7109e+00],
E            [ 6.9883e+00,  7.1797e+00, -5.6172e+00, -6.8203e+00,
E              9.6680e-01,  8.1738e-01,  3.5938e+00,  1.5645e+00],
E            [ 4.0430e-01, -2.0312e+00, -1.5205e+00, -1.3271e+00,
E              5.3008e+00, -5.7031e+00, -2.6445e+00,  5.1484e+00],
E            [ 7.8125e+00, -7.2344e+00, -5.0977e+00,  8.0703e+00,
E              8.1875e+00, -6.3984e+00,  2.6992e+00,  6.3438e+00]],
E   
E           [[-4.6758e+00, -6.0391e+00,  8.2969e+00, -8.9648e-01,
E              5.6680e+00,  7.3906e+00,  5.7129e-01, -2.0469e+00],
E            [ 3.6992e+00,  4.7383e+00,  8.1094e+00,  5.5195e+00,
E              4.3242e+00,  7.7422e+00,  8.7734e+00, -5.5371e-01],
E            [-6.2227e+00, -7.2949e-01,  7.3477e+00, -3.5078e+00,
E             -8.3516e+00,  6.2656e+00, -7.5586e-01, -8.4922e+00],
E            [-5.6172e+00, -8.8750e+00,  2.0215e+00,  5.4141e+00,
E              5.9414e+00, -7.2578e+00,  8.0391e+00,  3.1016e+00],
E            [-8.7031e+00, -7.5859e+00,  6.6445e+00, -3.7793e-01,
E             -1.9863e+00, -1.1426e+00, -4.8672e+00, -4.3438e+00],
E            [ 8.4609e+00, -2.0469e+00, -1.5205e+00,  3.7090e+00,
E             -5.9492e+00, -2.9102e+00, -7.9102e-01, -9.8438e-01]]]],
E         dtype=float16),
E    'input_2': array([[[[-7.72   ,  3.277  ,  6.04   , -3.076  ,  3.648  , -2.97   ,
E              4.332  ,  4.906  ],
E            [ 2.242  , -0.5713 , -2.98   ,  0.8965 , -7.945  , -5.33   ,
E             -5.934  ,  5.766  ],
E            [-8.64   ,  3.217  , -8.75   , -0.5186 ,  7.832  , -2.11   ,
E              8.58   , -8.88   ],
E            [-0.923  ,  8.37   , -1.819  , -1.758  ,  4.254  ,  1.652  ,
E             -1.345  , -6.414  ],
E            [ 1.6    , -1.415  , -4.19   ,  7.742  ,  0.3164 , -0.1494 ,
E              0.11426,  8.1    ],
E            [-3.428  , -3.78   ,  1.389  ,  4.332  ,  2.883  , -3.613  ,
E              7.453  ,  0.7207 ]],
E   
E           [[ 4.88   ,  5.09   ,  4.914  ,  4.992  ,  5.15   ,  0.1846 ,
E              1.151  , -4.92   ],
E            [ 4.     ,  2.645  , -0.615  , -0.835  ,  2.338  , -1.986  ,
E             -5.54   ,  1.046  ],
E            [-6.848  , -4.016  , -8.67   , -6.855  , -8.83   ,  0.958  ,
E              5.316  , -5.547  ],
E            [ 3.594  , -0.334  ,  5.688  ,  6.523  , -8.94   ,  3.895  ,
E             -6.125  ,  7.79   ],
E            [-6.46   ,  6.695  , -4.516  , -0.993  , -5.836  ,  7.98   ,
E              5.258  , -2.715  ],
E            [-7.086  ,  8.984  , -5.44   ,  8.71   ,  8.93   ,  8.97   ,
E             -2.39   , -0.8965 ]],
E   
E           [[-1.696  ,  6.758  ,  5.45   ,  6.742  ,  8.72   , -2.031  ,
E              1.925  , -3.78   ],
E            [-8.07   , -3.902  , -3.656  ,  6.355  ,  2.504  ,  6.574  ,
E             -8.94   , -1.925  ],
E            [-7.348  , -8.3    , -8.06   ,  2.812  ,  5.08   , -2.98   ,
E             -5.97   ,  7.58   ],
E            [-6.125  , -4.133  ,  6.133  , -6.355  , -7.25   , -1.16   ,
E              3.674  ,  0.334  ],
E            [ 6.426  , -6.312  ,  0.02637,  5.555  ,  7.445  ,  0.668  ,
E             -4.895  , -7.438  ],
E            [-3.34   ,  7.227  ,  6.504  , -6.582  ,  1.327  ,  1.433  ,
E              2.54   ,  3.312  ]],
E   
E           [[-3.55   , -3.402  ,  6.6    ,  2.715  , -0.835  ,  8.59   ,
E              8.016  ,  3.41   ],
E            [ 7.523  ,  5.773  ,  6.707  , -7.4    ,  7.207  ,  2.68   ,
E             -8.375  ,  1.916  ],
E            [-2.69   ,  1.512  ,  5.73   ,  2.383  , -2.629  ,  4.105  ,
E             -6.645  ,  0.167  ],
E            [-3.93   , -0.2637 , -1.846  ,  4.414  , -7.72   ,  3.762  ,
E              1.441  , -1.187  ],
E            [-6.285  , -4.457  , -8.88   , -7.848  ,  7.496  , -2.373  ,
E             -5.15   , -2.479  ],
E            [-2.504  ,  5.914  , -2.021  , -6.695  , -3.357  ,  8.28   ,
E              1.969  , -6.742  ]]],
E   
E   
E          [[[ 2.047  ,  2.945  , -5.414  ,  7.27   , -0.4395 ,  0.2021 ,
E             -6.625  ,  0.826  ],
E            [-7.76   ,  6.09   ,  6.3    , -1.081  , -0.4482 ,  0.703  ,
E             -6.6    , -6.582  ],
E            [ 7.75   ,  8.414  , -4.92   ,  1.819  ,  2.594  ,  0.334  ,
E              8.32   , -6.477  ],
E            [ 1.081  , -1.31   ,  8.51   ,  2.479  , -1.292  , -7.426  ,
E              0.8438 ,  1.67   ],
E            [ 0.6416 , -5.08   , -2.11   ,  4.36   , -4.08   , -4.754  ,
E             -1.547  , -2.664  ],
E            [-7.99   , -4.305  ,  0.8525 ,  2.875  ,  7.98   , -5.43   ,
E              2.742  ,  6.16   ]],
E   
E           [[-2.91   ,  1.292  ,  2.996  , -4.656  , -0.677  , -8.805  ,
E              0.914  ,  5.92   ],
E            [ 5.08   ,  7.04   ,  7.03   , -4.105  ,  5.203  , -0.7383 ,
E              5.78   ,  1.942  ],
E            [-1.494  ,  0.12305, -1.178  ,  5.555  , -8.32   ,  2.82   ,
E             -1.916  ,  2.11   ],
E            [ 0.1494 , -2.258  , -5.02   ,  3.885  , -0.8525 , -6.574  ,
E              3.719  ,  4.176  ],
E            [ 0.2285 , -4.492  ,  7.594  , -4.93   , -1.916  ,  8.625  ,
E             -1.767  , -5.82   ],
E            [ 3.357  , -2.408  , -4.395  ,  3.348  ,  1.608  ,  1.696  ,
E              4.05   , -8.516  ]],
E   
E           [[ 2.012  , -4.71   ,  0.2373 ,  7.305  ,  5.117  , -2.664  ,
E              8.43   ,  1.608  ],
E            [ 3.727  ,  3.287  ,  7.62   , -8.65   ,  4.246  , -0.1846 ,
E             -6.723  , -5.133  ],
E            [-0.87   , -1.485  , -8.27   , -8.6    , -5.14   ,  5.89   ,
E              7.516  ,  4.555  ],
E            [-0.3428 ,  7.418  ,  3.84   , -7.812  ,  1.6    ,  6.223  ,
E              1.503  , -1.02   ],
E            [-1.441  , -2.082  , -3.824  ,  8.15   , -0.6064 , -2.945  ,
E             -4.273  ,  1.046  ],
E            [ 2.46   ,  2.7    , -2.785  ,  2.426  ,  4.65   , -6.414  ,
E              6.875  , -8.55   ]],
E   
E           [[-1.925  ,  3.79   , -8.555  , -2.61   , -8.375  , -8.766  ,
E             -4.887  ,  8.01   ],
E            [-2.293  , -1.222  , -0.993  , -4.203  ,  3.516  , -7.242  ,
E              6.68   ,  5.36   ],
E            [-1.802  ,  8.69   ,  8.77   ,  4.086  ,  5.723  ,  5.22   ,
E             -4.086  ,  6.934  ],
E            [-5.68   ,  6.67   ,  7.86   ,  1.547  ,  0.4482 , -8.31   ,
E              3.375  , -1.679  ],
E            [ 5.055  ,  0.06152,  1.872  , -8.79   , -5.055  , -2.258  ,
E             -2.373  ,  8.51   ],
E            [ 4.008  ,  4.043  , -7.27   ,  0.8174 ,  6.312  ,  8.31   ,
E             -5.168  ,  4.625  ]]],
E   
E   
E          [[[-2.945  ,  8.49   ,  3.312  , -3.85   , -8.09   ,  7.074  ,
E              5.97   ,  8.445  ],
E            [-3.498  ,  8.85   ,  0.334  , -4.984  , -5.81   ,  6.89   ,
E             -7.82   , -5.723  ],
E            [ 2.031  , -8.664  ,  8.46   ,  7.047  , -1.811  , -8.59   ,
E             -8.695  ,  7.305  ],
E            [-8.17   , -0.3516 ,  3.164  ,  4.605  ,  1.274  ,  8.05   ,
E             -5.773  , -8.44   ],
E            [ 1.995  , -2.383  ,  4.562  ,  8.88   , -1.837  ,  7.49   ,
E              4.867  , -2.363  ],
E            [-7.418  ,  4.395  , -3.762  ,  1.354  , -2.092  ,  4.695  ,
E             -3.498  ,  7.18   ]],
E   
E           [[ 3.814  ,  7.117  ,  0.8613 , -3.594  ,  3.262  ,  4.938  ,
E              3.172  , -8.02   ],
E            [-5.027  , -2.363  ,  1.477  , -3.875  , -7.79   ,  7.445  ,
E             -6.977  ,  2.092  ],
E            [-0.3604 , -6.785  ,  8.61   ,  3.824  ,  8.07   , -4.08   ,
E              3.7    ,  2.654  ],
E            [-8.375  ,  2.303  ,  7.32   , -7.06   ,  0.668  , -8.46   ,
E              4.133  ,  1.371  ],
E            [-1.925  , -7.91   , -2.496  ,  7.48   , -7.594  , -4.254  ,
E             -3.508  ,  5.188  ],
E            [ 4.695  , -8.08   ,  1.143  ,  3.129  ,  8.92   ,  8.69   ,
E              6.01   , -2.188  ]],
E   
E           [[ 7.496  ,  7.77   ,  6.996  , -8.99   , -6.96   , -5.766  ,
E              2.197  , -0.6943 ],
E            [ 8.83   ,  7.312  ,  0.9404 ,  2.418  , -7.426  ,  6.363  ,
E             -6.434  , -3.875  ],
E            [-2.021  , -7.47   , -7.93   , -7.016  ,  7.918  , -5.105  ,
E              7.137  , -5.246  ],
E            [ 0.7207 , -2.285  ,  5.773  , -3.453  , -8.44   ,  2.46   ,
E              6.977  ,  3.428  ],
E            [-5.246  , -3.578  , -0.879  ,  8.7    , -1.872  , -8.07   ,
E              4.81   , -3.217  ],
E            [ 2.293  ,  8.086  ,  1.389  , -6.68   ,  4.105  ,  1.767  ,
E              8.19   , -7.65   ]],
E   
E           [[ 0.10547,  2.602  , -1.134  ,  7.32   ,  4.367  ,  5.24   ,
E             -0.2812 ,  6.953  ],
E            [-6.855  ,  2.629  ,  3.172  ,  3.102  ,  7.27   ,  4.57   ,
E             -5.617  , -2.338  ],
E            [ 2.734  , -5.37   ,  2.434  ,  6.582  , -2.725  , -2.338  ,
E             -2.443  , -3.797  ],
E            [ 3.121  ,  4.605  , -1.635  , -8.87   , -0.2812 ,  0.3867 ,
E              2.812  , -3.287  ],
E            [ 2.855  , -7.707  , -5.89   ,  2.066  , -2.83   , -3.621  ,
E             -8.086  ,  4.156  ],
E            [ 6.688  , -5.527  ,  3.91   , -3.684  ,  1.925  ,  8.664  ,
E              7.11   , -1.529  ]]],
E   
E   
E          [[[ 8.47   , -5.125  ,  5.66   , -1.336  , -3.146  ,  0.2373 ,
E             -2.031  , -4.836  ],
E            [-3.488  ,  2.672  ,  6.547  ,  3.55   ,  4.5    , -4.598  ,
E             -5.95   ,  2.102  ],
E            [-0.06152, -4.     ,  7.438  , -2.9    ,  2.953  , -8.78   ,
E              8.6    ,  3.586  ],
E            [ 0.668  ,  8.16   , -7.1    ,  2.875  , -1.652  , -2.207  ,
E              0.1934 ,  6.637  ],
E            [-3.121  , -2.7    , -6.285  , -3.744  , -6.664  ,  3.639  ,
E             -3.332  ,  2.338  ],
E            [ 2.918  , -0.01758,  6.848  ,  8.91   , -5.055  , -4.043  ,
E              5.36   ,  2.162  ]],
E   
E           [[ 0.3076 ,  3.516  , -8.766  , -7.074  , -5.44   ,  5.387  ,
E              6.805  , -6.625  ],
E            [ 6.46   ,  5.71   , -2.047  ,  4.484  , -0.545  ,  2.504  ,
E              3.98   ,  6.688  ],
E            [-1.679  , -6.223  ,  5.176  ,  2.602  , -6.04   ,  2.69   ,
E              3.754  , -5.92   ],
E            [-4.516  , -4.395  ,  7.53   , -4.684  ,  7.973  ,  5.21   ,
E             -8.93   ,  4.36   ],
E            [ 6.363  ,  4.914  , -4.08   ,  7.207  , -5.14   ,  4.844  ,
E              7.68   , -2.047  ],
E            [ 6.477  ,  8.5    ,  0.9404 ,  2.145  ,  2.232  , -8.72   ,
E              0.1934 , -1.582  ]],
E   
E           [[-4.035  ,  7.207  , -8.58   , -4.95   , -3.824  , -6.055  ,
E              8.3    ,  3.137  ],
E            [ 1.063  ,  3.895  , -5.85   ,  5.28   , -5.97   , -7.312  ,
E              3.648  ,  3.305  ],
E            [ 4.062  , -2.363  ,  5.8    ,  1.274  , -4.81   , -1.731  ,
E             -1.187  , -3.27   ],
E            [-5.42   , -6.496  ,  7.883  , -1.529  , -8.03   , -3.559  ,
E              7.72   , -2.04   ],
E            [-8.57   ,  0.11426,  1.169  , -3.84   , -3.77   ,  3.006  ,
E             -3.121  ,  2.504  ],
E            [ 8.2    , -6.25   , -3.684  , -6.4    ,  5.316  , -4.008  ,
E              8.94   ,  1.433  ]],
E   
E           [[-1.696  ,  4.36   , -3.648  , -8.87   ,  3.016  ,  0.677  ,
E              3.031  , -8.375  ],
E            [-6.637  , -6.484  , -8.43   , -1.274  , -8.414  ,  1.6875 ,
E              0.4746 , -0.3955 ],
E            [ 3.322  , -0.4482 ,  1.468  ,  3.586  , -4.543  ,  4.71   ,
E              5.64   , -1.652  ],
E            [ 0.8877 , -1.222  , -2.031  ,  7.094  ,  7.074  , -5.3    ,
E              8.46   , -2.197  ],
E            [-4.81   , -0.87   , -7.953  ,  8.03   ,  6.523  ,  2.549  ,
E             -4.72   ,  1.406  ],
E            [ 5.906  , -8.32   ,  0.3252 ,  3.453  , -5.133  ,  4.74   ,
E             -4.95   , -3.629  ]]]], dtype=float16)}
E   Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[4,4,3,8] input_0, float16[4,4,6,8] input_1, float16[4,4,6,8] input_2, float16[3,6] attn_mask) => (float16[4,4,3,8] _val_5) 
E      <float16 _val_4>
E   {
E      _val_4 = pkg.onnxscript.torch_lib._attention_scale (input_0)
E      _val_5 = pkg.onnxscript.torch_lib._aten_scaled_dot_product_attention_float_mask_onnx <dropout_p: float = 0> (input_0, input_1, input_2, attn_mask, _val_4)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["" : 18]
E   >
E   _attention_scale (query) => (scale)
E   {
E      tmp = Shape (query)
E      int64_m1 = Constant <value: tensor = int64 int64_m1 {-1}> ()
E      tmp_subscripted = Gather <axis: int = 0> (tmp, int64_m1)
E      embedding_size = CastLike (tmp_subscripted, query)
E      const = Constant <value: tensor = float const {1}> ()
E      tmp_0 = Sqrt (embedding_size)
E      const_cast = CastLike (const, tmp_0)
E      scale = Div (const_cast, tmp_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["" : 18]
E   >
E   _aten_scaled_dot_product_attention_float_mask_onnx <dropout_p>(query, key, value, attn_mask, scale) => (return_val)
E   {
E      key_shape = Shape (key)
E      int64_0_1d = Constant <value: tensor = int64[1] int64_0_1d {0}> ()
E      int64_1_1d = Constant <value: tensor = int64[1] int64_1_1d {1}> ()
E      int64_m1_1d = Constant <value: tensor = int64[1] int64_m1_1d {-1}> ()
E      int64_9223372036854775807_1d = Constant <value: tensor = int64[1] int64_9223372036854775807_1d {9223372036854775807}> ()
E      key_last_dim = Slice (key_shape, int64_m1_1d, int64_9223372036854775807_1d, int64_0_1d, int64_1_1d)
E      int64_0_1d_0 = Constant <value: tensor = int64[1] int64_0_1d_0 {0}> ()
E      int64_1_1d_1 = Constant <value: tensor = int64[1] int64_1_1d_1 {1}> ()
E      int64_m2_1d = Constant <value: tensor = int64[1] int64_m2_1d {-2}> ()
E      int64_m1_1d_2 = Constant <value: tensor = int64[1] int64_m1_1d_2 {-1}> ()
E      key_second_last_dim = Slice (key_shape, int64_m2_1d, int64_m1_1d_2, int64_0_1d_0, int64_1_1d_1)
E      int64_0_1d_3 = Constant <value: tensor = int64[1] int64_0_1d_3 {0}> ()
E      int64_1_1d_4 = Constant <value: tensor = int64[1] int64_1_1d_4 {1}> ()
E      int64_m2_1d_5 = Constant <value: tensor = int64[1] int64_m2_1d_5 {-2}> ()
E      key_first_dims = Slice (key_shape, int64_0_1d_3, int64_m2_1d_5, int64_0_1d_3, int64_1_1d_4)
E      tmp = Constant <value_ints: ints = [-1]> ()
E      key_squeezed_shape = Concat <axis: int = 0> (tmp, key_second_last_dim, key_last_dim)
E      key_squeezed = Reshape (key, key_squeezed_shape)
E      key_squeezed_transposed = Transpose <perm: ints = [0, 2, 1]> (key_squeezed)
E      key_transposed_shape = Concat <axis: int = 0> (key_first_dims, key_last_dim, key_second_last_dim)
E      key_transposed = Reshape (key_squeezed_transposed, key_transposed_shape)
E      tmp_6 = Sqrt (scale)
E      query_scaled = Mul (query, tmp_6)
E      tmp_7 = Sqrt (scale)
E      key_transposed_scaled = Mul (key_transposed, tmp_7)
E      tmp_8 = MatMul (query_scaled, key_transposed_scaled)
E      tmp_9 = Add (tmp_8, attn_mask)
E      attn_weight = Softmax <axis: int = -1> (tmp_9)
E      dropout_p = Constant <value_float: float = @dropout_p> ()
E      attn_weight_10, _ = Dropout (attn_weight, dropout_p)
E      return_val = MatMul (attn_weight_10, value)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
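Note on how these "ONNX model is invalid" failures are produced: as the tracebacks later in this report show, the test harness validates each captured graph with onnx.checker.check_model(..., full_check=True), and full_check runs shape/type inference on top of the structural checks. A minimal sketch of that validation step in isolation (the model path below is hypothetical; the harness passes an in-memory ModelProto):

    import onnx

    # Hypothetical path -- the test harness builds the ModelProto in memory instead.
    model = onnx.load("model.onnx")

    # full_check=True additionally runs shape/type inference; the
    # InferenceError / TypeInferenceError messages in this report come from
    # that inference pass, not from the basic proto checks.
    onnx.checker.check_model(model, full_check=True)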

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__native_batch_norm_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: Output 0 mismatch
AssertionError: Output 0 mismatch
AssertionError: Output 0 mismatch
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 10 / 125 (8.0%)
E   Greatest absolute difference: 0.002197265625 at index (2, 1, 2) (up to 1e-05 allowed)
E   Greatest relative difference: 0.01470947265625 at index (1, 0, 0) (up to 0.001 allowed)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:280: in run_test_output_match
    raise AssertionError(f"Output {j} mismatch") from e
E   AssertionError: Output 0 mismatch
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 1 / 3 (33.3%)
E   Greatest absolute difference: 0.000732421875 at index (1, 0) (up to 1e-05 allowed)
E   Greatest relative difference: 0.0014848709106445312 at index (1, 0) (up to 0.001 allowed)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:280: in run_test_output_match
    raise AssertionError(f"Output {j} mismatch") from e
E   AssertionError: Output 0 mismatch
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 2 / 72 (2.8%)
E   Greatest absolute difference: 0.000732421875 at index (0, 0, 0, 2) (up to 1e-05 allowed)
E   Greatest relative difference: 0.0090484619140625 at index (1, 0, 0, 0) (up to 0.001 allowed)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:280: in run_test_output_match
    raise AssertionError(f"Output {j} mismatch") from e
E   AssertionError: Output 0 mismatch
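Note: the native_batch_norm_cpu_float16 mismatches above are accuracy failures, not invalid graphs. The reported absolute differences (roughly 7e-4 to 2.2e-3) exceed the comparison bounds printed in the log (atol 1e-05, rtol 0.001), which are consistent with torch.testing.assert_close's documented float16 defaults. A minimal illustrative sketch of the same kind of comparison; the tensors and widened tolerances below are chosen for demonstration only and are not the ones the suite uses:

    import torch

    actual = torch.tensor([1.0, 2.0, 3.0], dtype=torch.float16)
    expected = actual * (1 + 5e-3)  # ~0.5% relative error, larger than rtol=1e-3

    # With the default float16 tolerances this raises
    # "Tensor-likes are not close!", as in the failures above:
    #     torch.testing.assert_close(actual, expected)

    # With widened (illustrative) tolerances the same tensors compare as close:
    torch.testing.assert_close(actual, expected, rtol=1e-2, atol=1e-3)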

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__softmax_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5] input_0) => (float16[5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 0, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = Softmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 0, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = Softmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 1, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = Softmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = -1, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = Softmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,10,5] input_0) => (float16[5,10,5] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 2, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = Softmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16[5,0,0] input_0) => (float16[5,0,0] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = -1, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = Softmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float16 input_0) => (float16 _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 0, dtype: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_softmax <dim>(self) => (result_6)
{
   self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
   self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
      tmp = Constant <value_ints: ints = [0]> ()
      self_0 = Unsqueeze (self, tmp)
   }, else_branch: graph = elseGraph_8 () => ( self_1) {
      self_1 = Identity (self)
   }>
   result = Softmax <axis: int = @dim> (self_2)
   result_3 = Cast <to: int = @dtype> (result)
   result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
      result_4 = Squeeze (result_3)
   }, else_branch: graph = elseGraph_12 () => ( result_5) {
      result_5 = Identity (result_3)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_softmax, node name: aten_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5] input_0) => (float16[5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 0, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = Softmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_softmax, node name: aten_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 0, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = Softmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_softmax, node name: aten_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 1, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = Softmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_softmax, node name: aten_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,5] input_0) => (float16[5,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = -1, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = Softmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_softmax, node name: aten_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,10,5] input_0) => (float16[5,10,5] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 2, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = Softmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_softmax, node name: aten_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16[5,0,0] input_0) => (float16[5,0,0] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = -1, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = Softmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_softmax, node name: aten_softmax_0): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (float16 input_0) => (float16 _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_softmax <dim: int = 0, dtype: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_softmax <dim>(self) => (result_6)
E   {
E      self_is_scalar = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      self_2 = If (self_is_scalar) <then_branch: graph = thenGraph_8 () => ( self_0) {
E         tmp = Constant <value_ints: ints = [0]> ()
E         self_0 = Unsqueeze (self, tmp)
E      }, else_branch: graph = elseGraph_8 () => ( self_1) {
E         self_1 = Identity (self)
E      }>
E      result = Softmax <axis: int = @dim> (self_2)
E      result_3 = Cast <to: int = @dtype> (result)
E      result_6 = If (self_is_scalar) <then_branch: graph = thenGraph_12 () => ( result_4) {
E         result_4 = Squeeze (result_3)
E      }, else_branch: graph = elseGraph_12 () => ( result_5) {
E         result_5 = Identity (result_3)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
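Note: the aten_softmax failures are type-inference errors, not numeric mismatches. In ONNX's element-type encoding, the "(1) vs (10)" in the message means FLOAT vs FLOAT16: the function body casts its result using the dtype attribute (here dtype: int = 1, i.e. Cast to FLOAT), while each main_graph declares a float16 output, so the inferred and declared element types disagree. A minimal sketch of the two type codes and an illustrative Cast node (the node below is illustrative, not taken from the failing model):

    import onnx
    from onnx import TensorProto, helper

    # Element-type codes referenced in "Inferred elem type differs ...: (1) vs (10)".
    assert TensorProto.FLOAT == 1
    assert TensorProto.FLOAT16 == 10

    # An illustrative Cast node: to=TensorProto.FLOAT forces a FLOAT output,
    # which conflicts with an output declared as FLOAT16 once shape/type
    # inference runs over the graph.
    cast_node = helper.make_node("Cast", inputs=["x"], outputs=["y"], to=TensorProto.FLOAT)
    print(cast_node)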

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

3 out of 9 runs failed: test_output_match_opinfo__all_dim_cpu_bool (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 1s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 1s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,5] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,5] input_0) => (bool[1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,2,1,2] input_0) => (bool _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (bool[3,2,1,2] input_0) => (bool[3,1] _val_1) {
   _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
}
<
  domain: "pkg.onnxscript.torch_lib",
  opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
>
aten_all_dim <dim>(self) => (result_1)
{
   cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
   result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
      result = Cast <to: int = 9> (self)
   }, else_branch: graph = elseGraph_5 () => ( result_0) {
      self_bool = Cast <to: int = 9> (self)
      self_int = Cast <to: int = 7> (self_bool)
      dim = Constant <value_int: int = @dim> ()
      tmp = Constant <value_ints: ints = [-1]> ()
      dims = Reshape (dim, tmp)
      all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
      result_0 = Cast <to: int = 9> (all_true)
   }>
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,5] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,5] input_0) => (bool[1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,2,1,2] input_0) => (bool _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, 1, 2, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,2,1,2] input_0) => (bool[1,2,1,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [0, -1], keepdim: int = 1> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:aten_all_dim, node name: aten_all_dim_0): [ShapeInferenceError] Inference error(s): (op_type:If, node name: n1): [ShapeInferenceError] Inference error(s): (op_type:Constant, node name: n2): [ShapeInferenceError] Attribute 'value_int' expect an integer.
E   (op_type:Reshape, node name: n4): [TypeInferenceError] Input 0 expected to have type but instead is null

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["pkg.onnxscript.torch_lib" : 1, "" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (bool[3,2,1,2] input_0) => (bool[3,1] _val_1) {
E      _val_1 = pkg.onnxscript.torch_lib.aten_all_dim <dim: ints = [1, 3], keepdim: int = 0> (input_0)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib",
E     opset_import: ["pkg.onnxscript.torch_lib.common" : 1,"" : 18]
E   >
E   aten_all_dim <dim>(self) => (result_1)
E   {
E      cond = pkg.onnxscript.torch_lib.common.IsScalar (self)
E      result_1 = If (cond) <then_branch: graph = thenGraph_5 () => ( result) {
E         result = Cast <to: int = 9> (self)
E      }, else_branch: graph = elseGraph_5 () => ( result_0) {
E         self_bool = Cast <to: int = 9> (self)
E         self_int = Cast <to: int = 7> (self_bool)
E         dim = Constant <value_int: int = @dim> ()
E         tmp = Constant <value_ints: ints = [-1]> ()
E         dims = Reshape (dim, tmp)
E         all_true = ReduceMin <keepdims: int = @keepdim> (self_int, dims)
E         result_0 = Cast <to: int = 9> (all_true)
E      }>
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
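
The ShapeInferenceError repeated above points at the Constant <value_int: int = @dim> node inside aten_all_dim: value_int carries a single integer, but these test cases pass dim as a list of axes, so the attribute reference cannot be resolved and the downstream Reshape sees an untyped input. A minimal sketch of the two attribute forms, assuming only the stock onnx.helper API (illustrative, not the torchlib fix itself):

import onnx
from onnx import helper

# value_int holds exactly one integer; binding a list of axes to it is what
# trips "Attribute 'value_int' expect an integer" during shape inference.
single = helper.make_node("Constant", inputs=[], outputs=["dims"], value_int=0)

# value_ints holds a list, so a multi-axis dim such as [0, -1] is representable
# and the Reshape/ReduceMin that consume it can infer an input type.
multi = helper.make_node("Constant", inputs=[], outputs=["dims"], value_ints=[0, -1])

print(onnx.AttributeProto.AttributeType.Name(single.attribute[0].type))  # INT
print(onnx.AttributeProto.AttributeType.Name(multi.attribute[0].type))   # INTS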

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

3 out of 9 runs failed: test_output_match_opinfo__ops_aten_native_group_norm_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
Failed: Unexpected success
Unexpected success
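
"Failed: Unexpected success" means this case was recorded as an expected failure and now passes, so the stale marker is what fails the run. A generic unittest sketch of the mechanism (the torchlib suite routes its expected failures through its own test-data tables, so this is only illustrative):

import unittest

class Example(unittest.TestCase):
    @unittest.expectedFailure
    def test_now_passing(self):
        # When a test decorated as an expected failure passes, the runner
        # reports it as an unexpected success instead of a plain pass.
        self.assertEqual(1 + 1, 2)

Dropping the now-obsolete expected-failure entry for this op would turn the report back into a plain pass.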

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyEagerCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__nn_functional_scaled_dot_product_attention_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyEagerCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 4s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 3s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 2s]
Raw output
onnxscript.evaluator.EagerModeError: Unable to create onnxruntime InferenceSession for executing .Add op with onnx model
<
   ir_version: 7,
   opset_import: ["" : 14]
>
node_graph (float16[4,3,6] input0, float[3,6] input1) => (float16[4,3,6] output0) {
   output0 = Add (input0, input1)
}
AssertionError: Tensor-likes are not close!

Mismatched elements: 18 / 96 (18.8%)
Greatest absolute difference: 0.0390625 at index (1, 2, 0) (up to 1e-05 allowed)
Greatest relative difference: 0.052001953125 at index (1, 2, 2) (up to 0.001 allowed)
onnxscript.evaluator.EagerModeError: Unable to create onnxruntime InferenceSession for executing .Add op with onnx model
<
   ir_version: 7,
   opset_import: ["" : 14]
>
node_graph (float16[4,4,3,6] input0, float[3,6] input1) => (float16[4,4,3,6] output0) {
   output0 = Add (input0, input1)
}
AssertionError: Tensor-likes are not close!

Mismatched elements: 56 / 384 (14.6%)
Greatest absolute difference: 0.03515625 at index (1, 0, 1, 5) (up to 1e-05 allowed)
Greatest relative difference: 0.11883544921875 at index (2, 0, 0, 0) (up to 0.001 allowed)
onnxscript.evaluator.EagerModeError: Unable to create onnxruntime InferenceSession for executing .Add op with onnx model
<
   ir_version: 7,
   opset_import: ["" : 14]
>
node_graph (float16[4,4,3,6] input0, float[3,6] input1) => (float16[4,4,3,6] output0) {
   output0 = Add (input0, input1)
}
AssertionError: Tensor-likes are not close!

Mismatched elements: 51 / 384 (13.3%)
Greatest absolute difference: 0.102294921875 at index (3, 2, 0, 2) (up to 1e-05 allowed)
Greatest relative difference: 4.1796875 at index (0, 0, 0, 2) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 48 / 384 (12.5%)
Greatest absolute difference: 0.12890625 at index (3, 2, 1, 0) (up to 1e-05 allowed)
Greatest relative difference: 0.1820068359375 at index (0, 2, 0, 3) (up to 0.001 allowed)
onnxscript/evaluator.py:476: in _call_ort
    session = ort.InferenceSession(
.nox/test_torch_nightly/lib/python3.10/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py:419: in __init__
    self._create_inference_session(providers, provider_options, disabled_optimizers)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py:454: in _create_inference_session
    sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
E   onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Type Error: Type parameter (T) of Optype (Add) bound to different types (tensor(float16) and tensor(float) in node ().

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:584: in executor
    return function(*args, **kwargs)
onnxscript/values.py:573: in __call__
    return self.func(*args, **kwargs)
onnxscript/function_libs/torch_lib/ops/nn.py:1689: in aten_scaled_dot_product_attention
    return _aten_scaled_dot_product_attention_float_mask_onnx(
onnxscript/values.py:525: in __call__
    return evaluator.default().eval_function(self, args, kwargs)
onnxscript/evaluator.py:309: in eval_function
    result = function.function(*adapted_args, **adapted_kwargs)
onnxscript/function_libs/torch_lib/ops/nn.py:1913: in _aten_scaled_dot_product_attention_float_mask_onnx
    op.Add(op.MatMul(query_scaled, key_transposed_scaled), attn_mask),
onnxscript/onnx_opset/_impl/opset14.py:82: in Add
    return op(*self._prepare_inputs(schema, A, B))
onnxscript/values.py:303: in __call__
    return evaluator.default().eval(schema, args, kwargs)
onnxscript/evaluator.py:196: in eval
    outputs = self._eval(schema, inputs, attributes, closure)
onnxscript/evaluator.py:510: in _eval
    return _call_ort(schema, inputs, attributes, closure)
onnxscript/evaluator.py:480: in _call_ort
    raise EagerModeError(
E   onnxscript.evaluator.EagerModeError: Unable to create onnxruntime InferenceSession for executing .Add op with onnx model
E   <
E      ir_version: 7,
E      opset_import: ["" : 14]
E   >
E   node_graph (float16[4,3,6] input0, float[3,6] input1) => (float16[4,3,6] output0) {
E      output0 = Add (input0, input1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 18 / 96 (18.8%)
E   Greatest absolute difference: 0.0390625 at index (1, 2, 0) (up to 1e-05 allowed)
E   Greatest relative difference: 0.052001953125 at index (1, 2, 2) (up to 0.001 allowed)
onnxscript/evaluator.py:476: in _call_ort
    session = ort.InferenceSession(
.nox/test_torch_nightly/lib/python3.10/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py:419: in __init__
    self._create_inference_session(providers, provider_options, disabled_optimizers)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py:454: in _create_inference_session
    sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
E   onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Type Error: Type parameter (T) of Optype (Add) bound to different types (tensor(float16) and tensor(float) in node ().

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:584: in executor
    return function(*args, **kwargs)
onnxscript/values.py:573: in __call__
    return self.func(*args, **kwargs)
onnxscript/function_libs/torch_lib/ops/nn.py:1689: in aten_scaled_dot_product_attention
    return _aten_scaled_dot_product_attention_float_mask_onnx(
onnxscript/values.py:525: in __call__
    return evaluator.default().eval_function(self, args, kwargs)
onnxscript/evaluator.py:309: in eval_function
    result = function.function(*adapted_args, **adapted_kwargs)
onnxscript/function_libs/torch_lib/ops/nn.py:1913: in _aten_scaled_dot_product_attention_float_mask_onnx
    op.Add(op.MatMul(query_scaled, key_transposed_scaled), attn_mask),
onnxscript/onnx_opset/_impl/opset14.py:82: in Add
    return op(*self._prepare_inputs(schema, A, B))
onnxscript/values.py:303: in __call__
    return evaluator.default().eval(schema, args, kwargs)
onnxscript/evaluator.py:196: in eval
    outputs = self._eval(schema, inputs, attributes, closure)
onnxscript/evaluator.py:510: in _eval
    return _call_ort(schema, inputs, attributes, closure)
onnxscript/evaluator.py:480: in _call_ort
    raise EagerModeError(
E   onnxscript.evaluator.EagerModeError: Unable to create onnxruntime InferenceSession for executing .Add op with onnx model
E   <
E      ir_version: 7,
E      opset_import: ["" : 14]
E   >
E   node_graph (float16[4,4,3,6] input0, float[3,6] input1) => (float16[4,4,3,6] output0) {
E      output0 = Add (input0, input1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 56 / 384 (14.6%)
E   Greatest absolute difference: 0.03515625 at index (1, 0, 1, 5) (up to 1e-05 allowed)
E   Greatest relative difference: 0.11883544921875 at index (2, 0, 0, 0) (up to 0.001 allowed)
onnxscript/evaluator.py:476: in _call_ort
    session = ort.InferenceSession(
.nox/test_torch_nightly/lib/python3.10/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py:419: in __init__
    self._create_inference_session(providers, provider_options, disabled_optimizers)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py:454: in _create_inference_session
    sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
E   onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Type Error: Type parameter (T) of Optype (Add) bound to different types (tensor(float16) and tensor(float) in node ().

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:584: in executor
    return function(*args, **kwargs)
onnxscript/values.py:573: in __call__
    return self.func(*args, **kwargs)
onnxscript/function_libs/torch_lib/ops/nn.py:1689: in aten_scaled_dot_product_attention
    return _aten_scaled_dot_product_attention_float_mask_onnx(
onnxscript/values.py:525: in __call__
    return evaluator.default().eval_function(self, args, kwargs)
onnxscript/evaluator.py:309: in eval_function
    result = function.function(*adapted_args, **adapted_kwargs)
onnxscript/function_libs/torch_lib/ops/nn.py:1913: in _aten_scaled_dot_product_attention_float_mask_onnx
    op.Add(op.MatMul(query_scaled, key_transposed_scaled), attn_mask),
onnxscript/onnx_opset/_impl/opset14.py:82: in Add
    return op(*self._prepare_inputs(schema, A, B))
onnxscript/values.py:303: in __call__
    return evaluator.default().eval(schema, args, kwargs)
onnxscript/evaluator.py:196: in eval
    outputs = self._eval(schema, inputs, attributes, closure)
onnxscript/evaluator.py:510: in _eval
    return _call_ort(schema, inputs, attributes, closure)
onnxscript/evaluator.py:480: in _call_ort
    raise EagerModeError(
E   onnxscript.evaluator.EagerModeError: Unable to create onnxruntime InferenceSession for executing .Add op with onnx model
E   <
E      ir_version: 7,
E      opset_import: ["" : 14]
E   >
E   node_graph (float16[4,4,3,6] input0, float[3,6] input1) => (float16[4,4,3,6] output0) {
E      output0 = Add (input0, input1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 51 / 384 (13.3%)
E   Greatest absolute difference: 0.102294921875 at index (3, 2, 0, 2) (up to 1e-05 allowed)
E   Greatest relative difference: 4.1796875 at index (0, 0, 0, 2) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 48 / 384 (12.5%)
E   Greatest absolute difference: 0.12890625 at index (3, 2, 1, 0) (up to 1e-05 allowed)
E   Greatest relative difference: 0.1820068359375 at index (0, 2, 0, 3) (up to 0.001 allowed)
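
Each Add failure above comes from the eager evaluator handing a float16 score tensor and a float32 attention mask to the same Add node; ONNX binds both Add inputs to one type parameter T, so mixed float16/float32 operands are rejected. A minimal sketch, assuming only the stock onnx Python API (not the torchlib change itself): casting the mask to the score dtype with CastLike before the Add lets the model pass the checker.

import onnx
from onnx import TensorProto, helper

# Hypothetical shapes taken from the failing sample: float16 scores, float32 mask.
scores = helper.make_tensor_value_info("scores", TensorProto.FLOAT16, [4, 3, 6])
mask = helper.make_tensor_value_info("mask", TensorProto.FLOAT, [3, 6])
out = helper.make_tensor_value_info("out", TensorProto.FLOAT16, [4, 3, 6])

graph = helper.make_graph(
    [
        # CastLike takes its element type from the second input, so the mask
        # becomes float16 before it reaches Add.
        helper.make_node("CastLike", ["mask", "scores"], ["mask_cast"]),
        helper.make_node("Add", ["scores", "mask_cast"], ["out"]),
    ],
    "add_with_castlike",
    [scores, mask],
    [out],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])
onnx.checker.check_model(model, full_check=True)  # both Add inputs are now float16

This only addresses the session-creation failure; the interleaved assert_close mismatches are tolerance comparisons on other samples of the same test.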

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyEagerCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__logaddexp_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyEagerCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: Tensor-likes are not close!

Mismatched elements: 6 / 250 (2.4%)
Greatest absolute difference: 0.00048828125 at index (3, 1, 1) (up to 1e-05 allowed)
Greatest relative difference: 0.0264892578125 at index (0, 4, 3) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 5 / 250 (2.0%)
Greatest absolute difference: 0.00048828125 at index (2, 1, 2) (up to 1e-05 allowed)
Greatest relative difference: 0.0095367431640625 at index (0, 1, 0) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 4 / 500 (0.8%)
Greatest absolute difference: 0.00048828125 at index (1, 1, 0) (up to 1e-05 allowed)
Greatest relative difference: 1.0 at index (2, 1, 0) (up to 0.001 allowed)
AssertionError: Tensor-likes are not close!

Mismatched elements: 23 / 500 (4.6%)
Greatest absolute difference: 0.0009765625 at index (6, 6, 2) (up to 1e-05 allowed)
Greatest relative difference: 0.01522064208984375 at index (1, 2, 2) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 6 / 250 (2.4%)
E   Greatest absolute difference: 0.00048828125 at index (3, 1, 1) (up to 1e-05 allowed)
E   Greatest relative difference: 0.0264892578125 at index (0, 4, 3) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 5 / 250 (2.0%)
E   Greatest absolute difference: 0.00048828125 at index (2, 1, 2) (up to 1e-05 allowed)
E   Greatest relative difference: 0.0095367431640625 at index (0, 1, 0) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 4 / 500 (0.8%)
E   Greatest absolute difference: 0.00048828125 at index (1, 1, 0) (up to 1e-05 allowed)
E   Greatest relative difference: 1.0 at index (2, 1, 0) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 23 / 500 (4.6%)
E   Greatest absolute difference: 0.0009765625 at index (6, 6, 2) (up to 1e-05 allowed)
E   Greatest relative difference: 0.01522064208984375 at index (1, 2, 2) (up to 0.001 allowed)
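
The absolute differences above are at the scale of half-precision rounding: 0.00048828125 is exactly 2^-11, half a float16 unit in the last place near 1.0. A small numpy sketch (illustrative only, using the standard stable formulation rather than the torchlib graph) of how float16 and float32 evaluations drift apart by about that much:

import numpy as np

def logaddexp_ref(a, b):
    # Numerically stable log(exp(a) + exp(b)).
    m = np.maximum(a, b)
    return m + np.log1p(np.exp(-np.abs(a - b)))

rng = np.random.default_rng(0)
a16 = rng.standard_normal((5, 5)).astype(np.float16)
b16 = rng.standard_normal((5, 5)).astype(np.float16)

reference = logaddexp_ref(a16.astype(np.float32), b16.astype(np.float32))
half = logaddexp_ref(a16, b16).astype(np.float32)

# The worst-case gap is on the order of the float16 spacing, comparable to the
# absolute differences reported above.
print(np.max(np.abs(reference - half)))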

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyEagerCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__linspace_tensor_overload_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyEagerCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 5s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 11s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 6s]
Raw output
AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
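
Every linspace failure above is dtype-only: the produced tensor is float32 where float16 was requested. A hypothetical repair, sketched with the stock onnx API rather than the actual torchlib linspace implementation, is to keep the interpolation in float32 for accuracy and append a Cast to the requested element type:

import onnx
from onnx import TensorProto, helper

# values32 stands in for the float32 result of the linspace computation.
values32 = helper.make_tensor_value_info("values32", TensorProto.FLOAT, [50])
values16 = helper.make_tensor_value_info("values16", TensorProto.FLOAT16, [50])

graph = helper.make_graph(
    [helper.make_node("Cast", ["values32"], ["values16"], to=TensorProto.FLOAT16)],
    "cast_to_requested_dtype",
    [values32],
    [values16],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])
onnx.checker.check_model(model, full_check=True)  # output element type is float16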
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: The values for attribute 'dtype' do not match: torch.float32 != torch.float16.
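Every sampled input fails the same way: the exported ONNX program returns float32 while the eager PyTorch reference stays float16. That pattern usually means some helper tensor in the decomposition (for example a scale factor or the additive mask derived from the bool mask) is built in float32 and never cast back to the query dtype, so the whole computation is promoted. A small illustrative sketch of that promotion (hypothetical values, not the torchlib code itself):

import torch

q = torch.ones(4, 3, 8, dtype=torch.float16)

# A dimensioned float32 helper (e.g. a scale or additive mask built from float
# constants) promotes the fp16 computation to fp32 ...
scale = torch.full((1,), 0.3536, dtype=torch.float32)
print((q * scale).dtype)              # torch.float32, the dtype the assertion flags

# ... whereas casting the helper like the query keeps everything in fp16.
print((q * scale.to(q.dtype)).dtype)  # torch.float16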

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyEagerCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__linspace_tensor_overload_cpu_int32 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyEagerCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 8s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 9s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 5s]
Raw output
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.3333333432674408 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.3333333432674408 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.3333333432674408 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 2 at index (25,)
Greatest relative difference: inf at index (25,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 2 at index (25,)
Greatest relative difference: inf at index (25,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 2 at index (25,)
Greatest relative difference: inf at index (25,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 6 at index (49,)
Greatest relative difference: inf at index (9,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 6 at index (49,)
Greatest relative difference: inf at index (9,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 6 at index (49,)
Greatest relative difference: inf at index (9,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 34 / 50 (68.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (1,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 34 / 50 (68.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (1,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 34 / 50 (68.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: inf at index (1,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 33 / 50 (66.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: 1.0 at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 33 / 50 (66.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: 1.0 at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 33 / 50 (66.0%)
Greatest absolute difference: 3 at index (49,)
Greatest relative difference: 1.0 at index (17,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 1.0 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 1.0 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 1.0 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 37 / 50 (74.0%)
Greatest absolute difference: 4 at index (49,)
Greatest relative difference: 1.0 at index (13,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 37 / 50 (74.0%)
Greatest absolute difference: 4 at index (49,)
Greatest relative difference: 1.0 at index (13,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 37 / 50 (74.0%)
Greatest absolute difference: 4 at index (49,)
Greatest relative difference: 1.0 at index (13,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.019999999552965164 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.019999999552965164 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 1 / 50 (2.0%)
Greatest absolute difference: 1 at index (49,)
Greatest relative difference: 0.019999999552965164 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 7 at index (49,)
Greatest relative difference: inf at index (22,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 7 at index (49,)
Greatest relative difference: inf at index (22,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 7 at index (49,)
Greatest relative difference: inf at index (22,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (37,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (37,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (37,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (33,)
Greatest relative difference: 3.0 at index (33,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (33,)
Greatest relative difference: 3.0 at index (33,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 3 at index (33,)
Greatest relative difference: 3.0 at index (33,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 48 / 50 (96.0%)
Greatest absolute difference: 46 at index (49,)
Greatest relative difference: 0.9200000166893005 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 48 / 50 (96.0%)
Greatest absolute difference: 46 at index (49,)
Greatest relative difference: 0.9200000166893005 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 48 / 50 (96.0%)
Greatest absolute difference: 46 at index (49,)
Greatest relative difference: 0.9200000166893005 at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (46,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (46,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 4 at index (37,)
Greatest relative difference: inf at index (46,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 1 at index (1,)
Greatest relative difference: inf at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 1 at index (1,)
Greatest relative difference: inf at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 1 at index (1,)
Greatest relative difference: inf at index (49,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 46 at index (48,)
Greatest relative difference: 11.5 at index (48,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 46 at index (48,)
Greatest relative difference: 11.5 at index (48,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 49 / 50 (98.0%)
Greatest absolute difference: 46 at index (48,)
Greatest relative difference: 11.5 at index (48,)
AssertionError: Tensor-likes are not equal!

Mismatched elements: 43 / 50 (86.0%)
Greatest absolute difference: 7 at index (49,)
Greatest relative difference: 1.0 at index (7,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.3333333432674408 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.3333333432674408 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.3333333432674408 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 2 at index (25,)
E   Greatest relative difference: inf at index (25,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 2 at index (25,)
E   Greatest relative difference: inf at index (25,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 2 at index (25,)
E   Greatest relative difference: inf at index (25,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 6 at index (49,)
E   Greatest relative difference: inf at index (9,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 6 at index (49,)
E   Greatest relative difference: inf at index (9,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 6 at index (49,)
E   Greatest relative difference: inf at index (9,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 34 / 50 (68.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (1,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 34 / 50 (68.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (1,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 34 / 50 (68.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: inf at index (1,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 33 / 50 (66.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: 1.0 at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 33 / 50 (66.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: 1.0 at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 33 / 50 (66.0%)
E   Greatest absolute difference: 3 at index (49,)
E   Greatest relative difference: 1.0 at index (17,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 1.0 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 1.0 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 1.0 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 37 / 50 (74.0%)
E   Greatest absolute difference: 4 at index (49,)
E   Greatest relative difference: 1.0 at index (13,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 37 / 50 (74.0%)
E   Greatest absolute difference: 4 at index (49,)
E   Greatest relative difference: 1.0 at index (13,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 37 / 50 (74.0%)
E   Greatest absolute difference: 4 at index (49,)
E   Greatest relative difference: 1.0 at index (13,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.019999999552965164 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.019999999552965164 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 1 / 50 (2.0%)
E   Greatest absolute difference: 1 at index (49,)
E   Greatest relative difference: 0.019999999552965164 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 7 at index (49,)
E   Greatest relative difference: inf at index (22,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 7 at index (49,)
E   Greatest relative difference: inf at index (22,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 7 at index (49,)
E   Greatest relative difference: inf at index (22,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (37,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (37,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (37,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (33,)
E   Greatest relative difference: 3.0 at index (33,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (33,)
E   Greatest relative difference: 3.0 at index (33,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 3 at index (33,)
E   Greatest relative difference: 3.0 at index (33,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 48 / 50 (96.0%)
E   Greatest absolute difference: 46 at index (49,)
E   Greatest relative difference: 0.9200000166893005 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 48 / 50 (96.0%)
E   Greatest absolute difference: 46 at index (49,)
E   Greatest relative difference: 0.9200000166893005 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 48 / 50 (96.0%)
E   Greatest absolute difference: 46 at index (49,)
E   Greatest relative difference: 0.9200000166893005 at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (46,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (46,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 4 at index (37,)
E   Greatest relative difference: inf at index (46,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 1 at index (1,)
E   Greatest relative difference: inf at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 1 at index (1,)
E   Greatest relative difference: inf at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 1 at index (1,)
E   Greatest relative difference: inf at index (49,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 46 at index (48,)
E   Greatest relative difference: 11.5 at index (48,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 46 at index (48,)
E   Greatest relative difference: 11.5 at index (48,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 49 / 50 (98.0%)
E   Greatest absolute difference: 46 at index (48,)
E   Greatest relative difference: 11.5 at index (48,)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not equal!
E   
E   Mismatched elements: 43 / 50 (86.0%)
E   Greatest absolute difference: 7 at index (49,)
E   Greatest relative difference: 1.0 at index (7,)
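All of these mismatches come from the int32 overload of linspace: the reference and the decomposed computation disagree on a handful of elements (often the last one) or, for some start/end pairs, on almost the whole vector. That is the signature of rounding/truncation drift when linspace is rebuilt as an affine expression over an index range and only cast to int32 at the end. A minimal sketch of that effect, using hypothetical inputs since the concrete OpInfo samples are not shown in the log:

import torch

steps = 50
for start, end in [(0, 33), (2, 48), (-3, 46)]:   # hypothetical start/end pairs
    reference = torch.linspace(start, end, steps, dtype=torch.int32)
    # linspace rebuilt as start + arange(steps) * (end - start) / (steps - 1),
    # evaluated in float32 and truncated on the final cast; values that land just
    # short of an integer boundary can come out one (or more) below the reference.
    step = torch.tensor(float(end - start)) / (steps - 1)
    decomposed = (torch.arange(steps, dtype=torch.float32) * step + start).to(torch.int32)
    print(start, end, int((reference != decomposed).sum()))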

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU

See this annotation in the file changed.

@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__linspace_tensor_overload_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 7s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 6s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 8s]
Raw output
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0, int64 input_1) => (float16[0] _val_16) 
   <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_2 = Constant <value: tensor = float {0}> ()
   _val_3 = Cast <to: int = 1> (_val_2)
   _val_4 = Constant <value: tensor = float {1}> ()
   _val_5 = Cast <to: int = 1> (_val_4)
   _val_6 = Cast <to: int = 1> (input_0)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_3, _val_9, _val_5)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_5)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
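The remaining dumps in this raw output repeat the same structural problem with different sampled constants: main_graph declares its output _val_16 as float16, but every intermediate, including the final Add, is produced as float, and no cast back to float16 is emitted, so the checker's full type check presumably rejects the model. A toy model built with the onnx helper API (not the torchlib function itself) should trip the same class of error:

import onnx
from onnx import TensorProto, helper

# Graph output declared FLOAT16 while the producing Add infers FLOAT,
# mirroring the mismatch in the dumps above.
node = helper.make_node("Add", ["x", "y"], ["z"])
graph = helper.make_graph(
    [node],
    "main_graph",
    [
        helper.make_tensor_value_info("x", TensorProto.FLOAT, [50]),
        helper.make_tensor_value_info("y", TensorProto.FLOAT, [50]),
    ],
    [helper.make_tensor_value_info("z", TensorProto.FLOAT16, [50])],
)
model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])
onnx.checker.check_model(model, full_check=True)  # expected to raise a type-inference error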
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0) => (float16[0] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Cast <to: int = 1> (input_0)
   _val_6 = Constant <value: tensor = int64 {-3}> ()
   _val_7 = Cast <to: int = 1> (_val_6)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_5, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64 input_1) => (float16[0] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Constant <value: tensor = float {-2}> ()
   _val_6 = Cast <to: int = 1> (_val_5)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0, int64 input_1) => (float16[50] _val_16) 
   <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_2 = Constant <value: tensor = float {0}> ()
   _val_3 = Cast <to: int = 1> (_val_2)
   _val_4 = Constant <value: tensor = float {1}> ()
   _val_5 = Cast <to: int = 1> (_val_4)
   _val_6 = Cast <to: int = 1> (input_0)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_3, _val_9, _val_5)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_5)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0) => (float16[50] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Cast <to: int = 1> (input_0)
   _val_6 = Constant <value: tensor = int64 {-3}> ()
   _val_7 = Cast <to: int = 1> (_val_6)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_5, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64 input_1) => (float16[50] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Constant <value: tensor = float {-2}> ()
   _val_6 = Cast <to: int = 1> (_val_5)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0, int64 input_1) => (float16[0] _val_16) 
   <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_2 = Constant <value: tensor = float {0}> ()
   _val_3 = Cast <to: int = 1> (_val_2)
   _val_4 = Constant <value: tensor = float {1}> ()
   _val_5 = Cast <to: int = 1> (_val_4)
   _val_6 = Cast <to: int = 1> (input_0)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_3, _val_9, _val_5)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_5)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0) => (float16[0] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Cast <to: int = 1> (input_0)
   _val_6 = Constant <value: tensor = int64 {0}> ()
   _val_7 = Cast <to: int = 1> (_val_6)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_5, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64 input_1) => (float16[0] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Constant <value: tensor = float {-2}> ()
   _val_6 = Cast <to: int = 1> (_val_5)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0, int64 input_1) => (float16[50] _val_16) 
   <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_2 = Constant <value: tensor = float {0}> ()
   _val_3 = Cast <to: int = 1> (_val_2)
   _val_4 = Constant <value: tensor = float {1}> ()
   _val_5 = Cast <to: int = 1> (_val_4)
   _val_6 = Cast <to: int = 1> (input_0)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_3, _val_9, _val_5)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_5)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0) => (float16[50] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Cast <to: int = 1> (input_0)
   _val_6 = Constant <value: tensor = int64 {0}> ()
   _val_7 = Cast <to: int = 1> (_val_6)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_5, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64 input_1) => (float16[50] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Constant <value: tensor = float {-2}> ()
   _val_6 = Cast <to: int = 1> (_val_5)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0, int64 input_1) => (float16[0] _val_16) 
   <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_2 = Constant <value: tensor = float {0}> ()
   _val_3 = Cast <to: int = 1> (_val_2)
   _val_4 = Constant <value: tensor = float {1}> ()
   _val_5 = Cast <to: int = 1> (_val_4)
   _val_6 = Cast <to: int = 1> (input_0)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_3, _val_9, _val_5)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_5)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0) => (float16[0] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Cast <to: int = 1> (input_0)
   _val_6 = Constant <value: tensor = int64 {1}> ()
   _val_7 = Cast <to: int = 1> (_val_6)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_5, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64 input_1) => (float16[0] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Constant <value: tensor = float {-2}> ()
   _val_6 = Cast <to: int = 1> (_val_5)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0, int64 input_1) => (float16[50] _val_16) 
   <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_2 = Constant <value: tensor = float {0}> ()
   _val_3 = Cast <to: int = 1> (_val_2)
   _val_4 = Constant <value: tensor = float {1}> ()
   _val_5 = Cast <to: int = 1> (_val_4)
   _val_6 = Cast <to: int = 1> (input_0)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_3, _val_9, _val_5)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_5)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0) => (float16[50] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Cast <to: int = 1> (input_0)
   _val_6 = Constant <value: tensor = int64 {1}> ()
   _val_7 = Cast <to: int = 1> (_val_6)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_5, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64 input_1) => (float16[50] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Constant <value: tensor = float {-2}> ()
   _val_6 = Cast <to: int = 1> (_val_5)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {50}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0, int64 input_1) => (float16[0] _val_16) 
   <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_2 = Constant <value: tensor = float {0}> ()
   _val_3 = Cast <to: int = 1> (_val_2)
   _val_4 = Constant <value: tensor = float {1}> ()
   _val_5 = Cast <to: int = 1> (_val_4)
   _val_6 = Cast <to: int = 1> (input_0)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_3, _val_9, _val_5)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_5)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (float input_0) => (float16[0] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Cast <to: int = 1> (input_0)
   _val_6 = Constant <value: tensor = int64 {4}> ()
   _val_7 = Cast <to: int = 1> (_val_6)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_5, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
IsScalar (input) => (return_val)
{
   tmp = Shape (input)
   tmp_0 = Size (tmp)
   tmp_1 = Constant <value_int: int = 0> ()
   return_val = Equal (tmp_0, tmp_1)
}
AssertionError: ONNX model is invalid. Model:
<
   ir_version: 8,
   opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
   producer_name: "pytorch",
   producer_version: "2.2.0"
>
main_graph (int64 input_1) => (float16[0] _val_16) 
   <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
{
   _val_1 = Constant <value: tensor = float {0}> ()
   _val_2 = Cast <to: int = 1> (_val_1)
   _val_3 = Constant <value: tensor = float {1}> ()
   _val_4 = Cast <to: int = 1> (_val_3)
   _val_5 = Constant <value: tensor = float {-2}> ()
   _val_6 = Cast <to: int = 1> (_val_5)
   _val_7 = Cast <to: int = 1> (input_1)
   _val_8 = Constant <value: tensor = int64 {0}> ()
   _val_9 = Cast <to: int = 1> (_val_8)
   _val_10 = Range (_val_2, _val_9, _val_4)
   _val_11 = CastLike (_val_6, _val_7)
   _val_12 = Sub (_val_7, _val_11)
   _val_13 = Sub (_val_9, _val_4)
   _val_14 = Div (_val_12, _val_13)
   _val_15 = Mul (_val_10, _val_14)
   _val_16 = Add (_val_15, _val_11)
}
<
  domain: "pkg.onnxscript.torch_lib.common",
  opset_import: ["" : 18]
>
Rank (input) => (return_val)
{
   tmp = Shape (input)
   return_val = Size (tmp)
}
<
  domain:…
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_19): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0) => (float16[0] _val_16) 
E      <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_1 = Constant <value: tensor = float {0}> ()
E      _val_2 = Cast <to: int = 1> (_val_1)
E      _val_3 = Constant <value: tensor = float {1}> ()
E      _val_4 = Cast <to: int = 1> (_val_3)
E      _val_5 = Cast <to: int = 1> (input_0)
E      _val_6 = Constant <value: tensor = int64 {4}> ()
E      _val_7 = Cast <to: int = 1> (_val_6)
E      _val_8 = Constant <value: tensor = int64 {0}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_2, _val_9, _val_4)
E      _val_11 = CastLike (_val_5, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_4)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_19): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_1) => (float16[0] _val_16) 
E      <float _val_1, float _val_2, float _val_3, float _val_4, int64 _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_1 = Constant <value: tensor = float {0}> ()
E      _val_2 = Cast <to: int = 1> (_val_1)
E      _val_3 = Constant <value: tensor = float {1}> ()
E      _val_4 = Cast <to: int = 1> (_val_3)
E      _val_5 = Constant <value: tensor = int64 {50}> ()
E      _val_6 = Cast <to: int = 1> (_val_5)
E      _val_7 = Cast <to: int = 1> (input_1)
E      _val_8 = Constant <value: tensor = int64 {0}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_2, _val_9, _val_4)
E      _val_11 = CastLike (_val_6, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_4)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_17): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0, int64 input_1) => (float16[50] _val_16) 
E      <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_2 = Constant <value: tensor = float {0}> ()
E      _val_3 = Cast <to: int = 1> (_val_2)
E      _val_4 = Constant <value: tensor = float {1}> ()
E      _val_5 = Cast <to: int = 1> (_val_4)
E      _val_6 = Cast <to: int = 1> (input_0)
E      _val_7 = Cast <to: int = 1> (input_1)
E      _val_8 = Constant <value: tensor = int64 {50}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_3, _val_9, _val_5)
E      _val_11 = CastLike (_val_6, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_5)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_19): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0) => (float16[50] _val_16) 
E      <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_1 = Constant <value: tensor = float {0}> ()
E      _val_2 = Cast <to: int = 1> (_val_1)
E      _val_3 = Constant <value: tensor = float {1}> ()
E      _val_4 = Cast <to: int = 1> (_val_3)
E      _val_5 = Cast <to: int = 1> (input_0)
E      _val_6 = Constant <value: tensor = int64 {4}> ()
E      _val_7 = Cast <to: int = 1> (_val_6)
E      _val_8 = Constant <value: tensor = int64 {50}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_2, _val_9, _val_4)
E      _val_11 = CastLike (_val_5, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_4)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_19): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_1) => (float16[50] _val_16) 
E      <float _val_1, float _val_2, float _val_3, float _val_4, int64 _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_1 = Constant <value: tensor = float {0}> ()
E      _val_2 = Cast <to: int = 1> (_val_1)
E      _val_3 = Constant <value: tensor = float {1}> ()
E      _val_4 = Cast <to: int = 1> (_val_3)
E      _val_5 = Constant <value: tensor = int64 {50}> ()
E      _val_6 = Cast <to: int = 1> (_val_5)
E      _val_7 = Cast <to: int = 1> (input_1)
E      _val_8 = Constant <value: tensor = int64 {50}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_2, _val_9, _val_4)
E      _val_11 = CastLike (_val_6, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_4)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_17): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0, int64 input_1) => (float16[0] _val_16) 
E      <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_2 = Constant <value: tensor = float {0}> ()
E      _val_3 = Cast <to: int = 1> (_val_2)
E      _val_4 = Constant <value: tensor = float {1}> ()
E      _val_5 = Cast <to: int = 1> (_val_4)
E      _val_6 = Cast <to: int = 1> (input_0)
E      _val_7 = Cast <to: int = 1> (input_1)
E      _val_8 = Constant <value: tensor = int64 {0}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_3, _val_9, _val_5)
E      _val_11 = CastLike (_val_6, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_5)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_19): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0) => (float16[0] _val_16) 
E      <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_1 = Constant <value: tensor = float {0}> ()
E      _val_2 = Cast <to: int = 1> (_val_1)
E      _val_3 = Constant <value: tensor = float {1}> ()
E      _val_4 = Cast <to: int = 1> (_val_3)
E      _val_5 = Cast <to: int = 1> (input_0)
E      _val_6 = Constant <value: tensor = int64 {50}> ()
E      _val_7 = Cast <to: int = 1> (_val_6)
E      _val_8 = Constant <value: tensor = int64 {0}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_2, _val_9, _val_4)
E      _val_11 = CastLike (_val_5, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_4)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_19): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_1) => (float16[0] _val_16) 
E      <float _val_1, float _val_2, float _val_3, float _val_4, int64 _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_1 = Constant <value: tensor = float {0}> ()
E      _val_2 = Cast <to: int = 1> (_val_1)
E      _val_3 = Constant <value: tensor = float {1}> ()
E      _val_4 = Cast <to: int = 1> (_val_3)
E      _val_5 = Constant <value: tensor = int64 {50}> ()
E      _val_6 = Cast <to: int = 1> (_val_5)
E      _val_7 = Cast <to: int = 1> (input_1)
E      _val_8 = Constant <value: tensor = int64 {0}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_2, _val_9, _val_4)
E      _val_11 = CastLike (_val_6, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_4)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_17): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0, int64 input_1) => (float16[50] _val_16) 
E      <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_2 = Constant <value: tensor = float {0}> ()
E      _val_3 = Cast <to: int = 1> (_val_2)
E      _val_4 = Constant <value: tensor = float {1}> ()
E      _val_5 = Cast <to: int = 1> (_val_4)
E      _val_6 = Cast <to: int = 1> (input_0)
E      _val_7 = Cast <to: int = 1> (input_1)
E      _val_8 = Constant <value: tensor = int64 {50}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_3, _val_9, _val_5)
E      _val_11 = CastLike (_val_6, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_5)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_19): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0) => (float16[50] _val_16) 
E      <float _val_1, float _val_2, float _val_3, float _val_4, float _val_5, int64 _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_1 = Constant <value: tensor = float {0}> ()
E      _val_2 = Cast <to: int = 1> (_val_1)
E      _val_3 = Constant <value: tensor = float {1}> ()
E      _val_4 = Cast <to: int = 1> (_val_3)
E      _val_5 = Cast <to: int = 1> (input_0)
E      _val_6 = Constant <value: tensor = int64 {50}> ()
E      _val_7 = Cast <to: int = 1> (_val_6)
E      _val_8 = Constant <value: tensor = int64 {50}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_2, _val_9, _val_4)
E      _val_11 = CastLike (_val_5, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_4)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_19): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_1) => (float16[50] _val_16) 
E      <float _val_1, float _val_2, float _val_3, float _val_4, int64 _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_1 = Constant <value: tensor = float {0}> ()
E      _val_2 = Cast <to: int = 1> (_val_1)
E      _val_3 = Constant <value: tensor = float {1}> ()
E      _val_4 = Cast <to: int = 1> (_val_3)
E      _val_5 = Constant <value: tensor = int64 {50}> ()
E      _val_6 = Cast <to: int = 1> (_val_5)
E      _val_7 = Cast <to: int = 1> (input_1)
E      _val_8 = Constant <value: tensor = int64 {50}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_2, _val_9, _val_4)
E      _val_11 = CastLike (_val_6, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_4)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:522: in _capture_graph_and_evaluate_torch_script_evaluator
    onnx.checker.check_model(onnx_model, full_check=True)
.nox/test_torch_nightly/lib/python3.10/site-packages/onnx/checker.py:157: in check_model
    C.check_model(protobuf_string, full_check, skip_opset_compatibility_check)
E   onnx.onnx_cpp2py_export.shape_inference.InferenceError: [ShapeInferenceError] Inference error(s): (op_type:Add, node name: Add_17): [TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)

The above exception was the direct cause of the following exception:
onnxscript/tests/function_libs/torch_lib/ops_test.py:229: in run_test_output_match
    function_output = function_executor(test_name, reference_torch_outputs)(
onnxscript/tests/function_libs/torch_lib/ops_test_common.py:524: in _capture_graph_and_evaluate_torch_script_evaluator
    raise AssertionError(
E   AssertionError: ONNX model is invalid. Model:
E   <
E      ir_version: 8,
E      opset_import: ["" : 18, "pkg.onnxscript.torch_lib.common" : 1],
E      producer_name: "pytorch",
E      producer_version: "2.2.0"
E   >
E   main_graph (int64 input_0, int64 input_1) => (float16[50] _val_16) 
E      <float _val_2, float _val_3, float _val_4, float _val_5, float _val_6, float _val_7, int64 _val_8, float _val_9, float[unk__0] _val_10, float _val_11, float _val_12, float _val_13, float _val_14, float[unk__0] _val_15>
E   {
E      _val_2 = Constant <value: tensor = float {0}> ()
E      _val_3 = Cast <to: int = 1> (_val_2)
E      _val_4 = Constant <value: tensor = float {1}> ()
E      _val_5 = Cast <to: int = 1> (_val_4)
E      _val_6 = Cast <to: int = 1> (input_0)
E      _val_7 = Cast <to: int = 1> (input_1)
E      _val_8 = Constant <value: tensor = int64 {50}> ()
E      _val_9 = Cast <to: int = 1> (_val_8)
E      _val_10 = Range (_val_3, _val_9, _val_5)
E      _val_11 = CastLike (_val_6, _val_7)
E      _val_12 = Sub (_val_7, _val_11)
E      _val_13 = Sub (_val_9, _val_5)
E      _val_14 = Div (_val_12, _val_13)
E      _val_15 = Mul (_val_10, _val_14)
E      _val_16 = Add (_val_15, _val_11)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   Rank (input) => (return_val)
E   {
E      tmp = Shape (input)
E      return_val = Size (tmp)
E   }
E   <
E     domain: "pkg.onnxscript.torch_lib.common",
E     opset_import: ["" : 18]
E   >
E   IsScalar (input) => (return_val)
E   {
E      tmp = Shape (input)
E      tmp_0 = Size (tmp)
E      tmp_1 = Constant <value_int: int = 0> ()
E      return_val = Equal (tmp_0, tmp_1)
E   }
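
Note on the failure mode: the recurring "[TypeInferenceError] Inferred elem type differs from existing elem type: (1) vs (10)" is a dtype mismatch, since in ONNX's TensorProto encoding elem type 1 is FLOAT and 10 is FLOAT16. Every intermediate in the dumped graphs is cast to FLOAT (Cast <to: int = 1>), so the final Add infers a FLOAT result, while the graph output _val_16 is declared float16; full-check shape inference therefore rejects the model. A minimal, hypothetical sketch (not the torchlib graph) that reproduces the same checker error:

    import onnx
    from onnx import TensorProto, helper

    # Two FLOAT inputs feed an Add whose output is declared FLOAT16, mirroring the
    # mismatch in the dumps above (inferred elem type 1 vs declared elem type 10).
    x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [50])
    y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [50])
    out = helper.make_tensor_value_info("out", TensorProto.FLOAT16, [50])

    add = helper.make_node("Add", ["x", "y"], ["out"], name="Add_19")
    graph = helper.make_graph([add], "main_graph", [x, y], [out])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 18)])

    try:
        onnx.checker.check_model(model, full_check=True)
    except Exception as e:  # onnx.onnx_cpp2py_export.shape_inference.InferenceError
        print(e)  # ...Inferred elem type differs from existing elem type: (1) vs (10)

The dumps suggest the fix direction is to cast the computed result back to the requested output dtype before it leaves the graph, though the exact torchlib change is not shown in this log.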

Check warning on line 0 in onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU


@github-actions github-actions / Test Results

All 3 runs failed: test_output_match_opinfo__addmv_cpu_float16 (onnxscript.tests.function_libs.torch_lib.ops_test.TestOutputConsistencyFullGraphCPU)

artifacts/Test Results (py310-torch-nightly-macos-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-ubuntu-latest)/pytest.xml [took 0s]
artifacts/Test Results (py310-torch-nightly-windows-latest)/pytest.xml [took 0s]
Raw output
AssertionError: Tensor-likes are not close!

Mismatched elements: 1 / 5 (20.0%)
Greatest absolute difference: 0.01171875 at index (1,) (up to 1e-05 allowed)
Greatest relative difference: 0.0018596649169921875 at index (1,) (up to 0.001 allowed)
onnxscript/tests/function_libs/torch_lib/ops_test.py:266: in run_test_output_match
    torch.testing.assert_close(
E   AssertionError: Tensor-likes are not close!
E   
E   Mismatched elements: 1 / 5 (20.0%)
E   Greatest absolute difference: 0.01171875 at index (1,) (up to 1e-05 allowed)
E   Greatest relative difference: 0.0018596649169921875 at index (1,) (up to 0.001 allowed)
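
Note on the addmv float16 mismatch: 0.01171875 equals 3/256, i.e. about three float16 ULPs at the reported magnitude of roughly 6.3 (the float16 spacing for values in [4, 8) is 1/256), which exceeds the allowed atol + rtol * |expected| ≈ 1e-5 + 1e-3 * 6.3 ≈ 6.3e-3. A small illustration with made-up tensors of how torch.testing.assert_close applies those tolerances:

    import torch

    # Made-up tensors; only the magnitude (~6.3) and the 3-ULP offset mirror the report above.
    expected = torch.tensor([1.0, 6.301, 2.0, 3.0, 4.0], dtype=torch.float16)
    actual = expected.clone()
    actual[1] += 3 * 2**-8  # 0.01171875, three float16 ULPs for values in [4, 8)

    try:
        # Same tolerances as quoted above; the check is |actual - expected| <= atol + rtol * |expected|
        torch.testing.assert_close(actual, expected, rtol=1e-3, atol=1e-5)
    except AssertionError as e:
        print(e)  # Greatest absolute difference ~0.0117 at index (1,), relative ~1.9e-3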