Upgrade default ORTModule opset from 15 to 17 (#19315)
### Description

This PR upgrades ORTModule's default ONNX opset from 15 to 17. Opset 17 is the final opset supported by the TorchScript exporter (pytorch/pytorch#107829).

### Motivation and Context

Engineering excellence contribution for ORT Training DRI.

---------

Co-authored-by: Prathik Rao <[email protected]@orttrainingdev8.d32nl1ml4oruzj4qz3bqlggovf.px.internal.cloudapp.net>
prathikr and Prathik Rao authored Feb 14, 2024
1 parent 1508c2e commit 3b03b2e
Showing 13 changed files with 51 additions and 27 deletions.
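In practical terms, a model wrapped in ORTModule is now exported at ONNX opset 17 unless the ORTMODULE_ONNX_OPSET_VERSION environment variable (exercised by the tests and CI images changed below) selects a different opset. A minimal sketch, assuming onnxruntime-training and PyTorch are installed; the wrapped module is illustrative:

```python
import torch
from onnxruntime.training.ortmodule import ORTModule

# Without any override, ORTModule now exports the wrapped module at opset 17.
# A different opset can still be requested via the ORTMODULE_ONNX_OPSET_VERSION
# environment variable (see the CI image changes below), set before wrapping.
model = ORTModule(torch.nn.Linear(8, 8))
output = model(torch.randn(2, 8))  # the ONNX export happens lazily, on this first forward call
```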
@@ -39,7 +39,7 @@ def _defined_from_envvar(name, default_value, warn=True):
# NOTE: To *change* values at runtime, import onnxruntime.training.ortmodule and
# assign them new values. Importing them directly does not propagate changes.
################################################################################
-ONNX_OPSET_VERSION = 15
+ONNX_OPSET_VERSION = 17
MINIMUM_RUNTIME_PYTORCH_VERSION_STR = "1.8.1"
ORTMODULE_TORCH_CPP_DIR = os.path.join(os.path.dirname(__file__), "torch_cpp_extensions")
_FALLBACK_INIT_EXCEPTION = None
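The NOTE above is the documented way to change these module-level defaults at runtime. A minimal sketch of that pattern (the value 16 is purely illustrative):

```python
import onnxruntime.training.ortmodule as ortmodule_config

# Assigning through the module object propagates the new value to ORTModule.
ortmodule_config.ONNX_OPSET_VERSION = 16

# By contrast, a direct from-import binds a local copy, so rebinding that copy
# does not propagate:
#   from onnxruntime.training.ortmodule import ONNX_OPSET_VERSION
#   ONNX_OPSET_VERSION = 16  # has no effect on ORTModule
```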
@@ -821,3 +821,27 @@ def upsample_bicubic2d(g, input, output_size, align_corners, scale_factors):
        operator_s="upsample_bicubic2d",
        overload_name_s="vec",
    )


+@register_symbolic("layer_norm")
+@parse_args("v", "is", "v", "v", "f", "none")
+def layer_norm(g, input, normalized_shape, weight, bias, eps, cudnn_enable):
+    # normalized_shape: input shape from an expected input of size
+    # axis: The first normalization dimension.
+    # layer_norm normalizes on the last D dimensions,
+    # where D is the size of normalized_shape
+    axis = -len(normalized_shape)
+
+    res, new_running_mean, new_running_var = g.op(
+        "LayerNormalization",
+        input,
+        weight,
+        bias,
+        epsilon_f=eps,
+        axis_i=axis,
+        outputs=3,  # force all 3 outputs to be exported in training mode
+        operator_s="layer_norm",
+        overload_name_s="vec",
+    )
+
+    return res
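The new symbolic maps layer_norm onto the LayerNormalization operator, which joined the default ONNX domain in opset 17, normalizing over the last len(normalized_shape) dimensions. A hedged sketch of the same axis convention with the stock TorchScript exporter, outside ORTModule's custom registry (modules and shapes are illustrative):

```python
import torch

# normalized_shape = (8,)  ->  axis = -len(normalized_shape) = -1 (normalize over the last dim)
ln = torch.nn.LayerNorm(8)
torch.onnx.export(ln, torch.randn(2, 4, 8), "layer_norm.onnx", opset_version=17)

# normalized_shape = (4, 8)  ->  axis = -2 (normalize over the last two dims)
ln2 = torch.nn.LayerNorm([4, 8])
torch.onnx.export(ln2, torch.randn(2, 4, 8), "layer_norm_2d.onnx", opset_version=17)
```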
@@ -135,7 +135,7 @@ TEST(ComputeOptimizerTests, InsertGatherBeforeSceLoss_Allowed) {
}
};

-std::vector<int> opsets{12, 13, 14, 15};
+std::vector<int> opsets{12, 13, 14, 15, 17};
for (auto opset : opsets) {
std::unique_ptr<GraphTransformer> transformer =
std::make_unique<InsertGatherBeforeSceLoss>(compatible_eps, std::vector<std::string>{"label"});
@@ -206,7 +206,7 @@ TEST(ComputeOptimizerTests, InsertGatherBeforeSceLoss_NotAllowed_LabelNameNotMat
}
};

-std::vector<int> opsets{12, 13, 14, 15};
+std::vector<int> opsets{12, 13, 14, 15, 17};
for (auto opset : opsets) {
std::unique_ptr<GraphTransformer> transformer =
std::make_unique<InsertGatherBeforeSceLoss>(compatible_eps, std::vector<std::string>{"label"});
@@ -277,7 +277,7 @@ TEST(ComputeOptimizerTests, InsertGatherBeforeSceLoss_NotAllowed_ReduceNone) {
}
};

-std::vector<int> opsets{12, 13, 14, 15};
+std::vector<int> opsets{12, 13, 14, 15, 17};
for (auto opset : opsets) {
std::unique_ptr<GraphTransformer> transformer =
std::make_unique<InsertGatherBeforeSceLoss>(compatible_eps, std::vector<std::string>{"label"});
@@ -344,7 +344,7 @@ TEST(ComputeOptimizerTests, InsertGatherBeforeSceLoss_NotAllowed_NoIgnoreIndex)
}
};

-std::vector<int> opsets{12, 13, 14, 15};
+std::vector<int> opsets{12, 13, 14, 15, 17};
for (auto opset : opsets) {
std::unique_ptr<GraphTransformer> transformer =
std::make_unique<InsertGatherBeforeSceLoss>(compatible_eps, std::vector<std::string>{"label"});
@@ -1523,7 +1523,7 @@ TEST_F(GraphTransformationTests, ScaledSumFusionThreeInputs) {
builder.AddNode("Identity", {add2_out}, {graph_out});
};

-const std::vector<int> opsets{12, 13, 14, 15};
+const std::vector<int> opsets{12, 13, 14, 15, 17};
for (auto& opset_version : opsets) {
std::unique_ptr<GraphTransformer> transformer = std::make_unique<ScaledSumFusion>();
ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset_version, *logger_, std::move(transformer),
@@ -1616,7 +1616,7 @@ TEST_F(GraphTransformationTests, ScaledSumFusionThreeInputs_LastAddNotHaveScaleI
builder.AddNode("Identity", {add2_out}, {graph_out});
};

-const std::vector<int> opsets{12, 13, 14, 15};
+const std::vector<int> opsets{12, 13, 14, 15, 17};
for (auto& opset_version : opsets) {
std::unique_ptr<GraphTransformer> transformer = std::make_unique<ScaledSumFusion>();
ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset_version, *logger_, std::move(transformer),
@@ -1710,7 +1710,7 @@ TEST_F(GraphTransformationTests, ScaledSumFusionTwoInputs) {
builder.AddNode("Identity", {add1_out}, {graph_output2});
};

-const std::vector<int> opsets{12, 13, 14, 15};
+const std::vector<int> opsets{12, 13, 14, 15, 17};
for (auto& opset_version : opsets) {
std::unique_ptr<GraphTransformer> transformer = std::make_unique<ScaledSumFusion>();
ASSERT_STATUS_OK(TestGraphTransformer(build_test_case, opset_version, *logger_, std::move(transformer),
20 changes: 10 additions & 10 deletions orttraining/orttraining/test/optimizer/shape_optimizer_test.cc
@@ -67,7 +67,7 @@ TEST(ShapeOptimizerTests, Shape15CannotFold) {
return Status::OK();
};

-std::vector<int> opset_candidates{15};
+std::vector<int> opset_candidates{15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> identity_input_shape;
@@ -145,7 +145,7 @@ TEST(ShapeOptimizerTests, Shape15) {
return Status::OK();
};

-std::vector<int> opset_candidates{15};
+std::vector<int> opset_candidates{15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> identity_input_shape;
@@ -218,7 +218,7 @@ TEST(ShapeOptimizerTests, Shape15TakesGraphInput) {
return Status::OK();
};

-std::vector<int> opset_candidates{15};
+std::vector<int> opset_candidates{15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> shape_input_shape;
@@ -289,7 +289,7 @@ TEST(ShapeOptimizerTests, Shape15GeneratesGraphOutput) {
return Status::OK();
};

-std::vector<int> opset_candidates{15};
+std::vector<int> opset_candidates{15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> identity_input_shape;
@@ -366,7 +366,7 @@ TEST(ShapeOptimizerTests, Slice) {
return Status::OK();
};

-std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15};
+std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> shape_input_shape;
@@ -446,7 +446,7 @@ TEST(ShapeOptimizerTests, SliceGeneratesGraphOutput) {
return Status::OK();
};

-std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15};
+std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> shape_input_shape;
@@ -530,7 +530,7 @@ TEST(ShapeOptimizerTests, Gather) {
return Status::OK();
};

-std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15};
+std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> shape_input_shape;
@@ -639,7 +639,7 @@ TEST(ShapeOptimizerTests, ConcreteDimUsedBySlice) {
return Status::OK();
};

-std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15};
+std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> dropout_input_shape;
@@ -810,7 +810,7 @@ TEST(ShapeOptimizerTests, ConcreteDimUsedByGatherSlice) {
return Status::OK();
};

-std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15};
+std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> reshape_input_shape;
@@ -976,7 +976,7 @@ TEST(ShapeOptimizerTests, SymbolicDimUsedByGather_ConcreteDimUsedByGather) {
return Status::OK();
};

-std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15};
+std::vector<int> opset_candidates{10, 11, 12, 13, 14, 15, 17};
for (auto opset : opset_candidates) {
auto build_test_case = [&](ModelTestBuilder& builder) {
std::vector<std::variant<int64_t, std::string>> reshape_input_shape;
@@ -34,7 +34,7 @@
from onnxruntime.training.ortmodule._custom_gradient_registry import register_gradient
from onnxruntime.training.ortmodule.options import _SkipCheck

-DEFAULT_OPSET = 15
+DEFAULT_OPSET = 17


# PyTorch model definitions for tests
@@ -5280,7 +5280,7 @@ def run_step(model, x):
assert ort_model._torch_module._execution_manager(True)._runtime_options.onnx_opset_version == 13


-@pytest.mark.parametrize("opset_version", [12, 13, 14, 15])
+@pytest.mark.parametrize("opset_version", [12, 13, 14, 15, 17])
def test_opset_version_change(opset_version):
    original_env = None
    if "ORTMODULE_ONNX_OPSET_VERSION" in os.environ:
@@ -79,7 +79,7 @@ def run_step(model, x):
for onnx_model in [onnx_graph_inf, onnx_graph_train]:
    for oimp in onnx_model.opset_import:
        if oimp.domain == "":
-            self.assertEqual(oimp.version, 15)
+            self.assertEqual(oimp.version, 17)  # Needs to match latest default ORTModule opset
if op_grad_type is not None:
    if isinstance(op_grad_type, tuple):
        text = str(onnx_graph_train)
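The loop above walks the exported model's opset_import list, where the default (empty-string) domain must now report version 17. A hedged sketch of the same inspection on a standalone ONNX file (the filename is illustrative, assuming the onnx package is installed):

```python
import onnx

model = onnx.load("exported_model.onnx")
default_domain_version = next(
    imp.version for imp in model.opset_import if imp.domain == ""
)
print(default_domain_version)  # expected to be 17 for graphs exported with the new default
```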
@@ -13,7 +13,7 @@ stages:
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '2.0.0'
-opset_version: '15'
+opset_version: '17'
cuda_version: '11.8'
cmake_cuda_architectures: 60;61;70;75;80;86
docker_file: Dockerfile.manylinux2_28_training_cuda11_8
@@ -13,7 +13,7 @@ stages:
parameters:
build_py_parameters: --enable_training --update --build
torch_version: '2.1.0'
-opset_version: '15'
+opset_version: '17'
cuda_version: '12.2'
cmake_cuda_architectures: 70;75;80;86;90
docker_file: Dockerfile.manylinux2_28_training_cuda12_2
@@ -178,7 +178,7 @@ CMD ["/bin/bash"]
#Build manylinux2014 docker image end

ARG PYTHON_VERSION=3.8
-ARG OPSET_VERSION=15
+ARG OPSET_VERSION=17
ARG INSTALL_DEPS_EXTRA_ARGS


@@ -161,7 +161,7 @@ CMD ["/bin/bash"]
#Build manylinux2014 docker image end
ARG PYTHON_VERSION=3.9
ARG TORCH_VERSION=2.0.0
-ARG OPSET_VERSION=15
+ARG OPSET_VERSION=17
ARG INSTALL_DEPS_EXTRA_ARGS

#Add our own dependencies
@@ -161,7 +161,7 @@ CMD ["/bin/bash"]
#Build manylinux2014 docker image end
ARG PYTHON_VERSION=3.9
ARG TORCH_VERSION=2.1.0
-ARG OPSET_VERSION=15
+ARG OPSET_VERSION=17
ARG INSTALL_DEPS_EXTRA_ARGS

#Add our own dependencies
2 changes: 1 addition & 1 deletion tools/ci_build/github/pai/rocm-ci-pipeline-env.Dockerfile
@@ -131,7 +131,7 @@ RUN pip install \
# Install migraphx
RUN apt update && apt install -y migraphx

-ENV ORTMODULE_ONNX_OPSET_VERSION=15
+ENV ORTMODULE_ONNX_OPSET_VERSION=17

ARG BUILD_UID=1001
ARG BUILD_USER=onnxruntimedev