Merge branch 'pytorch:main' into main
Michiel-Olieslagers authored Dec 10, 2024
2 parents 7200ab3 + f6a87ac commit 8e912a5
Showing 84 changed files with 9,172 additions and 14,161 deletions.
5 changes: 5 additions & 0 deletions .ci/docker/build.sh
@@ -41,6 +41,10 @@ case "${IMAGE_NAME}" in
QNN_SDK=yes
CLANG_VERSION=12
;;
executorch-ubuntu-22.04-mediatek-sdk)
MEDIATEK_SDK=yes
CLANG_VERSION=12
;;
executorch-ubuntu-22.04-clang12-android)
LINTRUNNER=""
CLANG_VERSION=12
@@ -77,6 +81,7 @@ docker build \
--build-arg "BUILD_DOCS=${BUILD_DOCS}" \
--build-arg "ARM_SDK=${ARM_SDK:-}" \
--build-arg "QNN_SDK=${QNN_SDK:-}" \
--build-arg "MEDIATEK_SDK=${MEDIATEK_SDK:-}" \
--build-arg "ANDROID_NDK_VERSION=${ANDROID_NDK_VERSION:-}" \
-f "${OS}"/Dockerfile \
"$@" \
2 changes: 2 additions & 0 deletions .ci/docker/ubuntu/Dockerfile
@@ -85,5 +85,7 @@ RUN if [ -n "${ARM_SDK}" ]; then git config --global user.email "[email protected]

ARG QNN_SDK

ARG MEDIATEK_SDK

USER ci-user
CMD ["bash"]
9 changes: 9 additions & 0 deletions .ci/scripts/test_llama.sh
@@ -110,6 +110,12 @@ else
COREML=OFF
fi

if [[ "${MODE}" =~ .*quantize_kv.* ]]; then
QUANTIZE_KV_CACHE=ON
else
QUANTIZE_KV_CACHE=OFF
fi

echo "COREML option ${COREML}"

if [[ "${MODE}" =~ .*qnn.* ]]; then
@@ -249,6 +255,9 @@ if [[ "${QNN}" == "ON" ]]; then
EXPORT_ARGS+=" --tokenizer_path tokenizer.model --pt2e_quantize qnn_16a16w --calibration_tasks wikitext --calibration_limit 1 --calibration_seq_length 128 --calibration_data Once "
fi
fi
if [[ "${QUANTIZE_KV_CACHE}" == "ON" ]]; then
EXPORT_ARGS="${EXPORT_ARGS} --quantize_kv_cache"
fi
# Add dynamically linked library location
$PYTHON_EXECUTABLE -m examples.models.llama.export_llama ${EXPORT_ARGS}

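Annotation: the quantize_kv plumbing above threads a single flag through to the exporter: any MODE containing "quantize_kv" turns QUANTIZE_KV_CACHE on, which appends --quantize_kv_cache to EXPORT_ARGS. For context, KV-cache quantization typically stores attention keys/values in int8 with a per-token scale instead of full precision. Below is a minimal PyTorch sketch of one common scheme (symmetric per-token int8); it is an illustration only, not ExecuTorch's implementation.

import torch

def quantize_kv(x: torch.Tensor):
    # One scale per cached token (row), symmetric around zero.
    scale = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-8) / 127.0
    q = torch.clamp(torch.round(x / scale), -128, 127).to(torch.int8)
    return q, scale

def dequantize_kv(q: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    return q.to(torch.float32) * scale

k = torch.randn(4, 64)  # 4 cached tokens, head_dim = 64
q, s = quantize_kv(k)
assert (dequantize_kv(q, s) - k).abs().max() < 0.05  # small rounding error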
1 change: 1 addition & 0 deletions .github/workflows/docker-builds.yml
@@ -42,6 +42,7 @@ jobs:
- docker-image-name: executorch-ubuntu-22.04-linter
- docker-image-name: executorch-ubuntu-22.04-arm-sdk
- docker-image-name: executorch-ubuntu-22.04-qnn-sdk
- docker-image-name: executorch-ubuntu-22.04-mediatek-sdk
- docker-image-name: executorch-ubuntu-22.04-clang12-android
env:
DOCKER_IMAGE: 308535385114.dkr.ecr.us-east-1.amazonaws.com/executorch/${{ matrix.docker-image-name }}
20 changes: 19 additions & 1 deletion .github/workflows/pull.yml
@@ -86,7 +86,7 @@ jobs:
strategy:
matrix:
dtype: [fp32]
mode: [portable, xnnpack+custom, xnnpack+custom+qe]
mode: [portable, xnnpack+custom, xnnpack+custom+qe, xnnpack+custom+quantize_kv, xnnpack+quantize_kv]
include:
- dtype: bf16
mode: portable
@@ -504,3 +504,21 @@ jobs:
# run llama runner in eager mode
PYTHON_EXECUTABLE=python bash .ci/scripts/test_llama_runner_eager.sh
test-mediatek-models-linux:
name: test-mediatek-models-linux
uses: pytorch/test-infra/.github/workflows/linux_job.yml@main
strategy:
fail-fast: false
with:
runner: linux.24xlarge
docker-image: executorch-ubuntu-22.04-mediatek-sdk
submodules: 'true'
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
timeout: 90
script: |
# The generic Linux job chooses to use the base env, not the one set up by the image
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
conda activate "${CONDA_ENV}"
# placeholder for mediatek to add more tests
2 changes: 1 addition & 1 deletion .github/workflows/trunk.yml
@@ -225,7 +225,7 @@ jobs:
strategy:
matrix:
dtype: [fp32]
mode: [portable, xnnpack+kv+custom, mps, coreml]
mode: [portable, xnnpack+kv+custom, mps, coreml, xnnpack+custom+quantize_kv]
include:
- dtype: bf16
mode: portable
6 changes: 3 additions & 3 deletions CMakeLists.txt
@@ -742,9 +742,9 @@ if(EXECUTORCH_BUILD_PYBIND)
endif()

if(EXECUTORCH_BUILD_XNNPACK)
# need to explicitly specify XNNPACK here otherwise uses XNNPACK symbols
# from libtorch_cpu
list(APPEND _dep_libs xnnpack_backend XNNPACK)
# need to explicitly specify XNNPACK and microkernels-prod here,
# otherwise the XNNPACK and microkernels-prod symbols from libtorch_cpu are used
list(APPEND _dep_libs xnnpack_backend XNNPACK microkernels-prod)
endif()

# compile options for pybind
9 changes: 3 additions & 6 deletions backends/arm/arm_backend.py
@@ -13,7 +13,7 @@

import logging
import os
from typing import final, List, Optional
from typing import cast, final, List, Optional

import serializer.tosa_serializer as ts
from executorch.backends.arm.arm_vela import vela_compile
@@ -32,6 +32,7 @@
from executorch.exir.backend.backend_details import BackendDetails, PreprocessResult
from executorch.exir.backend.compile_spec_schema import CompileSpec
from torch.export.exported_program import ExportedProgram
from torch.fx import Node

# TOSA backend debug functionality
logger = logging.getLogger(__name__)
@@ -269,6 +270,7 @@ def preprocess(  # noqa: C901
node_visitors = get_node_visitors(edge_program, tosa_spec)
input_count = 0
for node in graph_module.graph.nodes:
node = cast(Node, node)
if node.op == "call_function":
process_call_function(node, tosa_graph, node_visitors, tosa_spec)
elif node.op == "placeholder":
@@ -288,9 +290,6 @@ def preprocess(  # noqa: C901
"The rank of the input order is not equal to amount of input tensors"
)

# TODO: It would be awesome if this dump could somehow be done on top level and not here.
# Problem is that the desc.json has to be created on the tosa_graph object, which we can't
# access from top level.
if artifact_path:
tag = _get_first_delegation_tag(graph_module)
dbg_tosa_dump(
@@ -311,6 +310,4 @@ def preprocess(  # noqa: C901
else:
raise RuntimeError(f"Unknown format {output_format}")

# Continuing from above. Can I put tosa_graph into this function?
# debug_handle_map = ...
return PreprocessResult(processed_bytes=binary)
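Annotation: the cast(Node, node) added to preprocess is a typing aid only. typing.cast returns its argument unchanged at runtime; it merely tells static checkers that the items yielded by graph_module.graph.nodes are torch.fx.Node objects. A minimal sketch of the pattern:

from typing import cast

import torch
from torch.fx import Node

gm = torch.fx.symbolic_trace(torch.nn.ReLU())
for node in gm.graph.nodes:
    node = cast(Node, node)  # no-op at runtime; narrows the type for mypy/pyright
    print(node.op, node.target)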
12 changes: 4 additions & 8 deletions backends/arm/test/common.py
@@ -74,19 +74,15 @@ def get_tosa_compile_spec_unbuilt(
the compile spec before calling .build() to finalize it.
"""
if not custom_path:
intermediate_path = maybe_get_tosa_collate_path() or tempfile.mkdtemp(
prefix="arm_tosa_"
)
else:
intermediate_path = custom_path
custom_path = maybe_get_tosa_collate_path()

if not os.path.exists(intermediate_path):
os.makedirs(intermediate_path, exist_ok=True)
if custom_path is not None:
os.makedirs(custom_path, exist_ok=True)
compile_spec_builder = (
ArmCompileSpecBuilder()
.tosa_compile_spec(tosa_version)
.set_permute_memory_format(permute_memory_to_nhwc)
.dump_intermediate_artifacts_to(intermediate_path)
.dump_intermediate_artifacts_to(custom_path)
)

return compile_spec_builder
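Annotation: after this rewrite, get_tosa_compile_spec_unbuilt no longer creates a throwaway temp directory; intermediate artifacts go to custom_path, falling back to the TOSA collate path. Hypothetical usage, with the signature and import path assumed from the call sites visible elsewhere in this diff:

import tempfile

from executorch.backends.arm.test import common  # import path assumed from the repo layout

builder = common.get_tosa_compile_spec_unbuilt(
    "TOSA-0.80.0+MI",
    permute_memory_to_nhwc=True,
    custom_path=tempfile.mkdtemp("tosa_artifacts"),
)
compile_spec = builder.build()  # finalize, per the docstring above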
4 changes: 3 additions & 1 deletion backends/arm/test/misc/test_debug_feats.py
@@ -111,7 +111,9 @@ def test_numerical_diff_prints(self):
model,
example_inputs=model.get_inputs(),
compile_spec=common.get_tosa_compile_spec(
"TOSA-0.80.0+MI", permute_memory_to_nhwc=True
"TOSA-0.80.0+MI",
permute_memory_to_nhwc=True,
custom_path=tempfile.mkdtemp("diff_print_test"),
),
)
.export()
2 changes: 1 addition & 1 deletion backends/arm/test/ops/test_cat.py
@@ -124,7 +124,7 @@ def test_cat_tosa_MI(self, operands: tuple[torch.Tensor, ...], dim: int):
def test_cat_4d_tosa_MI(self):
square = torch.ones((2, 2, 2, 2))
for dim in range(-3, 3):
test_data = ((square, square), dim)
test_data = ((square, square.clone()), dim)
self._test_cat_tosa_MI_pipeline(self.Cat(), test_data)

@parameterized.expand(Cat.test_parameters)
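Annotation: switching from (square, square) to (square, square.clone()) makes the two cat inputs distinct tensor objects with equal values. Passing the same object twice means both graph inputs alias one storage, which export and partitioning stages can collapse into a single input; presumably the clone() avoids that while keeping the test semantics. A quick illustration:

import torch

square = torch.ones((2, 2, 2, 2))
aliased = (square, square)           # the same object referenced twice
distinct = (square, square.clone())  # equal values, separate storage

assert aliased[0] is aliased[1]
assert distinct[0] is not distinct[1]
assert torch.equal(distinct[0], distinct[1])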
13 changes: 6 additions & 7 deletions backends/arm/test/ops/test_depthwise_conv.py
@@ -156,11 +156,14 @@
("two_dw_conv2d", two_dw_conv2d),
]

testsuite_conv2d_u85_xfails = [
testsuite_conv2d_u85 = [
("2x2_1x6x4x4_gp6_st1", dw_conv2d_2x2_1x6x4x4_gp6_st1),
("3x3_1x3x256x256_gp3_st1", dw_conv2d_3x3_1x3x256x256_gp3_st1),
("3x3_1x4x256x256_gp4_st1", dw_conv2d_3x3_1x4x256x256_gp4_st1),
("3x3_1x4x256x256_gp4_nobias", dw_conv2d_3x3_1x4x256x256_gp4_nobias),
]

testsuite_conv2d_u85_xfails = [
("3x3_2x8x198x198_gp8_st3", dw_conv2d_3x3_2x8x198x198_gp8_st3),
("two_dw_conv2d", two_dw_conv2d),
]
@@ -284,7 +287,7 @@ def test_dw_conv1d_u55_BI(
model.get_inputs(),
)

@parameterized.expand(testsuite_conv1d[2:])
@parameterized.expand(testsuite_conv1d + testsuite_conv2d_u85)
def test_dw_conv_u85_BI(
self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = False
):
@@ -296,12 +299,8 @@ def test_dw_conv_u85_BI(
model.get_inputs(),
)

testsuite_conv2d_u85_xfails.remove(
("3x3_1x3x256x256_gp3_st1", dw_conv2d_3x3_1x3x256x256_gp3_st1)
) # Works

# All test cases except 3x3_1x3x256x256_gp3_st1 have numerical issues on FVP. MLETORCH-520
@parameterized.expand(testsuite_conv2d_u85_xfails + testsuite_conv1d[:2])
@parameterized.expand(testsuite_conv2d_u85_xfails)
@conftest.expectedFailureOnFVP
def test_dw_conv_u85_BI_xfails(
self, test_name: str, model: torch.nn.Module, set_quantize_io: bool = False
30 changes: 28 additions & 2 deletions backends/arm/test/ops/test_div.py
@@ -183,8 +183,21 @@ def test_div_tosa_BI(
test_data = (input_, other_)
self._test_div_tosa_BI_pipeline(self.Div(), test_data)

@parameterized.expand(test_data_suite[:2])
def test_div_u55_BI(
self,
test_name: str,
input_: Union[torch.Tensor, torch.types.Number],
other_: Union[torch.Tensor, torch.types.Number],
rounding_mode: Optional[str] = None,
):
test_data = (input_, other_)
self._test_div_ethos_BI_pipeline(
self.Div(), common.get_u55_compile_spec(), test_data
)

# Numerical issues on FVP likely due to mul op, MLETORCH-521
@parameterized.expand(test_data_suite)
@parameterized.expand(test_data_suite[2:])
@conftest.expectedFailureOnFVP
def test_div_u55_BI_xfails(
self,
@@ -198,8 +211,21 @@ def test_div_u55_BI_xfails(
self.Div(), common.get_u55_compile_spec(), test_data
)

@parameterized.expand(test_data_suite[:2])
def test_div_u85_BI(
self,
test_name: str,
input_: Union[torch.Tensor, torch.types.Number],
other_: Union[torch.Tensor, torch.types.Number],
rounding_mode: Optional[str] = None,
):
test_data = (input_, other_)
self._test_div_ethos_BI_pipeline(
self.Div(), common.get_u85_compile_spec(), test_data
)

# Numerical issues on FVP likely due to mul op, MLETORCH-521
@parameterized.expand(test_data_suite)
@parameterized.expand(test_data_suite[2:])
@conftest.expectedFailureOnFVP
def test_div_u85_BI_xfails(
self,
7 changes: 1 addition & 6 deletions backends/arm/test/ops/test_mul.py
@@ -152,9 +152,7 @@ def test_mul_tosa_BI(
test_data = (input_, other_)
self._test_mul_tosa_BI_pipeline(self.Mul(), test_data)

# Numerical issues on FVP, MLETORCH-521
@parameterized.expand(test_data_sute)
@conftest.expectedFailureOnFVP
def test_mul_u55_BI(
self,
test_name: str,
@@ -166,10 +164,7 @@ def test_mul_u55_BI(
common.get_u55_compile_spec(), self.Mul(), test_data
)

# Numerical issues on FVP, MLETORCH-521
# test_data_sute[0] works on U85
@parameterized.expand(test_data_sute[1:])
@conftest.expectedFailureOnFVP
@parameterized.expand(test_data_sute)
def test_mul_u85_BI(
self,
test_name: str,
2 changes: 1 addition & 1 deletion backends/arm/test/ops/test_scalars.py
@@ -157,7 +157,7 @@ def _test_add_tosa_BI_pipeline(self, module: torch.nn.Module, test_data: tuple):
def test_MI(self, test_name: str, op: torch.nn.Module, x, y):
expected_exception = None
if any(token in test_name for token in ("Sub_int", "Sub__int")):
expected_exception = RuntimeError
expected_exception = ValueError
elif test_name.endswith("_st"):
expected_exception = AttributeError

2 changes: 0 additions & 2 deletions backends/arm/test/ops/test_select.py
@@ -93,8 +93,6 @@ def _test_select_tosa_BI_pipeline(
.check(["torch.ops.quantized_decomposed"])
.to_edge()
.partition()
.dump_artifact()
.dump_operator_distribution()
.check_count({"torch.ops.higher_order.executorch_call_delegate": 1})
.to_executorch()
.run_method_and_compare_outputs(inputs=test_data)