diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml
index 15057ec27b2b4d..8fbc1755e587d5 100644
--- a/.github/workflows/tests.yaml
+++ b/.github/workflows/tests.yaml
@@ -214,6 +214,21 @@ jobs:
                         --bridge-app ./out/linux-x64-bridge-${BUILD_VARIANT}/chip-bridge-app \
                      "

+            - name: Run purposeful failure tests using the python parser sending commands to chip-tool
+              run: |
+                  ./scripts/run_in_build_env.sh \
+                     "./scripts/tests/run_test_suite.py \
+                        --runner chip_tool_python \
+                        --include-tags PURPOSEFUL_FAILURE \
+                        --chip-tool ./out/linux-x64-chip-tool${CHIP_TOOL_VARIANT}-${BUILD_VARIANT}/chip-tool \
+                        run \
+                        --iterations 1 \
+                        --expected-failures 1 \
+                        --keep-going \
+                        --test-timeout-seconds 120 \
+                        --all-clusters-app ./out/linux-x64-all-clusters-${BUILD_VARIANT}/chip-all-clusters-app \
+                     "
+
             - name: Run Tests using chip-repl (skip slow)
               if: github.event_name == 'pull_request'
               run: |
@@ -224,6 +239,7 @@ jobs:
                         --exclude-tags FLAKY \
                         --exclude-tags IN_DEVELOPMENT \
                         --exclude-tags SLOW \
+                        --exclude-tags PURPOSEFUL_FAILURE \
                         run \
                         --iterations 1 \
                         --test-timeout-seconds 120 \
@@ -336,6 +352,21 @@ jobs:
                         --bridge-app ./out/darwin-x64-bridge-${BUILD_VARIANT}/chip-bridge-app \
                      "

+            - name: Run purposeful failure tests using the python parser sending commands to chip-tool
+              run: |
+                  ./scripts/run_in_build_env.sh \
+                     "./scripts/tests/run_test_suite.py \
+                        --runner chip_tool_python \
+                        --include-tags PURPOSEFUL_FAILURE \
+                        --chip-tool ./out/darwin-x64-chip-tool${CHIP_TOOL_VARIANT}-${BUILD_VARIANT}/chip-tool \
+                        run \
+                        --iterations 1 \
+                        --expected-failures 1 \
+                        --keep-going \
+                        --test-timeout-seconds 120 \
+                        --all-clusters-app ./out/darwin-x64-all-clusters-${BUILD_VARIANT}/chip-all-clusters-app \
+                     "
+
             - name: Uploading core files
               uses: actions/upload-artifact@v3
               if: ${{ failure() && !env.ACT }}
diff --git a/scripts/tests/chiptest/__init__.py b/scripts/tests/chiptest/__init__.py
index ce312d492beabd..bd306862ce810a 100644
--- a/scripts/tests/chiptest/__init__.py
+++ b/scripts/tests/chiptest/__init__.py
@@ -180,6 +180,13 @@ def _GetChipReplUnsupportedTests() -> Set[str]:
     }


+def _GetPurposefulFailureTests() -> Set[str]:
+    """Tests that fail in YAML on purpose."""
+    return {
+        "TestPurposefulFailureEqualities.yaml"
+    }
+
+
 def _AllYamlTests():
     yaml_test_suite_path = Path(_YAML_TEST_SUITE_PATH)

@@ -249,6 +256,7 @@ def _AllFoundYamlTests(treat_repl_unsupported_as_in_development: bool):
     slow_tests = _GetSlowTests()
     in_development_tests = _GetInDevelopmentTests()
     chip_repl_unsupported_tests = _GetChipReplUnsupportedTests()
+    purposeful_failure_tests = _GetPurposefulFailureTests()

     for path in _AllYamlTests():
         if not _IsValidYamlTest(path.name):
@@ -267,6 +275,9 @@ def _AllFoundYamlTests(treat_repl_unsupported_as_in_development: bool):
         if path.name in in_development_tests:
             tags.add(TestTag.IN_DEVELOPMENT)

+        if path.name in purposeful_failure_tests:
+            tags.add(TestTag.PURPOSEFUL_FAILURE)
+
         if treat_repl_unsupported_as_in_development and path.name in chip_repl_unsupported_tests:
             tags.add(TestTag.IN_DEVELOPMENT)

diff --git a/scripts/tests/chiptest/test_definition.py b/scripts/tests/chiptest/test_definition.py
index 08f97c98a2ea4e..8180261bcf7c5e 100644
--- a/scripts/tests/chiptest/test_definition.py
+++ b/scripts/tests/chiptest/test_definition.py
@@ -218,6 +218,7 @@ class TestTag(Enum):
     FLAKY = auto()  # test is considered flaky (usually a bug/time dependent issue)
     IN_DEVELOPMENT = auto()  # test may not pass or undergoes changes
     CHIP_TOOL_PYTHON_ONLY = auto()  # test uses YAML features only supported by the CHIP_TOOL_PYTHON runner.
+    PURPOSEFUL_FAILURE = auto()  # test fails on purpose

     def to_s(self):
         for (k, v) in TestTag.__members__.items():
diff --git a/scripts/tests/run_test_suite.py b/scripts/tests/run_test_suite.py
index dc126b4e471314..acb1e98e84a793 100755
--- a/scripts/tests/run_test_suite.py
+++ b/scripts/tests/run_test_suite.py
@@ -174,6 +174,7 @@ def main(context, dry_run, log_level, target, target_glob, target_skip_glob,
             TestTag.MANUAL,
             TestTag.IN_DEVELOPMENT,
             TestTag.FLAKY,
+            TestTag.PURPOSEFUL_FAILURE
         }

     if runtime != TestRunTime.CHIP_TOOL_PYTHON:
@@ -272,9 +273,19 @@ def cmd_list(context):
     default=None,
     type=int,
     help='If provided, fail if a test runs for longer than this time')
+@click.option(
+    '--expected-failures',
+    type=int,
+    default=0,
+    show_default=True,
+    help='Number of tests that are expected to fail in each iteration. Overall test will pass if the number of failures matches this. Nonzero values require --keep-going')
 @click.pass_context
 def cmd_run(context, iterations, all_clusters_app, lock_app, ota_provider_app, ota_requestor_app,
-            tv_app, bridge_app, chip_repl_yaml_tester, chip_tool_with_python, pics_file, keep_going, test_timeout_seconds):
+            tv_app, bridge_app, chip_repl_yaml_tester, chip_tool_with_python, pics_file, keep_going, test_timeout_seconds, expected_failures):
+    if expected_failures != 0 and not keep_going:
+        logging.exception(f"'--expected-failures {expected_failures}' used without '--keep-going'")
+        sys.exit(2)
+
     runner = chiptest.runner.Runner()

     paths_finder = PathsFinder()
@@ -326,8 +337,14 @@ def cmd_run(context, iterations, all_clusters_app, lock_app, ota_provider_app, o
     apps_register = AppsRegister()
     apps_register.init()

+    def cleanup():
+        apps_register.uninit()
+        if sys.platform == 'linux':
+            chiptest.linux.ShutdownNamespaceForTestExecution()
+
     for i in range(iterations):
         logging.info("Starting iteration %d" % (i+1))
+        observed_failures = 0
         for test in context.obj.tests:
             if context.obj.include_tags:
                 if not (test.tags & context.obj.include_tags):
@@ -356,13 +373,17 @@ def cmd_run(context, iterations, all_clusters_app, lock_app, ota_provider_app, o
                 test_end = time.monotonic()
                 logging.exception('%-30s - FAILED in %0.2f seconds' %
                                   (test.name, (test_end - test_start)))
+                observed_failures += 1
                 if not keep_going:
-                    apps_register.uninit()
+                    cleanup()
                     sys.exit(2)

-    apps_register.uninit()
-    if sys.platform == 'linux':
-        chiptest.linux.ShutdownNamespaceForTestExecution()
+        if observed_failures != expected_failures:
+            logging.exception(f'Iteration {i}: expected failure count {expected_failures}, but got {observed_failures}')
+            cleanup()
+            sys.exit(2)
+
+    cleanup()


 # On linux, allow an execution shell to be prepared
diff --git a/src/app/tests/suites/TestPurposefulFailureEqualities.yaml b/src/app/tests/suites/TestPurposefulFailureEqualities.yaml
new file mode 100644
index 00000000000000..14e886437d678e
--- /dev/null
+++ b/src/app/tests/suites/TestPurposefulFailureEqualities.yaml
@@ -0,0 +1,44 @@
+# Copyright (c) 2023 Project CHIP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+name: Test that purposefully fails in EqualityCommands
+
+config:
+    nodeId: 0x12344321
+    cluster: "EqualityCommands"
+    endpoint: 1
+
+tests:
+    - label: "Wait for the commissioned device to be retrieved"
+      cluster: "DelayCommands"
+      command: "WaitForCommissionee"
+      arguments:
+          values:
+              - name: "nodeId"
+                value: nodeId
+
+    - label:
+          "Compute the result of comparing 0 to 1 and claim that they are equal"
+      command: "UnsignedNumberEquals"
+      arguments:
+          values:
+              - name: "Value1"
+                value: 0
+              - name: "Value2"
+                value: 1
+      response:
+          - values:
+                - name: "Equals"
+                  # This is the wrong value on purpose, so this test will fail.
+                  value: true