diff --git a/.version_information b/.version_information index a4437f83..b1d30c47 100644 --- a/.version_information +++ b/.version_information @@ -1 +1 @@ -v2.11-beta1+fall2024 +v2.11-beta2+fall2024 diff --git a/alembic/versions/e2c185af1226_pics_v2_support.py b/alembic/versions/e2c185af1226_pics_v2_support.py new file mode 100644 index 00000000..ae318519 --- /dev/null +++ b/alembic/versions/e2c185af1226_pics_v2_support.py @@ -0,0 +1,70 @@ +"""pics_v2_support + +Revision ID: e2c185af1226 +Revises: 9df8004ad9bb +Create Date: 2024-06-19 11:46:15.158526 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "e2c185af1226" +down_revision = "9df8004ad9bb" +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.add_column( + "testrunexecution", + sa.Column("certification_mode", sa.Boolean(), nullable=True, default=False), + ) + op.add_column( + "testsuiteexecution", + sa.Column("mandatory", sa.Boolean(), nullable=True, default=False), + ) + + op.add_column( + "testsuitemetadata", + sa.Column("mandatory", sa.Boolean(), nullable=True, default=False), + ) + op.add_column( + "testcasemetadata", + sa.Column("mandatory", sa.Boolean(), nullable=True, default=False), + ) + + op.execute("UPDATE testrunexecution SET certification_mode = false") + op.execute("UPDATE testsuiteexecution SET mandatory = false") + op.execute("UPDATE testsuitemetadata SET mandatory = false") + op.execute("UPDATE testcasemetadata SET mandatory = false") + + op.alter_column( + "testrunexecution", + "certification_mode", + existing_type=sa.Boolean(), + nullable=False, + ) + op.alter_column( + "testsuiteexecution", "mandatory", existing_type=sa.Boolean(), nullable=False + ) + op.alter_column( + "testsuitemetadata", "mandatory", existing_type=sa.Boolean(), nullable=False + ) + op.alter_column( + "testcasemetadata", "mandatory", existing_type=sa.Boolean(), nullable=False + ) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust!
### + op.drop_column("testrunexecution", "certification_mode") + op.drop_column("testsuiteexecution", "mandatory") + op.drop_column("testsuitemetadata", "mandatory") + op.drop_column("testcasemetadata", "mandatory") + # ### end Alembic commands ### diff --git a/app/api/api_v1/endpoints/test_run_executions.py b/app/api/api_v1/endpoints/test_run_executions.py index 08544105..a67a8e98 100644 --- a/app/api/api_v1/endpoints/test_run_executions.py +++ b/app/api/api_v1/endpoints/test_run_executions.py @@ -78,11 +78,13 @@ def create_test_run_execution( db: Session = Depends(get_db), test_run_execution_in: schemas.TestRunExecutionCreate, selected_tests: schemas.TestSelection, + certification_mode: bool = False, ) -> TestRunExecution: """Create a new test run execution.""" # TODO: Remove test_run_config completely from the project test_run_execution_in.test_run_config_id = None + test_run_execution_in.certification_mode = certification_mode test_run_execution = crud.test_run_execution.create( db=db, obj_in=test_run_execution_in, selected_tests=selected_tests @@ -260,6 +262,7 @@ def repeat_test_run_execution( test_run_execution_in.description = execution_to_repeat.description test_run_execution_in.project_id = execution_to_repeat.project_id test_run_execution_in.operator_id = execution_to_repeat.operator_id + test_run_execution_in.certification_mode = execution_to_repeat.certification_mode # TODO: Remove test_run_config completely from the project test_run_execution_in.test_run_config_id = None diff --git a/app/crud/crud_test_run_execution.py b/app/crud/crud_test_run_execution.py index 1616d149..939f2bf2 100644 --- a/app/crud/crud_test_run_execution.py +++ b/app/crud/crud_test_run_execution.py @@ -164,6 +164,25 @@ def __load_stats( return result + def __sort_selected_tests( + self, selected_tests: List[TestSuiteExecution] + ) -> List[TestSuiteExecution]: + """Sort the selected test suites so that the mandatory suites are the first to + be returned.""" + sorted_selected_tests = [] + + # First add the mandatory test suites + for suite in selected_tests: + if suite.mandatory: + sorted_selected_tests.append(suite) + + # Then add the remaining test suites + for suite in selected_tests: + if not suite.mandatory: + sorted_selected_tests.append(suite) + + return sorted_selected_tests + def create( self, db: Session, @@ -196,7 +215,9 @@ def create( ) ) - test_run_execution.test_suite_executions.extend(test_suites) + # Sort the test suites so that mandatory suites come first + test_suites_sorted = self.__sort_selected_tests(test_suites) + test_run_execution.test_suite_executions.extend(test_suites_sorted) db.commit() db.refresh(test_run_execution) diff --git a/app/models/test_case_metadata.py b/app/models/test_case_metadata.py index 3d29ae86..0c759a53 100644 --- a/app/models/test_case_metadata.py +++ b/app/models/test_case_metadata.py @@ -35,6 +35,7 @@ class TestCaseMetadata(Base): description: Mapped[str] = mapped_column(Text, nullable=False) version: Mapped[str] = mapped_column(nullable=False) source_hash: Mapped[str] = mapped_column(VARCHAR(64), nullable=False, index=True) + mandatory: Mapped[bool] = mapped_column(default=False, nullable=False) created_at: Mapped[datetime] = mapped_column(default=datetime.now, nullable=False) diff --git a/app/models/test_enums.py b/app/models/test_enums.py index ceeba799..ce23ef58 100644 --- a/app/models/test_enums.py +++ b/app/models/test_enums.py @@ -24,5 +24,5 @@ class TestStateEnum(str, Enum): PASSED = "passed" # Test Passed with no issues FAILED = "failed" # Test Failed ERROR = 
"error" # Test Error due to tool setup or environment - NOT_APPLICABLE = "not_applicable" # TODO: Do we need this for full cert runs? + NOT_APPLICABLE = "not_applicable" # Test is not applicable - e.g. PICS mismatch CANCELLED = "cancelled" diff --git a/app/models/test_run_execution.py b/app/models/test_run_execution.py index 5118e531..08694eec 100644 --- a/app/models/test_run_execution.py +++ b/app/models/test_run_execution.py @@ -50,6 +50,7 @@ class TestRunExecution(Base): completed_at: Mapped[Optional[datetime]] archived_at: Mapped[Optional[datetime]] imported_at: Mapped[Optional[datetime]] + certification_mode: Mapped[bool] = mapped_column(default=False, nullable=False) description: Mapped[Optional[str]] = mapped_column(default=None, nullable=True) diff --git a/app/models/test_suite_execution.py b/app/models/test_suite_execution.py index 6ec16dbb..6028f07d 100644 --- a/app/models/test_suite_execution.py +++ b/app/models/test_suite_execution.py @@ -38,6 +38,7 @@ class TestSuiteExecution(Base): public_id: Mapped[str] = mapped_column(nullable=False) execution_index: Mapped[int] = mapped_column(nullable=False) collection_id: Mapped[str] = mapped_column(nullable=False) + mandatory: Mapped[bool] = mapped_column(default=False, nullable=False) state: Mapped[TestStateEnum] = mapped_column( Enum(TestStateEnum), nullable=False, default=TestStateEnum.PENDING diff --git a/app/models/test_suite_metadata.py b/app/models/test_suite_metadata.py index 441a1568..4fc5118e 100644 --- a/app/models/test_suite_metadata.py +++ b/app/models/test_suite_metadata.py @@ -35,6 +35,7 @@ class TestSuiteMetadata(Base): description: Mapped[str] = mapped_column(Text, nullable=False) version: Mapped[str] = mapped_column(nullable=False) source_hash: Mapped[str] = mapped_column(VARCHAR(64), nullable=False, index=True) + mandatory: Mapped[bool] = mapped_column(default=False, nullable=False) created_at: Mapped[datetime] = mapped_column(default=datetime.now) diff --git a/app/pics_applicable_test_cases.py b/app/pics_applicable_test_cases.py index 2f9b106e..b5f81909 100644 --- a/app/pics_applicable_test_cases.py +++ b/app/pics_applicable_test_cases.py @@ -13,9 +13,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from typing import Dict + from loguru import logger from app.schemas.pics import PICS, PICSApplicableTestCases +from app.test_engine.models.test_declarations import TestCollectionDeclaration from app.test_engine.test_script_manager import test_script_manager @@ -29,7 +32,7 @@ def applicable_test_cases_list(pics: PICS) -> PICSApplicableTestCases: PICSApplicableTestCases: List of test cases that are applicable for this Project """ - applicable_tests: set = set() + applicable_tests: list = [] if len(pics.clusters) == 0: # If the user has not uploaded any PICS @@ -40,15 +43,37 @@ def applicable_test_cases_list(pics: PICS) -> PICSApplicableTestCases: test_collections = test_script_manager.test_collections enabled_pics = set([item.number for item in pics.all_enabled_items()]) - for test_collection in test_collections.values(): - for test_suite in test_collection.test_suites.values(): - for test_case in test_suite.test_cases.values(): - if len(test_case.pics) == 0: - # Test cases without pics required are always applicable - applicable_tests.add(test_case.metadata["title"]) - elif len(test_case.pics) > 0: - if test_case.pics.issubset(enabled_pics): - applicable_tests.add(test_case.metadata["title"]) + applicable_mandatories_tests = __applicable_test_cases( + test_collections, enabled_pics, True + ) + applicable_remaining_tests = __applicable_test_cases( + test_collections, enabled_pics, False + ) + + # Add the mandatory test cases first + applicable_tests.extend(applicable_mandatories_tests) + # Add the remaining test cases + applicable_tests.extend(applicable_remaining_tests) logger.debug(f"Applicable test cases: {applicable_tests}") return PICSApplicableTestCases(test_cases=applicable_tests) + + +def __applicable_test_cases( + test_collections: Dict[str, TestCollectionDeclaration], + enabled_pics: set[str], + mandatory: bool, +) -> list: + applicable_tests: list = [] + + for test_collection in test_collections.values(): + if test_collection.mandatory == mandatory: + for test_suite in test_collection.test_suites.values(): + for test_case in test_suite.test_cases.values(): + if len(test_case.pics) == 0: + # Test cases without required PICS are always applicable + applicable_tests.append(test_case.metadata["title"]) + elif len(test_case.pics) > 0: + if test_case.pics.issubset(enabled_pics): + applicable_tests.append(test_case.metadata["title"]) + return applicable_tests diff --git a/app/schemas/pics.py b/app/schemas/pics.py index ce801c42..21dc0e67 100644 --- a/app/schemas/pics.py +++ b/app/schemas/pics.py @@ -38,7 +38,7 @@ def all_enabled_items(self) -> list[PICSItem]: class PICSApplicableTestCases(BaseModel): - test_cases: set[str] + test_cases: list[str] class PICSError(Exception): diff --git a/app/schemas/test_case_metadata.py b/app/schemas/test_case_metadata.py index f7eb2653..2ba0b4fd 100644 --- a/app/schemas/test_case_metadata.py +++ b/app/schemas/test_case_metadata.py @@ -26,6 +26,7 @@ class TestCaseMetadataBase(BaseModel): description: str version: str source_hash: str + mandatory: bool = False class Config: orm_mode = True diff --git a/app/schemas/test_collections.py b/app/schemas/test_collections.py index b262de93..b981e6cf 100644 --- a/app/schemas/test_collections.py +++ b/app/schemas/test_collections.py @@ -23,6 +23,7 @@ class TestMetadata(BaseModel): version: str title: str description: str + mandatory: bool = False class TestCase(BaseModel): diff --git a/app/schemas/test_run_execution.py b/app/schemas/test_run_execution.py index 88c423ff..087addc3 100644 --- 
a/app/schemas/test_run_execution.py +++ b/app/schemas/test_run_execution.py @@ -39,6 +39,7 @@ class TestRunExecutionBase(BaseModel): title: str description: Optional[str] + certification_mode: bool = False # Base + properties that represent relationships diff --git a/app/schemas/test_suite_execution.py b/app/schemas/test_suite_execution.py index f97f9d39..ff27368c 100644 --- a/app/schemas/test_suite_execution.py +++ b/app/schemas/test_suite_execution.py @@ -31,6 +31,7 @@ class TestSuiteExecutionBase(BaseModel): public_id: str execution_index: int collection_id: str + mandatory: bool = False # Properties shared by models stored in DB diff --git a/app/schemas/test_suite_metadata.py b/app/schemas/test_suite_metadata.py index e97be99c..cdb7c53c 100644 --- a/app/schemas/test_suite_metadata.py +++ b/app/schemas/test_suite_metadata.py @@ -26,6 +26,7 @@ class TestSuiteMetadataBase(BaseModel): description: str version: str source_hash: str + mandatory: bool = False class Config: orm_mode = True diff --git a/app/test_engine/models/test_case.py b/app/test_engine/models/test_case.py index 7c285c04..1c98119f 100644 --- a/app/test_engine/models/test_case.py +++ b/app/test_engine/models/test_case.py @@ -130,6 +130,11 @@ def __compute_state(self) -> TestStateEnum: if self.errors is not None and len(self.errors) > 0: return TestStateEnum.ERROR + # Test cases that have already been marked as not applicable should not + # change state + if self.state == TestStateEnum.NOT_APPLICABLE: + return TestStateEnum.NOT_APPLICABLE + # Note: These loops cannot be easily coalesced as we need to iterate through # and assign Test Case State in order. if self.any_steps_with_state(TestStateEnum.CANCELLED): @@ -147,10 +152,14 @@ def __compute_state(self) -> TestStateEnum: return TestStateEnum.PASSED def any_steps_with_state(self, state: TestStateEnum) -> bool: - return any(ts for ts in self.test_steps if ts.state == state) + return any(ts.state == state for ts in self.test_steps) def completed(self) -> bool: - return self.state not in [TestStateEnum.PENDING, TestStateEnum.EXECUTING] + return self.state not in [ + TestStateEnum.PENDING, + TestStateEnum.EXECUTING, + TestStateEnum.NOT_APPLICABLE, + ] def __cancel_remaning_test_steps(self) -> None: for step in self.test_steps: @@ -171,7 +180,9 @@ def mark_as_completed(self) -> None: if self.completed(): return self.state = self.__compute_state() - logger.info(f"Test Case Completed[{self.state.name}]: {self.metadata['title']}") + logger.info( + f"Test Case Completed [{self.state.name}]: {self.metadata['title']}" + ) self.__print_log_separator() def mark_as_executing(self) -> None: @@ -269,6 +280,10 @@ def mark_step_failure(self, msg: Union[str, Exception]) -> None: self.current_test_step.append_failure(message) + def mark_as_not_applicable(self) -> None: + self.state = TestStateEnum.NOT_APPLICABLE + logger.warning(f"Test Case Not Applicable: {self.metadata['public_id']}") + def next_step(self) -> None: if self.current_test_step_index + 1 >= len(self.test_steps): return diff --git a/app/test_engine/models/test_declarations.py b/app/test_engine/models/test_declarations.py index 21f5788d..81ac0bb9 100644 --- a/app/test_engine/models/test_declarations.py +++ b/app/test_engine/models/test_declarations.py @@ -42,9 +42,10 @@ def public_id(self) -> str: class TestSuiteDeclaration(object): - def __init__(self, class_ref: Type[TestSuite]) -> None: + def __init__(self, class_ref: Type[TestSuite], mandatory: bool = False) -> None: self.class_ref = class_ref self.test_cases: Dict[str, 
TestCaseDeclaration] = {} + self.mandatory: bool = mandatory @property def public_id(self) -> str: @@ -100,10 +101,11 @@ def metadata(self) -> TestMetadata: class TestCollectionDeclaration(object): - def __init__(self, path: str, name: str) -> None: + def __init__(self, path: str, name: str, mandatory: bool = False) -> None: self.name = name self.path = path self.test_suites: Dict[str, TestSuiteDeclaration] = {} + self.mandatory = mandatory def add_test_suite(self, suite: TestSuiteDeclaration) -> None: self.test_suites[suite.public_id] = suite diff --git a/app/test_engine/models/test_run.py b/app/test_engine/models/test_run.py index 14e821e6..a922e159 100644 --- a/app/test_engine/models/test_run.py +++ b/app/test_engine/models/test_run.py @@ -23,11 +23,13 @@ from app.test_engine.logger import test_engine_logger as logger from app.test_engine.test_observable import TestObservable from app.test_engine.test_observer import Observer +from app.user_prompt_support.prompt_request import OptionsSelectPromptRequest +from app.user_prompt_support.user_prompt_support import UserPromptSupport from .test_suite import TestSuite -class TestRun(TestObservable): +class TestRun(TestObservable, UserPromptSupport): """ Test run is a run-time object for a test_run """ @@ -72,18 +74,21 @@ def __compute_state(self) -> TestStateEnum: # Note: These loops cannot be easily coalesced as we need to iterate through # and assign Test Suite State in order. - if any(ts for ts in self.test_suites if ts.state == TestStateEnum.CANCELLED): + if any(ts.state == TestStateEnum.CANCELLED for ts in self.test_suites): return TestStateEnum.CANCELLED - if any(ts for ts in self.test_suites if ts.state == TestStateEnum.ERROR): + if any(ts.state == TestStateEnum.ERROR for ts in self.test_suites): return TestStateEnum.ERROR - if any(ts for ts in self.test_suites if ts.state == TestStateEnum.FAILED): + if any(ts.state == TestStateEnum.FAILED for ts in self.test_suites): return TestStateEnum.FAILED - if any(ts for ts in self.test_suites if ts.state == TestStateEnum.PENDING): + if any(ts.state == TestStateEnum.PENDING for ts in self.test_suites): return TestStateEnum.PENDING + if all(ts.state == TestStateEnum.NOT_APPLICABLE for ts in self.test_suites): + return TestStateEnum.NOT_APPLICABLE + return TestStateEnum.PASSED def completed(self) -> bool: @@ -107,7 +112,7 @@ async def run(self) -> None: self.__current_testing_task = create_task(self.__run_handle_errors()) await self.__current_testing_task except CancelledError: - logger.error("User cancelled test run") + logger.error("The test run has been cancelled") self.__cancel_remaining_tests() finally: self.__current_testing_task = None @@ -120,6 +125,31 @@ async def __run_handle_errors(self) -> None: self.current_test_suite = test_suite await test_suite.run() + # Check whether a mandatory suite failed + if ( + self.test_run_execution.certification_mode + and test_suite.mandatory + and any( + tc.state != TestStateEnum.PASSED for tc in test_suite.test_cases + ) + ): + logger.error("Mandatory test suite failed; aborting execution") + self.__cancel_remaining_tests() + self.cancel() + await self.__display_mandatory_test_failure_prompt() + break + + async def __display_mandatory_test_failure_prompt(self) -> None: + prompt = ( + "At least one of the mandatory test cases failed while running in " + "certification mode.\nAs a consequence, the remaining tests were cancelled." 
+ ) + options = {"OK": 1} + prompt_request = OptionsSelectPromptRequest(prompt=prompt, options=options) + + logger.info(f'User prompt: "{prompt}"') + await self.send_prompt_request(prompt_request) + def cancel(self) -> None: """This will abort execution of the current test suite, and mark all remaining tests as cancelled.""" diff --git a/app/test_engine/models/test_suite.py b/app/test_engine/models/test_suite.py index c22a6d5f..1b5486d1 100644 --- a/app/test_engine/models/test_suite.py +++ b/app/test_engine/models/test_suite.py @@ -47,6 +47,7 @@ def __init__(self, test_suite_execution: TestSuiteExecution): self.test_cases: List[TestCase] = [] self.__state = TestStateEnum.PENDING self.errors: List[str] = [] + self.mandatory: bool = test_suite_execution.mandatory @property def project(self) -> Project: @@ -84,18 +85,21 @@ def __compute_state(self) -> TestStateEnum: # Note: These loops cannot be easily coalesced as we need to iterate through # and assign Test Suite State in order. - if any(tc for tc in self.test_cases if tc.state == TestStateEnum.CANCELLED): + if any(tc.state == TestStateEnum.CANCELLED for tc in self.test_cases): return TestStateEnum.CANCELLED - if any(tc for tc in self.test_cases if tc.state == TestStateEnum.ERROR): + if any(tc.state == TestStateEnum.ERROR for tc in self.test_cases): return TestStateEnum.ERROR - if any(tc for tc in self.test_cases if tc.state == TestStateEnum.FAILED): + if any(tc.state == TestStateEnum.FAILED for tc in self.test_cases): return TestStateEnum.FAILED - if any(tc for tc in self.test_cases if tc.state == TestStateEnum.PENDING): + if any(tc.state == TestStateEnum.PENDING for tc in self.test_cases): return TestStateEnum.PENDING + if all(tc.state == TestStateEnum.NOT_APPLICABLE for tc in self.test_cases): + return TestStateEnum.NOT_APPLICABLE + return TestStateEnum.PASSED def completed(self) -> bool: diff --git a/app/test_engine/test_collection_discovery.py b/app/test_engine/test_collection_discovery.py index 3e239fb3..29afb022 100644 --- a/app/test_engine/test_collection_discovery.py +++ b/app/test_engine/test_collection_discovery.py @@ -266,7 +266,11 @@ def __find_test_suite( if not test_cases: return None - suite_declaration = TestSuiteDeclaration(suite) + mandatory = False + if "mandatory" in suite.metadata: + mandatory = suite.metadata["mandatory"] # type: ignore + + suite_declaration = TestSuiteDeclaration(suite, mandatory=mandatory) for test in test_cases: test_declaration = TestCaseDeclaration(test) suite_declaration.test_cases[test.public_id()] = test_declaration diff --git a/app/test_engine/test_script_manager.py b/app/test_engine/test_script_manager.py index 756b4ea7..efcbc01c 100644 --- a/app/test_engine/test_script_manager.py +++ b/app/test_engine/test_script_manager.py @@ -145,6 +145,7 @@ def __pending_test_suite_execution( public_id=metadata.public_id, test_suite_metadata=metadata, collection_id=test_collection.name, + mandatory=test_suite.mandatory, ) return test_suite_execution diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py index c24ca88e..cba0091f 100644 --- a/app/tests/api/api_v1/test_test_run_executions.py +++ b/app/tests/api/api_v1/test_test_run_executions.py @@ -207,6 +207,7 @@ def test_create_test_run_execution_with_test_run_config_and_selected_tests_succe "title": title, "description": description, "test_run_config_id": None, + "certification_mode": False, }, ) @@ -253,7 +254,117 @@ def 
test_create_test_run_execution_with_selected_tests_with_two_suites_succeeds( response=response, expected_status_code=HTTPStatus.OK, expected_keys=["id", "test_suite_executions"], - expected_content={"title": title, "description": description}, + expected_content={ + "title": title, + "description": description, + "certification_mode": False, + }, ) + + content = response.json() + suites = content.get("test_suite_executions") + returned_suites = [s["public_id"] for s in suites] + selected_tests = json_data["selected_tests"]["sample_tests"].keys() + for selected_suite in selected_tests: + assert selected_suite in returned_suites + + +def test_create_test_run_execution_cert_with_test_run_config_and_selected_tests_succeeds( + client: TestClient, db: Session +) -> None: + """This test will create a new test run execution. A success is expected. + The selected tests are passed directly by JSON payload. + A reference to a test run config is also included, but this is ignored by + the API by assigning None. + """ + + test_run_config = create_random_test_run_config(db) + title = "TestRunExecutionFoo" + description = random_lower_string() + json_data = { + "test_run_execution_in": { + "title": title, + "description": description, + "test_run_config_id": test_run_config.id, + }, + "selected_tests": { + "sample_tests": { + "SampleTestSuite1": { + "TCSS1001": 1, + "TCSS1002": 2, + "TCSS1003": 4, + "TCSS1004": 8, + "TCSS1005": 16, + }, + }, + }, + } + response = client.post( + f"{settings.API_V1_STR}/test_run_executions/?certification_mode=True", + json=json_data, + ) + validate_json_response( + response=response, + expected_status_code=HTTPStatus.OK, + expected_content={ + "title": title, + "description": description, + "test_run_config_id": None, + "certification_mode": True, + }, + ) + + +def test_create_test_run_execution_cert_with_selected_tests_with_3_suites_succeeds( + client: TestClient, +) -> None: + """This test will create a new test run execution with three suites selected. + A success is expected. + The selected tests are passed directly by JSON payload. 
+ """ + + title = "TestRunExecutionFoo" + description = random_lower_string() + json_data = { + "test_run_execution_in": { + "title": title, + "description": description, + }, + "selected_tests": { + "sample_tests": { + "SampleTestSuite1": { + "TCSS1001": 1, + "TCSS1002": 2, + "TCSS1003": 4, + "TCSS1004": 8, + "TCSS1005": 16, + }, + "SampleTestSuite2": { + "TCSS2001": 1, + "TCSS2002": 2, + "TCSS2003": 4, + "TCSS2004": 8, + "TCSS2005": 16, + }, + "SampleTestSuite3Mandatory": { + "TCSS3001": 1, + }, + }, + }, + } + response = client.post( + f"{settings.API_V1_STR}/test_run_executions/?certification_mode=True", + json=json_data, + ) + validate_json_response( + response=response, + expected_status_code=HTTPStatus.OK, + expected_keys=["id", "test_suite_executions"], + expected_content={ + "title": title, + "description": description, + "certification_mode": True, + }, ) content = response.json() @@ -319,7 +430,10 @@ def test_repeat_existing_test_run_execution_with_two_suites_succeeds( response=response, expected_status_code=HTTPStatus.OK, expected_keys=["id", "title", "description", "test_suite_executions"], - expected_content={"description": test_run_execution.description}, + expected_content={ + "description": test_run_execution.description, + "certification_mode": False, + }, ) content = response.json() @@ -355,6 +469,80 @@ def test_repeat_existing_test_run_execution_with_title_succeeds( response=response, expected_status_code=HTTPStatus.OK, expected_keys=["id", "title", "description", "test_suite_executions"], + expected_content={"certification_mode": False}, + ) + content = response.json() + assert title == remove_title_date(content.get("title")) + + +def test_repeat_existing_test_run_execution_certification_mode_with_two_suites_succeeds( + client: TestClient, db: Session +) -> None: + """This test will repeat all the tests from a previous created test run execution. + A success is expected. + We use a sample collection, suites and test cases to create the test run execution + that will be repeated. The title and description are provided by the JSON payload. + """ + + selected_tests = { + "sample_tests": { + "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2, "TCSS1003": 3}, + "SampleTestSuite2": {"TCSS2004": 4, "TCSS2005": 5, "TCSS2006": 6}, + } + } + test_run_execution = create_random_test_run_execution( + db=db, selected_tests=selected_tests, certification_mode=True + ) + + base_title = remove_title_date(test_run_execution.title) + response = client.post( + f"{settings.API_V1_STR}/test_run_executions/{test_run_execution.id}/repeat", + ) + + validate_json_response( + response=response, + expected_status_code=HTTPStatus.OK, + expected_keys=["id", "title", "description", "test_suite_executions"], + expected_content={ + "description": test_run_execution.description, + "certification_mode": True, + }, + ) + + content = response.json() + assert test_run_execution.id != content.get("id") + assert base_title == remove_title_date(content.get("title")) + + suites = content.get("test_suite_executions") + returned_suites = [s["public_id"] for s in suites] + for selected_suite in selected_tests["sample_tests"].keys(): + assert selected_suite in returned_suites + + +def test_repeat_existing_test_run_execution_certification_mode_with_title_succeeds( + client: TestClient, db: Session +) -> None: + """This test will repeat all the tests from a previous created test run execution, + with a custom title instead of the old name. + A success is expected. 
+ """ + title = "TestRunExecutionFoo" + selected_tests = { + "sample_tests": { + "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2, "TCSS1003": 3} + } + } + test_run_execution = create_random_test_run_execution( + db=db, selected_tests=selected_tests, certification_mode=True + ) + url = f"{settings.API_V1_STR}/test_run_executions/{test_run_execution.id}/repeat" + response = client.post(url + f"?title={title}") + + validate_json_response( + response=response, + expected_status_code=HTTPStatus.OK, + expected_keys=["id", "title", "description", "test_suite_executions"], + expected_content={"certification_mode": True}, ) content = response.json() assert title == remove_title_date(content.get("title")) @@ -867,7 +1055,7 @@ def test_archive_project(client: TestClient, db: Session) -> None: validate_json_response( response=response, expected_status_code=HTTPStatus.OK, - expected_content={"id": test_run_execution.id}, + expected_content={"id": test_run_execution.id, "certification_mode": False}, expected_keys=["archived_at"], ) @@ -883,6 +1071,38 @@ def test_unarchive_test_run_execution(client: TestClient, db: Session) -> None: expected_content={ "id": test_run_execution.id, "archived_at": None, + "certification_mode": False, + }, + ) + + +def test_archive_project_certification_mode(client: TestClient, db: Session) -> None: + test_run_execution = create_random_test_run_execution(db, certification_mode=True) + response = client.post( + f"{settings.API_V1_STR}/test_run_executions/{test_run_execution.id}/archive" + ) + validate_json_response( + response=response, + expected_status_code=HTTPStatus.OK, + expected_content={"id": test_run_execution.id, "certification_mode": True}, + expected_keys=["archived_at"], + ) + + +def test_unarchive_test_run_execution_certification_mode( + client: TestClient, db: Session +) -> None: + test_run_execution = create_random_test_run_execution(db, certification_mode=True) + response = client.post( + f"{settings.API_V1_STR}/test_run_executions/{test_run_execution.id}/unarchive" + ) + validate_json_response( + response=response, + expected_status_code=HTTPStatus.OK, + expected_content={ + "id": test_run_execution.id, + "archived_at": None, + "certification_mode": True, }, ) diff --git a/app/tests/api/api_v1/test_test_run_executions_import_export.py b/app/tests/api/api_v1/test_test_run_executions_import_export.py index 50ebbbe7..ba3a9a74 100644 --- a/app/tests/api/api_v1/test_test_run_executions_import_export.py +++ b/app/tests/api/api_v1/test_test_run_executions_import_export.py @@ -80,6 +80,7 @@ def test_export_test_run_execution_successfully() -> None: state=TestStateEnum.PASSED, created_at="2023-05-23T21:43:43.543147", log=log, + certification_mode=False, ) with mock.patch( diff --git a/app/tests/crud/test_test_run_execution.py b/app/tests/crud/test_test_run_execution.py index 64f7169a..775e0d91 100644 --- a/app/tests/crud/test_test_run_execution.py +++ b/app/tests/crud/test_test_run_execution.py @@ -319,6 +319,7 @@ def test_create_test_run_execution_from_test_run_config(db: Session) -> None: # Assert direct properties assert test_run_execution.title == test_run_execution_title assert test_run_execution.test_run_config_id == test_run_config.id + assert not test_run_execution.certification_mode # Assert created test_suite_executions test_suite_executions = test_run_execution.test_suite_executions @@ -343,6 +344,77 @@ def test_create_test_run_execution_from_test_run_config(db: Session) -> None: assert missing_count == 0 +def 
test_create_test_run_execution_certification_from_selected_tests_mandatory_suite( + db: Session, +) -> None: + first_test_suite_identifier = "SampleTestSuite1" + first_test_case_identifier = "TCSS1001" + first_mandatory_test_case_identifier = "TCSS3001" + + selected_tests = { + "sample_tests": { + first_test_suite_identifier: { + first_test_case_identifier: 1, + "TCSS1002": 2, + "TCSS1003": 3, + }, + "SampleTestSuite2": { + "TCSS2001": 2, + }, + "SampleTestSuite3Mandatory": { + "TCSS3001": 1, + }, + }, + } + + total_test_case_count = ( + sum(selected_tests["sample_tests"][first_test_suite_identifier].values()) + + sum(selected_tests["sample_tests"]["SampleTestSuite2"].values()) + + sum(selected_tests["sample_tests"]["SampleTestSuite3Mandatory"].values()) + ) + + # Prepare data for test_run_execution + test_run_execution_title = "Test Execution title" + test_run_execution_data = TestRunExecutionCreate( + title=test_run_execution_title, certification_mode=True + ) + + test_run_execution = crud.test_run_execution.create( + db=db, obj_in=test_run_execution_data, selected_tests=selected_tests + ) + + # Assert direct properties + assert test_run_execution.title == test_run_execution_title + assert test_run_execution.test_run_config_id is None + assert test_run_execution.certification_mode + + # Assert created test_suite_executions + test_suite_executions = test_run_execution.test_suite_executions + assert len(test_suite_executions) > 0 + + total_test_case_executions = 0 + for test_suite_execution in test_suite_executions: + total_test_case_executions += len(test_suite_execution.test_case_executions) + + assert total_test_case_executions == total_test_case_count + + first_test_case_execution = test_suite_executions[0].test_case_executions[0] + assert first_test_case_execution.public_id == first_mandatory_test_case_identifier + + remaining_test_cases = selected_tests["sample_tests"] + for test_suite_execution in test_suite_executions: + for test_case_execution in test_suite_execution.test_case_executions: + public_id = test_case_execution.public_id + + assert public_id in remaining_test_cases[test_suite_execution.public_id] + remaining_test_cases[test_suite_execution.public_id][public_id] -= 1 + + # Assert the correct number of test cases were created + for r in remaining_test_cases: + for _, missing_count in remaining_test_cases[r].items(): + assert missing_count == 0 + + def test_create_test_run_execution_from_selected_tests(db: Session) -> None: first_test_suite_identifier = "SampleTestSuite1" first_test_case_identifier = "TCSS1001" @@ -371,6 +443,139 @@ def test_create_test_run_execution_from_selected_tests(db: Session) -> None: # Assert direct properties assert test_run_execution.title == test_run_execution_title assert test_run_execution.test_run_config_id is None + assert not test_run_execution.certification_mode + + # Assert created test_suite_executions + test_suite_executions = test_run_execution.test_suite_executions + assert len(test_suite_executions) > 0 + + first_test_suite_execution = test_suite_executions[0] + test_case_executions = first_test_suite_execution.test_case_executions + assert len(test_case_executions) == total_test_case_count + + first_test_case_execution = test_case_executions[0] + assert first_test_case_execution.public_id == first_test_case_identifier + + remaining_test_cases = selected_tests["sample_tests"][first_test_suite_identifier] + for test_case_execution in test_case_executions: + public_id = test_case_execution.public_id + # Assert all test case public 
IDs match + assert public_id in remaining_test_cases + remaining_test_cases[public_id] -= 1 + + # Assert the correct number of test cases were created + for _, missing_count in remaining_test_cases.items(): + assert missing_count == 0 + + +def test_create_test_run_execution_certification_mode_from_test_run_config( + db: Session, +) -> None: + # Build a new test_run_config object + name = random_lower_string() + dut_name = random_lower_string() + first_test_suite_identifier = "SampleTestSuite1" + first_test_case_identifier = "TCSS1001" + + selected_tests = { + "sample_tests": { + first_test_suite_identifier: { + first_test_case_identifier: 1, + "TCSS1002": 2, + "TCSS1003": 3, + } + } + } + + total_test_case_count = sum( + selected_tests["sample_tests"][first_test_suite_identifier].values() + ) + test_run_config_dict = random_test_run_config_dict( + name=name, + dut_name=dut_name, + selected_tests=selected_tests, + ) + + test_run_config_in = TestRunConfigCreate( + **test_run_config_dict, certification_mode=True + ) + + # Save the new test_run_config in the DB + test_run_config = crud.test_run_config.create(db=db, obj_in=test_run_config_in) + + # Prepare data for test_run_execution + test_run_execution_title = "Test Execution title" + test_run_execution_data = TestRunExecutionCreate( + title=test_run_execution_title, + test_run_config_id=test_run_config.id, + certification_mode=True, + ) + + test_run_execution = crud.test_run_execution.create( + db=db, obj_in=test_run_execution_data + ) + + # Assert direct properties + assert test_run_execution.title == test_run_execution_title + assert test_run_execution.test_run_config_id == test_run_config.id + assert test_run_execution.certification_mode + + # Assert created test_suite_executions + test_suite_executions = test_run_execution.test_suite_executions + assert len(test_suite_executions) > 0 + + first_test_suite_execution = test_suite_executions[0] + test_case_executions = first_test_suite_execution.test_case_executions + assert len(test_case_executions) == total_test_case_count + + first_test_case_execution = test_case_executions[0] + assert first_test_case_execution.public_id == first_test_case_identifier + + remaining_test_cases = selected_tests["sample_tests"][first_test_suite_identifier] + for test_case_execution in test_case_executions: + public_id = test_case_execution.public_id + # Assert all test case public IDs match + assert public_id in remaining_test_cases + remaining_test_cases[public_id] -= 1 + + # Assert the correct number of test cases were created + for _, missing_count in remaining_test_cases.items(): + assert missing_count == 0 + + +def test_create_test_run_execution_certification_mode_from_selected_tests( + db: Session, +) -> None: + first_test_suite_identifier = "SampleTestSuite1" + first_test_case_identifier = "TCSS1001" + selected_tests = { + "sample_tests": { + first_test_suite_identifier: { + first_test_case_identifier: 1, + "TCSS1002": 2, + "TCSS1003": 3, + } + } + } + + total_test_case_count = sum( + selected_tests["sample_tests"][first_test_suite_identifier].values() + ) + + # Prepare data for test_run_execution + test_run_execution_title = "Test Execution title" + test_run_execution_data = TestRunExecutionCreate( + title=test_run_execution_title, certification_mode=True + ) + + test_run_execution = crud.test_run_execution.create( + db=db, obj_in=test_run_execution_data, selected_tests=selected_tests + ) + + # Assert direct properties + assert test_run_execution.title == test_run_execution_title + assert 
test_run_execution.test_run_config_id is None + assert test_run_execution.certification_mode # Assert created test_suite_executions test_suite_executions = test_run_execution.test_suite_executions diff --git a/app/tests/test_engine/test_runner.py b/app/tests/test_engine/test_runner.py index ec2b4162..39c1fbe1 100644 --- a/app/tests/test_engine/test_runner.py +++ b/app/tests/test_engine/test_runner.py @@ -35,7 +35,13 @@ load_and_run_tool_unit_tests, load_test_run_for_test_cases, ) -from test_collections.tool_unit_tests.test_suite_expected import TestSuiteExpected +from test_collections.tool_unit_tests.test_suite_expected import ( + TestSuiteExpected, + TestSuiteExpected2, +) +from test_collections.tool_unit_tests.test_suite_expected.tctr_expected_case_not_applicable import ( # noqa: E501 + TCTRExpectedCaseNotApplicable, +) from test_collections.tool_unit_tests.test_suite_expected.tctr_expected_error import ( TCTRExpectedError, ) @@ -310,6 +316,74 @@ async def test_runner_test_step_not_applicable(db: Session) -> None: assert case.test_steps[1].state == TestStateEnum.NOT_APPLICABLE +@pytest.mark.asyncio +async def test_runner_not_all_test_cases_not_applicable(db: Session) -> None: + """Load and run a test run where only some test cases are not applicable. + + Args: + db (Session): Database fixture for creating test models. + """ + + test_cases = { + "tool_unit_tests": { + TestSuiteExpected.public_id(): { + TCTRExpectedCaseNotApplicable.public_id(): 1, + TCTRExpectedPass.public_id(): 1, + }, + TestSuiteExpected2.public_id(): { + TCTRExpectedCaseNotApplicable.public_id(): 1, + }, + } + } + + runner = load_test_run_for_test_cases(db, test_cases) + run = runner.test_run + assert run is not None + + await runner.run() + + assert runner.state == TestRunnerState.IDLE + assert run.state == TestStateEnum.PASSED + assert run.test_suites[0].state == TestStateEnum.PASSED + assert run.test_suites[0].test_cases[0].state == TestStateEnum.NOT_APPLICABLE + assert run.test_suites[0].test_cases[1].state == TestStateEnum.PASSED + assert run.test_suites[1].state == TestStateEnum.NOT_APPLICABLE + assert run.test_suites[1].test_cases[0].state == TestStateEnum.NOT_APPLICABLE + + +@pytest.mark.asyncio +async def test_runner_all_test_cases_not_applicable(db: Session) -> None: + """Load and run a test run where all test cases are not applicable. + + Args: + db (Session): Database fixture for creating test models. + """ + + test_cases = { + "tool_unit_tests": { + TestSuiteExpected.public_id(): { + TCTRExpectedCaseNotApplicable.public_id(): 1, + }, + TestSuiteExpected2.public_id(): { + TCTRExpectedCaseNotApplicable.public_id(): 1, + }, + } + } + + runner = load_test_run_for_test_cases(db, test_cases) + run = runner.test_run + assert run is not None + + await runner.run() + + assert runner.state == TestRunnerState.IDLE + assert run.state == TestStateEnum.NOT_APPLICABLE + assert run.test_suites[0].state == TestStateEnum.NOT_APPLICABLE + assert run.test_suites[0].test_cases[0].state == TestStateEnum.NOT_APPLICABLE + assert run.test_suites[1].state == TestStateEnum.NOT_APPLICABLE + assert run.test_suites[1].test_cases[0].state == TestStateEnum.NOT_APPLICABLE + + @pytest.mark.asyncio async def test_test_runner_load_finished_test_run(db: Session) -> None: """Load a test_run_execution that has already been executed. 
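The two runner tests above pin down how NOT_APPLICABLE propagates upward: CANCELLED, ERROR, FAILED, and PENDING each take precedence in that order, and a suite or run only becomes NOT_APPLICABLE when every child is NOT_APPLICABLE, so a mix of PASSED and NOT_APPLICABLE children still yields PASSED. A minimal sketch of that rule (not part of the diff), using a hypothetical compute_aggregate_state helper in place of the private __compute_state methods the diff touches:

from enum import Enum

class TestStateEnum(str, Enum):
    PENDING = "pending"
    PASSED = "passed"
    FAILED = "failed"
    ERROR = "error"
    NOT_APPLICABLE = "not_applicable"
    CANCELLED = "cancelled"

def compute_aggregate_state(children: list[TestStateEnum]) -> TestStateEnum:
    # The highest-precedence child state wins, mirroring the check order in
    # TestSuite.__compute_state and TestRun.__compute_state above.
    for state in (
        TestStateEnum.CANCELLED,
        TestStateEnum.ERROR,
        TestStateEnum.FAILED,
        TestStateEnum.PENDING,
    ):
        if any(child == state for child in children):
            return state
    # NOT_APPLICABLE only bubbles up when *all* children are not applicable.
    if all(child == TestStateEnum.NOT_APPLICABLE for child in children):
        return TestStateEnum.NOT_APPLICABLE
    return TestStateEnum.PASSED

# e.g. [PASSED, NOT_APPLICABLE] -> PASSED, matching
# test_runner_not_all_test_cases_not_applicable above.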
diff --git a/app/tests/test_log_utils.py b/app/tests/test_log_utils.py index 64905daa..3f985243 100644 --- a/app/tests/test_log_utils.py +++ b/app/tests/test_log_utils.py @@ -208,7 +208,23 @@ ), TestRunLogEntry( level="INFO", - timestamp=1684878245.0, + timestamp=1684878254.0, + message="Test suite 1 case 3 log 0", + test_suite_execution_index=1, + test_case_execution_index=3, + test_step_execution_index=None, + ), + TestRunLogEntry( + level="INFO", + timestamp=1684878255.0, + message="Test suite 1 case 3 step 0 log 0", + test_suite_execution_index=1, + test_case_execution_index=3, + test_step_execution_index=0, + ), + TestRunLogEntry( + level="INFO", + timestamp=1684878265.0, message="Test suite 1 log 2", test_suite_execution_index=1, test_case_execution_index=None, @@ -216,7 +232,7 @@ ), TestRunLogEntry( level="INFO", - timestamp=1684878246.0, + timestamp=1684878276.0, message="General log 3", test_suite_execution_index=None, test_case_execution_index=None, @@ -366,6 +382,31 @@ ), test_step_executions=[], ), + schemas.TestCaseExecution( + state=TestStateEnum.NOT_APPLICABLE, + public_id="TC-Y-1.4", + execution_index=1, + id=5, + test_suite_execution_id=2, + test_case_metadata_id=5, + test_case_metadata=schemas.TestCaseMetadata( + public_id="TC-Y-1.4", + title="[TC-Y-1.4] Title", + description="Fourth test case", + version="1.0", + source_hash="abc123", + id=5, + ), + test_step_executions=[ + schemas.TestStepExecution( + state=TestStateEnum.PASSED, + title="First step", + execution_index=0, + id=5, + test_case_execution_id=5, + ) + ], + ), ], test_suite_metadata=schemas.TestSuiteMetadata( public_id="Suite1", @@ -392,11 +433,13 @@ def test_group_test_run_execution_logs() -> None: assert len(grouped_logs.suites) == 2 assert len(grouped_logs.suites["Suite0"]) == 4 assert len(grouped_logs.suites["Suite1"]) == 3 - assert len(grouped_logs.cases) == 3 + assert len(grouped_logs.cases) == 4 assert len(grouped_logs.cases[TestStateEnum.PASSED]) == 2 assert len(grouped_logs.cases[TestStateEnum.ERROR]) == 1 assert len(grouped_logs.cases[TestStateEnum.FAILED]) == 1 + assert len(grouped_logs.cases[TestStateEnum.NOT_APPLICABLE]) == 1 assert len(grouped_logs.cases[TestStateEnum.PASSED]["TC-X-1.1"]) == 5 assert len(grouped_logs.cases[TestStateEnum.PASSED]["TC-Y-1.3"]) == 1 assert len(grouped_logs.cases[TestStateEnum.ERROR]["TC-Y-1.1"]) == 5 assert len(grouped_logs.cases[TestStateEnum.FAILED]["TC-Y-1.2"]) == 3 + assert len(grouped_logs.cases[TestStateEnum.NOT_APPLICABLE]["TC-Y-1.4"]) == 2 diff --git a/app/tests/utils/test_run_execution.py b/app/tests/utils/test_run_execution.py index d3e54a8b..e0e76123 100644 --- a/app/tests/utils/test_run_execution.py +++ b/app/tests/utils/test_run_execution.py @@ -37,6 +37,7 @@ def random_test_run_execution_dict( project_id: Optional[int] = None, operator_id: Optional[int] = None, description: Optional[str] = None, + certification_mode: Optional[bool] = False, ) -> Dict[str, Any]: output: Dict[str, Any] = {} @@ -69,6 +70,10 @@ def random_test_run_execution_dict( if description is not None: output["description"] = description + # certification_mode is optional, include if present + if certification_mode is not None: + output["certification_mode"] = certification_mode + return output @@ -145,6 +150,7 @@ def create_test_run_execution_with_some_test_cases( "imported_at": None, "archived_at": None, "created_at": "2023-05-23T21:43:31.038050", + "certification_mode": False, "log": [ { "level": "INFO", @@ -169,6 +175,7 @@ def create_test_run_execution_with_some_test_cases( 
"description": "FirstChipToolSuite", "version": "0.0.1", "source_hash": "de7f3c1390cd283f91f74a334aaf0ec3", + "mandatory": False, }, "execution_index": 0, "collection_id": "SDK YAML Tests", @@ -180,12 +187,14 @@ def create_test_run_execution_with_some_test_cases( "completed_at": "2023-05-23T21:44:28.937346", "errors": [], "created_at": "2023-05-23T21:43:31.451550", + "mandatory": False, "test_case_metadata": { "public_id": "TC-ACE-1.1", "title": "TC-ACE-1.1", "description": "42.1.1. [TC-ACE-1.1] Privileges", "version": "0.0.1", "source_hash": "de7f3c1390cd283f91f74a334aaf0ec3", + "mandatory": False, }, "execution_index": 0, "test_step_executions": [ diff --git a/cspell.json b/cspell.json index 04ff090c..17e760ba 100644 --- a/cspell.json +++ b/cspell.json @@ -37,6 +37,7 @@ "libdbus", "libgirepository", "loguru", + "mandatories", "MANYTOONE", "mypy", "noqa", @@ -59,7 +60,9 @@ "unpairing", "wlan", "yamls", - "yamltests" + "yamltests", + "wifipaf", + "WIFIPAF" ], "ignoreWords": [ "bdist", diff --git a/test_collections/matter/__init__.py b/test_collections/matter/__init__.py index 757e0312..e29eee29 100644 --- a/test_collections/matter/__init__.py +++ b/test_collections/matter/__init__.py @@ -13,9 +13,14 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from .python_tests import onboarding_payload_collection -from .sdk_tests.support.python_testing import ( - custom_python_collection, - sdk_python_collection, -) -from .sdk_tests.support.yaml_tests import custom_collection, sdk_collection +import os + +# Verify if this execution comes from python_tests_validator. +if not os.getenv("DRY_RUN"): + from .python_tests import onboarding_payload_collection + from .sdk_tests.support.python_testing import ( + custom_python_collection, + sdk_mandatory_python_collection, + sdk_python_collection, + ) + from .sdk_tests.support.yaml_tests import custom_collection, sdk_collection diff --git a/test_collections/matter/config.py b/test_collections/matter/config.py index 9d5ac7fe..a5c11433 100644 --- a/test_collections/matter/config.py +++ b/test_collections/matter/config.py @@ -23,9 +23,9 @@ class MatterSettings(BaseSettings): # SDK Docker Image SDK_DOCKER_IMAGE: str = "connectedhomeip/chip-cert-bins" - SDK_DOCKER_TAG: str = "5db170117d3afd5b4c815c1c181cc8646779cc5a" + SDK_DOCKER_TAG: str = "d0d91272068f267cf880f9d56787ca28da885673" # SDK SHA: used to fetch test YAML from SDK. - SDK_SHA: str = "5db170117d3afd5b4c815c1c181cc8646779cc5a" + SDK_SHA: str = "d0d91272068f267cf880f9d56787ca28da885673" class Config: case_sensitive = True diff --git a/test_collections/matter/scripts/OTBR/otbr_srp_restart.sh b/test_collections/matter/scripts/OTBR/otbr_srp_restart.sh index dc3eea9f..c34d1b35 100755 --- a/test_collections/matter/scripts/OTBR/otbr_srp_restart.sh +++ b/test_collections/matter/scripts/OTBR/otbr_srp_restart.sh @@ -14,7 +14,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -ROOT_DIR=$(realpath $(dirname "$0")/../..) +ROOT_DIR=$(realpath $(dirname "$0")/../../../../..) 
TH_SCRIPTS_DIR="$ROOT_DIR/scripts" source "$TH_SCRIPTS_DIR/utils.sh" diff --git a/test_collections/matter/scripts/OTBR/otbr_start.sh b/test_collections/matter/scripts/OTBR/otbr_start.sh index a24f14b9..16306656 100755 --- a/test_collections/matter/scripts/OTBR/otbr_start.sh +++ b/test_collections/matter/scripts/OTBR/otbr_start.sh @@ -14,11 +14,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -ROOT_DIR=$(realpath $(dirname "$0")/../..) +ROOT_DIR=$(realpath $(dirname "$0")/../../../../..) TH_SCRIPTS_DIR="$ROOT_DIR/scripts" DEFAULT_OTBR_INTERFACE="eth0" -BR_INTERFACE=${1:-$DEAFULT_OTBR_INTERFACE} +BR_INTERFACE=${1:-$DEFAULT_OTBR_INTERFACE} BR_VARIANT="35" BR_CHANNEL=25 BR_IMAGE_BASE="nrfconnect/otbr" diff --git a/test_collections/matter/scripts/OTBR/otbr_stop.sh b/test_collections/matter/scripts/OTBR/otbr_stop.sh index 59c096a2..58b9c280 100755 --- a/test_collections/matter/scripts/OTBR/otbr_stop.sh +++ b/test_collections/matter/scripts/OTBR/otbr_stop.sh @@ -14,7 +14,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -ROOT_DIR=$(realpath $(dirname "$0")/../..) +ROOT_DIR=$(realpath $(dirname "$0")/../../../../..) TH_SCRIPTS_DIR="$ROOT_DIR/scripts" source "$TH_SCRIPTS_DIR/utils.sh" diff --git a/test_collections/matter/scripts/validate_sdk_python_tests_scripts.sh b/test_collections/matter/scripts/validate_sdk_python_tests_scripts.sh new file mode 100755 index 00000000..e4245ab4 --- /dev/null +++ b/test_collections/matter/scripts/validate_sdk_python_tests_scripts.sh @@ -0,0 +1,67 @@ +#! /usr/bin/env bash + + # + # Copyright (c) 2024 Project CHIP Authors + # + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. + +MATTER_PROGRAM_DIR=$(realpath $(dirname "$0")/..) 
+ +if [ $# == 1 ]; then + SDK_SHA=$1 +elif [ $# == 0 ]; then + # Get configured SDK_SHA (will default to value in test_collections/matter/config.py) + SDK_SHA=$(cat $MATTER_PROGRAM_DIR/config.py | grep SDK_SHA | cut -d'"' -f 2 | cut -d"'" -f 2) +else + echo "Usage:" + echo "./scripts/validate_sdk_python_tests_scripts.sh [sdk_sha]" + echo "Optional: The SDK SHA to check out the Python Test Scripts" + exit 1 +fi + +printf "Using SDK SHA: $SDK_SHA\n" + +create_checkout_dir() +{ + temp_dir="/tmp/SDKPythonTestValidation-$SDK_SHA" + if [ -d "$temp_dir" ]; then + rm -rf $temp_dir + fi + + mkdir "$temp_dir" + echo "$temp_dir" +} + +CHECKOUT_DIR=$(create_checkout_dir) +SDK_PYTHON_SCRIPT_PATH="src/python_testing" +PYTHON_SCRIPT_PATH="$CHECKOUT_DIR/$SDK_PYTHON_SCRIPT_PATH" +VALIDATION_SCRIPT="$MATTER_PROGRAM_DIR/sdk_tests/support/python_testing/validate_python_test_scripts.py" +LOG_FILE="Log-$SDK_SHA.txt" + +# Checkout SDK sparsely +cd $CHECKOUT_DIR +git clone --filter=blob:none --no-checkout --depth 1 --sparse https://github.com/project-chip/connectedhomeip.git $CHECKOUT_DIR +git sparse-checkout init +git sparse-checkout set $SDK_PYTHON_SCRIPT_PATH +git checkout -q $SDK_SHA + +python_scripts=() +for script in $PYTHON_SCRIPT_PATH/*.py +do + python_scripts+=("$script") +done + +DRY_RUN=1 python $VALIDATION_SCRIPT "$LOG_FILE" "${python_scripts[@]}" + +printf "Please check the log file: $CHECKOUT_DIR/$LOG_FILE\n" + diff --git a/test_collections/matter/sdk_tests/scripts/fetch_sdk_tests_and_runner.sh b/test_collections/matter/sdk_tests/scripts/fetch_sdk_tests_and_runner.sh index 5012f352..12daa993 100755 --- a/test_collections/matter/sdk_tests/scripts/fetch_sdk_tests_and_runner.sh +++ b/test_collections/matter/sdk_tests/scripts/fetch_sdk_tests_and_runner.sh @@ -30,7 +30,6 @@ TMP_SDK_PATH="/tmp/$TMP_SDK_FOLDER" SDK_YAML_PATH="src/app/tests/suites/certification" SDK_PYTHON_SCRIPT_PATH="src/python_testing" -SDK_PYTHON_DATA_MODEL_PATH="data_model" SDK_SCRIPTS_PATH="scripts/" SDK_EXAMPLE_CHIP_TOOL_PATH="examples/chip-tool" SDK_EXAMPLE_PLACEHOLDER_PATH="examples/placeholder" @@ -101,7 +100,7 @@ then git clone --filter=blob:none --no-checkout --depth 1 --sparse https://github.com/project-chip/connectedhomeip.git $TMP_SDK_FOLDER cd $TMP_SDK_FOLDER git sparse-checkout init - git sparse-checkout set $SDK_YAML_PATH $SDK_SCRIPTS_PATH $SDK_EXAMPLE_PLACEHOLDER_PATH $SDK_EXAMPLE_CHIP_TOOL_PATH $SDK_DATA_MODEL_PATH $SDK_PYTHON_SCRIPT_PATH $SDK_PYTHON_DATA_MODEL_PATH + git sparse-checkout set $SDK_YAML_PATH $SDK_SCRIPTS_PATH $SDK_EXAMPLE_PLACEHOLDER_PATH $SDK_EXAMPLE_CHIP_TOOL_PATH $SDK_DATA_MODEL_PATH $SDK_PYTHON_SCRIPT_PATH git checkout -q $SDK_SHA SDK_PATH="$TMP_SDK_PATH" fi @@ -131,11 +130,6 @@ cp * "$SDK_YAML_DIR_YAML_TEST_COLLECTION_PATH/" cd "$SDK_PATH/$SDK_PYTHON_SCRIPT_PATH" cp -R * "$PYTHON_TESTING_SCRIPTS_TEST_COLLECTION_PATH/" -# Copy XML data models for SDK Python Testing -cd "$SDK_PATH/$SDK_PYTHON_DATA_MODEL_PATH" -mkdir -p "$PYTHON_TESTING_TEST_COLLECTION_PATH/data_model" -cp -R * "$PYTHON_TESTING_TEST_COLLECTION_PATH/data_model" - ### # Extract sdk runner and dependencies ### diff --git a/test_collections/matter/sdk_tests/support/chip/chip_server.py b/test_collections/matter/sdk_tests/support/chip/chip_server.py index 1a3decd2..bd27147b 100644 --- a/test_collections/matter/sdk_tests/support/chip/chip_server.py +++ b/test_collections/matter/sdk_tests/support/chip/chip_server.py @@ -15,7 +15,6 @@ # from __future__ import annotations -from asyncio import sleep from datetime import datetime from enum import Enum 
from pathlib import Path @@ -101,10 +100,6 @@ async def __wait_for_server_start(self, log_generator: Generator) -> bool: self.logger.log(CHIPTOOL_LEVEL, line) return True self.logger.log(CHIPTOOL_LEVEL, line) - # TODO - Workaround for truncated log from chip-tool - # https://github.com/project-chip/connectedhomeip/issues/34237 - await sleep(1) - return True else: return False diff --git a/test_collections/matter/sdk_tests/support/python_testing/__init__.py b/test_collections/matter/sdk_tests/support/python_testing/__init__.py index 9895805e..6ffac78f 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/__init__.py +++ b/test_collections/matter/sdk_tests/support/python_testing/__init__.py @@ -13,16 +13,27 @@ # See the License for the specific language governing permissions and # limitations under the License. # -from typing import Optional +import os -from app.test_engine.models.test_declarations import TestCollectionDeclaration +# Verify if this execution comes from python_tests_validator. +if not os.getenv("DRY_RUN"): + from typing import Optional -from .sdk_python_tests import custom_python_test_collection, sdk_python_test_collection + from app.test_engine.models.test_declarations import TestCollectionDeclaration -# Test engine will auto load TestCollectionDeclarations declared inside the package -# initializer -sdk_python_collection: TestCollectionDeclaration = sdk_python_test_collection() + from .sdk_python_tests import ( + custom_python_test_collection, + sdk_mandatory_python_test_collection, + sdk_python_test_collection, + ) -custom_python_collection: Optional[ - TestCollectionDeclaration -] = custom_python_test_collection() + # Test engine will auto load TestCollectionDeclarations declared inside the package + # initializer + sdk_python_collection: TestCollectionDeclaration = sdk_python_test_collection() + sdk_mandatory_python_collection: TestCollectionDeclaration = ( + sdk_mandatory_python_test_collection() + ) + + custom_python_collection: Optional[ + TestCollectionDeclaration + ] = custom_python_test_collection() diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_models.py b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_models.py index c5e6991e..cd5cf9a6 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_models.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_models.py @@ -28,9 +28,11 @@ class PythonTestType(Enum): # - PythonTestType.NO_COMMISSIONING: test cases that follow the expected template # but don't have a commissioning first step # - PythonTestType.LEGACY: test cases that don't follow the expected template + # - PythonTestType.MANDATORY: test cases that are mandatory COMMISSIONING = 1 NO_COMMISSIONING = 2 LEGACY = 3 + MANDATORY = 4 class PythonTest(MatterTest): diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py index c8f6fe1d..4fd3539d 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/python_test_parser.py @@ -29,6 +29,13 @@ FunctionDefType = Union[ast.FunctionDef, ast.AsyncFunctionDef] +mandatory_python_tcs_public_id = [ + "TC_IDM_10_2", + "TC_IDM_10_3", + "TC_IDM_10_4", + "TC_IDM_12_1", +] + def parse_python_script(path: Path) -> list[PythonTest]: """Parse a 
python file into a list of PythonTest models. @@ -174,10 +181,14 @@ def __parse_test_case( # - PythonTestType.NO_COMMISSIONING: test cases that follow the expected template # but don't have a commissioning first step # - PythonTestType.LEGACY: test cases that don't follow the expected template + # - PythonTestType.MANDATORY: Mandatory test cases # We use the desc_[test_name] method as an indicator that the test case follows the # expected template python_test_type = PythonTestType.LEGACY - if tc_is_commissioning: + + if tc_name in mandatory_python_tcs_public_id: + python_test_type = PythonTestType.MANDATORY + elif tc_is_commissioning: python_test_type = PythonTestType.COMMISSIONING elif desc_method: python_test_type = PythonTestType.NO_COMMISSIONING diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/python_testing_hooks_proxy.py b/test_collections/matter/sdk_tests/support/python_testing/models/python_testing_hooks_proxy.py index 8e16dae6..b74dd7a2 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/python_testing_hooks_proxy.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/python_testing_hooks_proxy.py @@ -26,6 +26,7 @@ class SDKPythonTestResultEnum(str, Enum): STOP = "stop" TEST_START = "test_start" TEST_STOP = "test_stop" + TEST_SKIPPED = "test_skipped" STEP_SKIPPED = "step_skipped" STEP_START = "step_start" STEP_SUCCESS = "step_success" @@ -62,10 +63,16 @@ class SDKPythonTestResultTestStart(SDKPythonTestResultBase): class SDKPythonTestResultTestStop(SDKPythonTestResultBase): type = SDKPythonTestResultEnum.TEST_STOP - duration: Optional[str] + duration: Optional[int] exception: Any +class SDKPythonTestResultTestSkipped(SDKPythonTestResultBase): + type = SDKPythonTestResultEnum.TEST_SKIPPED + filename: Optional[str] + name: Optional[str] + + class SDKPythonTestResultStepSkipped(SDKPythonTestResultBase): type = SDKPythonTestResultEnum.STEP_SKIPPED name: Optional[str] @@ -145,12 +152,12 @@ def test_start( def test_stop(self, exception: Exception, duration: int) -> None: self.results.put( - SDKPythonTestResultTestStop( - exception=exception, - duration=duration, - ) + SDKPythonTestResultTestStop(exception=exception, duration=duration) ) + def test_skipped(self, filename: str, name: str) -> None: + self.results.put(SDKPythonTestResultTestSkipped(filename=filename, name=name)) + def step_skipped(self, name: str, expression: str) -> None: self.results.put(SDKPythonTestResultStepSkipped(expression=expression)) diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/test_case.py b/test_collections/matter/sdk_tests/support/python_testing/models/test_case.py index ad0f43c3..8deda8c5 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/test_case.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/test_case.py @@ -133,15 +133,18 @@ def test_start( def test_stop(self, exception: Exception, duration: int) -> None: self.test_stop_called = True + def test_skipped(self, filename: str, name: str) -> None: + self.mark_as_not_applicable() + self.skip_to_last_step() + def step_skipped(self, name: str, expression: str) -> None: self.current_test_step.mark_as_not_applicable("Test step skipped") - self.step_over() def step_start(self, name: str) -> None: - pass + self.step_over() def step_success(self, logger: Any, logs: str, duration: int, request: Any) -> None: - self.step_over() + pass def step_failure( self, logger: Any, logs: str, duration: int, request: Any, received: 
Any @@ -185,23 +188,30 @@ def pics(cls) -> set[str]: return cls.python_test.PICS @classmethod - def class_factory(cls, test: PythonTest, python_test_version: str) -> Type[T]: + def class_factory( + cls, test: PythonTest, python_test_version: str, mandatory: bool + ) -> Type[T]: """Dynamically declares a subclass based on the type of Python test.""" case_class: Type[PythonTestCase] if test.python_test_type == PythonTestType.NO_COMMISSIONING: case_class = NoCommissioningPythonTestCase - elif test.python_test_type == PythonTestType.LEGACY: + elif ( + test.python_test_type == PythonTestType.LEGACY + or test.python_test_type == PythonTestType.MANDATORY + ): case_class = LegacyPythonTestCase else: # Commissioning case_class = PythonTestCase return case_class.__class_factory( - test=test, python_test_version=python_test_version + test=test, python_test_version=python_test_version, mandatory=mandatory ) @classmethod - def __class_factory(cls, test: PythonTest, python_test_version: str) -> Type[T]: + def __class_factory( + cls, test: PythonTest, python_test_version: str, mandatory: bool + ) -> Type[T]: """class factory method for PythonTestCase.""" title = cls.__title(test.name) class_name = cls.__class_name(test.name) @@ -219,6 +229,7 @@ def __class_factory(cls, test: PythonTest, python_test_version: str) -> Type[T]: "version": "0.0.1", "title": title, "description": test.description, + "mandatory": mandatory, }, }, ) @@ -320,6 +331,9 @@ async def execute(self) -> None: # have already been moved to the correct step by the hooks' step methods. if self.python_test.python_test_type == PythonTestType.LEGACY: self.next_step() + + if self.current_test_step_index < len(self.test_steps) - 1: + self.skip_to_last_step() logger.info("---- Start of Python test logs ----") self.handle_logs_temp() diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/test_declarations.py b/test_collections/matter/sdk_tests/support/python_testing/models/test_declarations.py index 38e7be03..f8ff26d8 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/test_declarations.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/test_declarations.py @@ -28,8 +28,10 @@ class PythonCollectionDeclaration(TestCollectionDeclaration): - def __init__(self, folder: SDKTestFolder, name: str) -> None: - super().__init__(path=str(folder.path), name=name) + def __init__( + self, folder: SDKTestFolder, name: str, mandatory: bool = False + ) -> None: + super().__init__(path=str(folder.path), name=name, mandatory=mandatory) self.python_test_version = folder.version @@ -38,13 +40,21 @@ class PythonSuiteDeclaration(TestSuiteDeclaration): class_ref: Type[PythonTestSuite] - def __init__(self, name: str, suite_type: SuiteType, version: str) -> None: + def __init__( + self, + name: str, + suite_type: SuiteType, + version: str, + mandatory: bool = False, + ) -> None: super().__init__( PythonTestSuite.class_factory( name=name, suite_type=suite_type, python_test_version=version, - ) + mandatory=mandatory, + ), + mandatory=mandatory, ) @@ -53,10 +63,12 @@ class PythonCaseDeclaration(TestCaseDeclaration): class_ref: Type[PythonTestCase] - def __init__(self, test: PythonTest, python_test_version: str) -> None: + def __init__( + self, test: PythonTest, python_test_version: str, mandatory: bool + ) -> None: super().__init__( PythonTestCase.class_factory( - test=test, python_test_version=python_test_version + test=test, python_test_version=python_test_version, mandatory=mandatory ) ) diff --git 
a/test_collections/matter/sdk_tests/support/python_testing/models/test_suite.py b/test_collections/matter/sdk_tests/support/python_testing/models/test_suite.py index 92a609e6..fbf1258b 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/test_suite.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/test_suite.py @@ -30,6 +30,7 @@ class SuiteType(Enum): COMMISSIONING = 1 NO_COMMISSIONING = 2 LEGACY = 3 + MANDATORY = 4 # Custom Type variable used to annotate the factory methods of classmethod. @@ -49,7 +50,7 @@ class PythonTestSuite(TestSuite): @classmethod def class_factory( - cls, suite_type: SuiteType, name: str, python_test_version: str + cls, suite_type: SuiteType, name: str, python_test_version: str, mandatory: bool ) -> Type[T]: """Dynamically declares a subclass based on the type of test suite.""" suite_class: Type[PythonTestSuite] @@ -60,11 +61,13 @@ def class_factory( suite_class = PythonTestSuite return suite_class.__class_factory( - name=name, python_test_version=python_test_version + name=name, python_test_version=python_test_version, mandatory=mandatory ) @classmethod - def __class_factory(cls, name: str, python_test_version: str) -> Type[T]: + def __class_factory( + cls, name: str, python_test_version: str, mandatory: bool + ) -> Type[T]: """Common class factory method for all subclasses of PythonTestSuite.""" return type( @@ -80,6 +83,7 @@ def __class_factory(cls, name: str, python_test_version: str) -> Type[T]: "version": "0.0.1", "title": name, "description": name, + "mandatory": mandatory, }, }, ) diff --git a/test_collections/matter/sdk_tests/support/python_testing/models/utils.py b/test_collections/matter/sdk_tests/support/python_testing/models/utils.py index 477112ca..9a62f6a1 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/models/utils.py +++ b/test_collections/matter/sdk_tests/support/python_testing/models/utils.py @@ -46,17 +46,26 @@ def generate_command_arguments( # Increase log level by adding trace log if dut_config.trace_log: arguments.append("--trace-to json:log") - # Retrieve arguments from dut_config - arguments.append(f"--discriminator {dut_config.discriminator}") - arguments.append(f"--passcode {dut_config.setup_code}") + if not omit_commissioning_method: arguments.append(f"--commissioning-method {pairing_mode}") # Retrieve arguments from test_parameters if test_parameters: + # If a manual pairing code is provided together with discriminator and + # passcode, the test will assume we are trying to commission two DUTs and fail + if "manual-code" not in test_parameters.keys(): + # Retrieve arguments from dut_config + arguments.append(f"--discriminator {dut_config.discriminator}") + arguments.append(f"--passcode {dut_config.setup_code}") + for name, value in test_parameters.items(): arg_value = str(value) if value is not None else "" arguments.append(f"--{name} {arg_value}") + else: + # Retrieve arguments from dut_config + arguments.append(f"--discriminator {dut_config.discriminator}") + arguments.append(f"--passcode {dut_config.setup_code}") return arguments diff --git a/test_collections/matter/sdk_tests/support/python_testing/sdk_python_tests.py b/test_collections/matter/sdk_tests/support/python_testing/sdk_python_tests.py index cdc5289c..5bd9a620 100644 --- a/test_collections/matter/sdk_tests/support/python_testing/sdk_python_tests.py +++ b/test_collections/matter/sdk_tests/support/python_testing/sdk_python_tests.py @@ -53,6 +53,12 @@ def _init_test_suites( python_test_version: str, ) -> dict[SuiteType, 
PythonSuiteDeclaration]: return { + SuiteType.MANDATORY: PythonSuiteDeclaration( + name="Python Testing Suite - Mandatories", + suite_type=SuiteType.MANDATORY, + version=python_test_version, + mandatory=True, + ), SuiteType.COMMISSIONING: PythonSuiteDeclaration( name="Python Testing Suite", suite_type=SuiteType.COMMISSIONING, @@ -77,15 +83,18 @@ def _parse_python_script_to_test_case_declarations( python_tests = parse_python_script(python_test_path) return [ - PythonCaseDeclaration(test=python_test, python_test_version=python_test_version) + PythonCaseDeclaration( + test=python_test, + python_test_version=python_test_version, + mandatory=python_test.python_test_type == PythonTestType.MANDATORY, + ) for python_test in python_tests ] -def _parse_all_sdk_python_tests( - python_test_files: list[Path], python_test_version: str +def __parse_python_tests( + python_test_files: list[Path], python_test_version: str, mandatory: bool ) -> list[PythonSuiteDeclaration]: - """Parse all python test files and add them into Automated Suite""" suites = _init_test_suites(python_test_version) for python_test_file in python_test_files: @@ -96,28 +105,34 @@ def _parse_all_sdk_python_tests( for test_case in test_cases: python_test_type = test_case.class_ref.python_test.python_test_type - if python_test_type == PythonTestType.COMMISSIONING: - suites[SuiteType.COMMISSIONING].add_test_case(test_case) - elif python_test_type == PythonTestType.NO_COMMISSIONING: - suites[SuiteType.NO_COMMISSIONING].add_test_case(test_case) + if mandatory: + if python_test_type == PythonTestType.MANDATORY: + suites[SuiteType.MANDATORY].add_test_case(test_case) else: - suites[SuiteType.LEGACY].add_test_case(test_case) + if python_test_type == PythonTestType.COMMISSIONING: + suites[SuiteType.COMMISSIONING].add_test_case(test_case) + elif python_test_type == PythonTestType.NO_COMMISSIONING: + suites[SuiteType.NO_COMMISSIONING].add_test_case(test_case) + elif python_test_type != PythonTestType.MANDATORY: + suites[SuiteType.LEGACY].add_test_case(test_case) return [s for s in list(suites.values()) if len(s.test_cases) != 0] -def sdk_python_test_collection( - python_test_folder: SDKTestFolder = SDK_PYTHON_TEST_FOLDER, +def __sdk_python_test_collection( + name: str, python_test_folder: SDKTestFolder, mandatory: bool ) -> PythonCollectionDeclaration: - """Declare a new collection of test suites.""" collection = PythonCollectionDeclaration( - name="SDK Python Tests", folder=python_test_folder + name=name, folder=python_test_folder, mandatory=mandatory ) - files = python_test_folder.file_paths(extension=".py") - version = python_test_folder.version - suites = _parse_all_sdk_python_tests( - python_test_files=files, python_test_version=version + python_test_files = python_test_folder.file_paths(extension=".py") + python_test_version = python_test_folder.version + + suites = __parse_python_tests( + python_test_files=python_test_files, + python_test_version=python_test_version, + mandatory=mandatory, ) for suite in suites: @@ -127,6 +142,26 @@ def sdk_python_test_collection( return collection +def sdk_python_test_collection( + python_test_folder: SDKTestFolder = SDK_PYTHON_TEST_FOLDER, +) -> PythonCollectionDeclaration: + """Declare a new collection of test suites.""" + return __sdk_python_test_collection( + name="SDK Python Tests", python_test_folder=python_test_folder, mandatory=False + ) + + +def sdk_mandatory_python_test_collection( + python_test_folder: SDKTestFolder = SDK_PYTHON_TEST_FOLDER, +) -> PythonCollectionDeclaration: + """Declare a new 
collection containing only the mandatory SDK Python test suites.""" + return __sdk_python_test_collection( + name="Mandatory SDK Python Tests", + python_test_folder=python_test_folder, + mandatory=True, + ) + + def custom_python_test_collection( python_test_folder: SDKTestFolder = CUSTOM_PYTHON_TEST_FOLDER, ) -> Optional[PythonCollectionDeclaration]: @@ -135,9 +170,11 @@ def custom_python_test_collection( name="Custom SDK Python Tests", folder=python_test_folder ) - files = python_test_folder.file_paths(extension=".py") - suites = _parse_all_sdk_python_tests( - python_test_files=files, python_test_version="custom" + python_test_files = python_test_folder.file_paths(extension=".py") + suites = __parse_python_tests( + python_test_files=python_test_files, + python_test_version="custom", + mandatory=False, ) for suite in suites: diff --git a/test_collections/matter/sdk_tests/support/python_testing/validate_python_test_scripts.py b/test_collections/matter/sdk_tests/support/python_testing/validate_python_test_scripts.py new file mode 100644 index 00000000..16d9121b --- /dev/null +++ b/test_collections/matter/sdk_tests/support/python_testing/validate_python_test_scripts.py @@ -0,0 +1,53 @@ +import sys +from pathlib import Path + +from app.test_engine.logger import test_engine_logger as logger +from test_collections.matter.sdk_tests.support.python_testing.models.python_test_models import ( + PythonTest, + PythonTestType, +) +from test_collections.matter.sdk_tests.support.python_testing.models.python_test_parser import ( + parse_python_script, +) + + +def log_message(message: str, break_line: bool = True) -> None: + text = f">>>>>>>>>> {message} <<<<<<<<<<" + if break_line: + text += "\n\n" + + logger.info(text) + + +def log_parsed_scripts(test_type: PythonTestType) -> None: + log_message(f"Scripts categorized as {test_type.name}", False) + for script in parsed_scripts[test_type]: + logger.info(script) + log_message(f"Scripts categorized as {test_type.name}") + + +log_file = sys.argv[1] +script_paths = sys.argv[2:] + +logger.add(log_file, format="{message}") +log_message("Starting script parsing. 
Errors will be listed below (if any)", False) + +parsed_scripts: dict[PythonTestType, list[str]] = {} +parsed_scripts[PythonTestType.COMMISSIONING] = [] +parsed_scripts[PythonTestType.NO_COMMISSIONING] = [] +parsed_scripts[PythonTestType.LEGACY] = [] +parsed_scripts[PythonTestType.MANDATORY] = [] + +for script in script_paths: + path: Path = Path(script) + result: list[PythonTest] = parse_python_script(path) + for parsed in result: + parsed_scripts[parsed.python_test_type].append( + f"Name: {parsed.class_name}, Description: {parsed.description}" + ) + +log_message("Script parsing finished") +log_message("All scripts were analyzed and separated into categories") + +for key in parsed_scripts: + log_parsed_scripts(key) diff --git a/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_case.py b/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_case.py index 95002676..a24e1c42 100644 --- a/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_case.py +++ b/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_case.py @@ -64,7 +64,7 @@ def test_python_test_name() -> None: # Create a subclass of PythonTest case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version="version" + test=test, python_test_version="version", mandatory=False ) assert case_class.metadata["title"] == name assert case_class.metadata["description"] == description @@ -76,7 +76,7 @@ def test_python_test_python_version() -> None: python_test_version = "best_version" # Create a subclass of PythonTest case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version=python_test_version + test=test, python_test_version=python_test_version, mandatory=False ) assert case_class.python_test_version == python_test_version @@ -86,7 +86,7 @@ def test_python_test_python() -> None: test = python_test_instance() # Create a subclass of PythonTest case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version="version" + test=test, python_test_version="version", mandatory=False ) assert case_class.python_test is test @@ -99,7 +99,7 @@ def test_python_test_case_class_pics() -> None: # Create a subclass of PythonTest case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version="version" + test=test, python_test_version="version", mandatory=False ) assert case_class.pics() == test_PICS @@ -113,11 +113,25 @@ def test_class_factory_test_public_id() -> None: for data in test_data: test = python_test_instance(name=data["name"]) case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version="version" + test=test, python_test_version="version", mandatory=False ) assert case_class.metadata["public_id"] == data["public_id"] + +def test_class_factory_mandatory() -> None: + """Test that the metadata 'mandatory' field is set""" + test_data = [ + {"name": "TC-AB-1.2", "class_name": "TC_AB_1_2"}, + {"name": "TC-CD-3.4", "class_name": "TC_CD_3_4"}, + ] + for data in test_data: + test = python_test_instance(name=data["name"]) + case_class: Type[PythonTestCase] = PythonTestCase.class_factory( + test=test, python_test_version="version", mandatory=True + ) + assert case_class.metadata["mandatory"] == True # type: ignore + + def test_class_factory_test_class_name() -> None: """Test that class factory correctly finds identifier 'TC-XX-1.1', convert it to a safe class name, eg TC_XX_1_1""" 
@@ -128,7 +142,7 @@ def test_class_factory_test_class_name() -> None: for data in test_data: test = python_test_instance(name=data["name"]) case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version="version" + test=test, python_test_version="version", mandatory=True ) assert case_class.__name__ == data["class_name"] @@ -140,7 +154,7 @@ async def test_python_version_logging() -> None: test = python_test_instance(type=type) test_python_version = "PythonVersionTest" case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version=test_python_version + test=test, python_test_version=test_python_version, mandatory=False ) instance = case_class(TestCaseExecution()) @@ -158,7 +172,7 @@ def test_normal_steps_for_python_tests() -> None: test_step = MatterTestStep(label="Step1") test = python_test_instance(type=type, steps=[test_step]) case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version="version" + test=test, python_test_version="version", mandatory=False ) instance = case_class(TestCaseExecution()) # Assert normal step is present @@ -173,7 +187,7 @@ def test_multiple_steps_for_python_tests() -> None: no_steps = 5 test = python_test_instance(type=type, steps=([test_step] * no_steps)) case_class: Type[PythonTestCase] = PythonTestCase.class_factory( - test=test, python_test_version="version" + test=test, python_test_version="version", mandatory=False ) instance = case_class(TestCaseExecution()) diff --git a/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_declarations.py b/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_declarations.py index 191f8681..dde09d28 100644 --- a/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_declarations.py +++ b/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_declarations.py @@ -37,7 +37,7 @@ def test_python_suite_declaration() -> None: ) as declaration_init: PythonSuiteDeclaration(name=name, suite_type=type, version=version) class_factory.assert_called_once_with( - name=name, suite_type=type, python_test_version=version + name=name, suite_type=type, python_test_version=version, mandatory=False ) declaration_init.assert_called_once() @@ -50,6 +50,7 @@ def test_python_case_declaration() -> None: steps=[], class_name="TC_TestTest", python_test_type=PythonTestType.COMMISSIONING, + mandatory=True, ) version = "SomeVersionStr" with mock.patch( @@ -57,6 +58,8 @@ def test_python_case_declaration() -> None: ) as class_factory, mock.patch( "app.test_engine.models.test_declarations.TestCaseDeclaration.__init__" ) as declaration_init: - PythonCaseDeclaration(test=test, python_test_version=version) - class_factory.assert_called_once_with(test=test, python_test_version=version) + PythonCaseDeclaration(test=test, python_test_version=version, mandatory=True) + class_factory.assert_called_once_with( + test=test, python_test_version=version, mandatory=True + ) declaration_init.assert_called_once() diff --git a/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_suite.py b/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_suite.py index bf6fe49b..6dfd0858 100644 --- a/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_suite.py +++ b/test_collections/matter/sdk_tests/support/tests/python_tests/test_python_test_suite.py @@ -40,13 +40,36 @@ def 
test_python_suite_class_factory_name() -> None: # Create a subclass of PythonTestSuite suite_class: Type[PythonTestSuite] = PythonTestSuite.class_factory( - suite_type=SuiteType.COMMISSIONING, name=name, python_test_version="version" + suite_type=SuiteType.COMMISSIONING, + name=name, + python_test_version="version", + mandatory=False, ) assert suite_class.__name__ == name assert suite_class.public_id() == name assert suite_class.metadata["title"] == name assert suite_class.metadata["description"] == name + assert suite_class.metadata["mandatory"] == False # type: ignore + + +def test_python_suite_class_factory_name_mandatory() -> None: + """Test that the suite's mandatory metadata field is set.""" + name = "AnotherTestSuite" + + # Create a subclass of PythonTestSuite + suite_class: Type[PythonTestSuite] = PythonTestSuite.class_factory( + suite_type=SuiteType.COMMISSIONING, + name=name, + python_test_version="version", + mandatory=True, + ) + + assert suite_class.__name__ == name + assert suite_class.public_id() == name + assert suite_class.metadata["title"] == name + assert suite_class.metadata["description"] == name + assert suite_class.metadata["mandatory"] == True # type: ignore def test_python_test_suite_python_version() -> None: @@ -57,6 +80,7 @@ def test_python_test_suite_python_version() -> None: suite_type=SuiteType.COMMISSIONING, name="SomeSuite", python_test_version=python_test_version, + mandatory=False, ) assert suite_class.python_test_version == python_test_version @@ -68,7 +92,10 @@ def test_commissioning_suite_subclass() -> None: type = SuiteType.COMMISSIONING # Create a subclass of PythonTestSuite suite_class: Type[PythonTestSuite] = PythonTestSuite.class_factory( - suite_type=type, name="SomeSuite", python_test_version="some_version" + suite_type=type, + name="SomeSuite", + python_test_version="some_version", + mandatory=False, ) assert issubclass(suite_class, CommissioningPythonTestSuite) @@ -82,7 +109,10 @@ async def test_suite_setup_log_python_version() -> None: python_test_version = "best_version" # Create a subclass of PythonTestSuite suite_class: Type[PythonTestSuite] = PythonTestSuite.class_factory( - suite_type=type, name="SomeSuite", python_test_version=python_test_version + suite_type=type, + name="SomeSuite", + python_test_version=python_test_version, + mandatory=False, ) suite_instance = suite_class(TestSuiteExecution()) @@ -121,7 +151,10 @@ async def test_suite_setup_without_pics() -> None: python_test_version = "best_version" # Create a subclass of PythonTestSuite suite_class: Type[PythonTestSuite] = PythonTestSuite.class_factory( - suite_type=type, name="SomeSuite", python_test_version=python_test_version + suite_type=type, + name="SomeSuite", + python_test_version=python_test_version, + mandatory=True, ) suite_instance = suite_class(TestSuiteExecution()) @@ -160,7 +193,10 @@ async def test_suite_setup_with_pics() -> None: python_test_version = "best_version" # Create a subclass of PythonTestSuite suite_class: Type[PythonTestSuite] = PythonTestSuite.class_factory( - suite_type=type, name="SomeSuite", python_test_version=python_test_version + suite_type=type, + name="SomeSuite", + python_test_version=python_test_version, + mandatory=False, ) suite_instance = suite_class(TestSuiteExecution()) @@ -201,6 +237,7 @@ async def test_commissioning_suite_setup_with_pics() -> None: suite_type=SuiteType.COMMISSIONING, name="SomeSuite", python_test_version=python_test_version, + mandatory=False, ) suite_instance = suite_class(TestSuiteExecution()) @@ -237,6 +274,7 @@ async def 
test_commissioning_suite_setup() -> None: suite_type=SuiteType.COMMISSIONING, name="SomeSuite", python_test_version="Some version", + mandatory=False, ) suite_instance = suite_class(TestSuiteExecution()) diff --git a/test_collections/matter/sdk_tests/support/tests/python_tests/test_utils.py b/test_collections/matter/sdk_tests/support/tests/python_tests/test_utils.py index d7404b47..1611b035 100644 --- a/test_collections/matter/sdk_tests/support/tests/python_tests/test_utils.py +++ b/test_collections/matter/sdk_tests/support/tests/python_tests/test_utils.py @@ -56,9 +56,9 @@ async def test_generate_command_arguments_with_null_value_attribute() -> None: assert [ "--trace-to json:log", + "--commissioning-method on-network", "--discriminator 123", "--passcode 1234", - "--commissioning-method on-network", "--test-argument ", ] == arguments @@ -89,9 +89,9 @@ async def test_generate_command_arguments_on_network() -> None: assert [ "--trace-to json:log", + "--commissioning-method on-network", "--discriminator 123", "--passcode 1234", - "--commissioning-method on-network", "--paa-trust-store-path /paa-root-certs", "--storage_path /root/admin_storage.json", ] == arguments @@ -121,9 +121,9 @@ async def test_generate_command_arguments_ble_wifi() -> None: assert [ "--trace-to json:log", + "--commissioning-method ble-wifi", "--discriminator 147", "--passcode 357", - "--commissioning-method ble-wifi", "--paa-trust-store-path /paa-root-certs", "--storage_path /root/admin_storage.json", ] == arguments @@ -152,9 +152,9 @@ async def test_generate_command_arguments_ble_thread() -> None: ) assert [ "--trace-to json:log", + "--commissioning-method ble-thread", "--discriminator 456", "--passcode 8765", - "--commissioning-method ble-thread", "--paa-trust-store-path /paa-root-certs", "--storage_path /root/admin_storage.json", ] == arguments @@ -181,9 +181,9 @@ async def test_generate_command_arguments_no_test_parameter_informed() -> None: assert [ "--trace-to json:log", + "--commissioning-method ble-thread", "--discriminator 456", "--passcode 8765", - "--commissioning-method ble-thread", ] == arguments @@ -208,9 +208,9 @@ async def test_generate_command_arguments_trace_log_false_informed() -> None: ) assert [ + "--commissioning-method ble-thread", "--discriminator 456", "--passcode 8765", - "--commissioning-method ble-thread", ] == arguments diff --git a/test_collections/matter/sdk_tests/support/yaml_tests/matter_yaml_runner.py b/test_collections/matter/sdk_tests/support/yaml_tests/matter_yaml_runner.py index 7709f021..4e3c755f 100644 --- a/test_collections/matter/sdk_tests/support/yaml_tests/matter_yaml_runner.py +++ b/test_collections/matter/sdk_tests/support/yaml_tests/matter_yaml_runner.py @@ -58,6 +58,7 @@ PAIRING_MODE_ONNETWORK = "onnetwork-long" PAIRING_MODE_BLE_WIFI = "ble-wifi" PAIRING_MODE_BLE_THREAD = "ble-thread" +PAIRING_MODE_WIFIPAF_WIFI = "wifipaf-wifi" PAIRING_MODE_UNPAIR = "unpair" # Websocket runner @@ -292,6 +293,22 @@ async def pairing_ble_wifi( discriminator, ) + async def pairing_wifipaf_wifi( + self, + ssid: str, + password: str, + setup_code: str, + discriminator: str, + ) -> bool: + return await self.pairing( + PAIRING_MODE_WIFIPAF_WIFI, + hex(self.chip_server.node_id), + ssid, + password, + setup_code, + discriminator, + ) + async def pairing_ble_thread( self, hex_dataset: str, diff --git a/test_collections/matter/sdk_tests/support/yaml_tests/models/chip_suite.py b/test_collections/matter/sdk_tests/support/yaml_tests/models/chip_suite.py index f8f5e279..f8da6e89 100644 --- 
a/test_collections/matter/sdk_tests/support/yaml_tests/models/chip_suite.py +++ b/test_collections/matter/sdk_tests/support/yaml_tests/models/chip_suite.py @@ -32,6 +32,7 @@ from ...chip.chip_server import ChipServerType from ...sdk_container import SDKContainer +from ...utils import prompt_for_commissioning_mode from ...yaml_tests.matter_yaml_runner import MatterYAMLRunner from ...yaml_tests.models.chip_test import PromptOption @@ -85,6 +86,7 @@ async def setup(self) -> None: self.__dut_commissioned_successfully = False if self.server_type == ChipServerType.CHIP_TOOL: logger.info("Commission DUT") + await prompt_for_commissioning_mode(self, logger, None, self.cancel) await self.__commission_dut_allowing_retries() elif self.server_type == ChipServerType.CHIP_APP: logger.info("Verify Test suite prerequisites") @@ -113,6 +115,11 @@ async def __pair_with_dut(self) -> None: self.config_matter.dut_config.pairing_mode is DutPairingModeEnum.BLE_THREAD ): pair_result = await self.__pair_with_dut_ble_thread() + elif ( + self.config_matter.dut_config.pairing_mode + is DutPairingModeEnum.WIFIPAF_WIFI + ): + pair_result = await self.__pair_with_dut_wifipaf_wifi() else: raise DUTCommissioningError("Unsupported DUT pairing mode") @@ -136,6 +143,17 @@ async def __pair_with_dut_ble_wifi(self) -> bool: discriminator=self.config_matter.dut_config.discriminator, ) + async def __pair_with_dut_wifipaf_wifi(self) -> bool: + if self.config_matter.network.wifi is None: + raise DUTCommissioningError("Tool config is missing wifi config.") + + return await self.runner.pairing_wifipaf_wifi( + ssid=self.config_matter.network.wifi.ssid, + password=self.config_matter.network.wifi.password, + setup_code=self.config_matter.dut_config.setup_code, + discriminator=self.config_matter.dut_config.discriminator, + ) + async def __pair_with_dut_ble_thread(self) -> bool: if self.config_matter.network.thread is None: raise DUTCommissioningError("Tool config is missing thread config.") diff --git a/test_collections/matter/setup.sh b/test_collections/matter/setup.sh index 1b77049c..1d17a633 100755 --- a/test_collections/matter/setup.sh +++ b/test_collections/matter/setup.sh @@ -24,64 +24,29 @@ print_start_of_script print_script_step "Installing Matter Dependencies" # TODO Comment on what dependency is required for: -UBUNTU_VERSION_NUMBER=$(lsb_release -sr) -if [[ $UBUNTU_VERSION_NUMBER = 22.04 ]]; then - packagelist=( - "apt-transport-https (>=2.4.11)" - "avahi-utils (>=0.8-5ubuntu5.2)" # Matter uses Avahi - "ca-certificates (=20230311ubuntu0.22.04.1)" - "figlet (=2.2.5-3)" - "g++ (=4:11.2.0-1ubuntu1)" - "gcc (=4:11.2.0-1ubuntu1)" - "generate-ninja (=0.0~git20220118.0725d78-1)" - "libavahi-client-dev (=0.8-5ubuntu5.2)" - "libcairo2-dev (=1.16.0-5ubuntu2)" - "libdbus-1-dev (=1.12.20-2ubuntu4.1)" - "libgirepository1.0-dev (=1.72.0-1)" - "libglib2.0-dev (>=2.72.4-0ubuntu2.2)" - "libreadline-dev (=8.1.2-1)" - "libssl-dev (>=3.0.2-0ubuntu1.14)" # Apparently with each update, previous versions of the library are removed - "net-tools (=1.60+git20181103.0eebece-1ubuntu5)" - "ninja-build (=1.10.1-1)" - "npm (=8.5.1~ds-1)" - "pkg-config (=0.29.2-1ubuntu3)" - "software-properties-common (=0.99.22.9)" - "toilet (=0.3-1.4)" - "unzip (>=6.0-26ubuntu3.1)" - ) -elif [[ $UBUNTU_VERSION_NUMBER = 24.04 ]]; then - packagelist=( - "apt-transport-https (>=2.7.14build2)" - "avahi-utils (>=0.8-13ubuntu6)" # Matter uses Avahi - "ca-certificates (=20240203)" - "figlet (=2.2.5-3)" - "g++ (=4:13.2.0-7ubuntu1)" - "gcc (=4:13.2.0-7ubuntu1)" - "generate-ninja 
(=0.0~git20240221.03d10f1-1)" - "libavahi-client-dev (=0.8-13ubuntu6)" - "libcairo2-dev (=1.18.0-3build1)" - "libdbus-1-dev (=1.14.10-4ubuntu4)" - "libgirepository1.0-dev (=1.80.1-1)" - "libglib2.0-dev (>=2.80.0-6ubuntu3.1)" - "libreadline-dev (=8.2-4build1)" - "libssl-dev (>=3.0.13-0ubuntu3.1)" # Apparently with each update, previous versions of the library are removed - "net-tools (=2.10-0.1ubuntu4)" - "ninja-build (=1.11.1-2)" - "npm (=9.2.0~ds1-2)" - "pkg-config (=1.8.1-2build1)" - "software-properties-common (=0.99.48)" - "toilet (=0.3-1.4build1)" - "unzip (>=6.0-28ubuntu4)" - ) -else - printf "###############################################################\n" - printf "###############################################################\n" - printf "########## This version of Ubuntu is not supported. ###########\n" - printf "###############################################################\n" - printf "###############################################################\n" - exit 1 -fi - +packagelist=( + "apt-transport-https (>=2.7.14build2)" + "avahi-utils (>=0.8-13ubuntu6)" # Matter uses Avahi + "ca-certificates (=20240203)" + "figlet (=2.2.5-3)" + "g++ (=4:13.2.0-7ubuntu1)" + "gcc (=4:13.2.0-7ubuntu1)" + "generate-ninja (=0.0~git20240221.03d10f1-1)" + "libavahi-client-dev (=0.8-13ubuntu6)" + "libcairo2-dev (=1.18.0-3build1)" + "libdbus-1-dev (=1.14.10-4ubuntu4)" + "libgirepository1.0-dev (=1.80.1-1)" + "libglib2.0-dev (>=2.80.0-6ubuntu3.1)" + "libreadline-dev (=8.2-4build1)" + "libssl-dev (>=3.0.13-0ubuntu3.1)" # Apparently with each update, previous versions of the library are removed + "net-tools (=2.10-0.1ubuntu4)" + "ninja-build (=1.11.1-2)" + "npm (=9.2.0~ds1-2)" + "pkg-config (=1.8.1-2build1)" + "software-properties-common (=0.99.48)" + "toilet (=0.3-1.4build1)" + "unzip (>=6.0-28ubuntu4)" +) SAVEIFS=$IFS IFS=$(echo -en "\r") diff --git a/test_collections/matter/test_environment_config.py b/test_collections/matter/test_environment_config.py index a9d0c5bc..aa196c48 100644 --- a/test_collections/matter/test_environment_config.py +++ b/test_collections/matter/test_environment_config.py @@ -29,6 +29,7 @@ class DutPairingModeEnum(str, Enum): ON_NETWORK = "onnetwork" BLE_WIFI = "ble-wifi" BLE_THREAD = "ble-thread" + WIFIPAF_WIFI = "wifipaf-wifi" class WiFiConfig(BaseModel): diff --git a/test_collections/sample_tests/sample_suite_3_mandatory/__init__.py b/test_collections/sample_tests/sample_suite_3_mandatory/__init__.py new file mode 100644 index 00000000..a5667a82 --- /dev/null +++ b/test_collections/sample_tests/sample_suite_3_mandatory/__init__.py @@ -0,0 +1,16 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .sample_suite_3_mandatory import SampleTestSuite3Mandatory diff --git a/test_collections/sample_tests/sample_suite_3_mandatory/sample_suite_3_mandatory.py b/test_collections/sample_tests/sample_suite_3_mandatory/sample_suite_3_mandatory.py new file mode 100644 index 00000000..2278ce96 --- /dev/null +++ b/test_collections/sample_tests/sample_suite_3_mandatory/sample_suite_3_mandatory.py @@ -0,0 +1,33 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from app.test_engine.logger import test_engine_logger as logger +from app.test_engine.models import TestSuite + + +class SampleTestSuite3Mandatory(TestSuite): + metadata = { + "public_id": "SampleTestSuite3Mandatory", + "version": "7.6.5", + "title": "This is Test Suite 3 Mandatory with version 7.6.5", + "description": "This is Test Suite 3 Mandatory; it is a mandatory suite", + "mandatory": True, # type: ignore + } + + async def setup(self) -> None: + logger.info("This is a test setup") + + async def cleanup(self) -> None: + logger.info("This is a test cleanup") diff --git a/test_collections/sample_tests/sample_suite_3_mandatory/tcss3001/__init__.py b/test_collections/sample_tests/sample_suite_3_mandatory/tcss3001/__init__.py new file mode 100644 index 00000000..2078ac0f --- /dev/null +++ b/test_collections/sample_tests/sample_suite_3_mandatory/tcss3001/__init__.py @@ -0,0 +1,16 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .tcss3001 import TCSS3001 diff --git a/test_collections/sample_tests/sample_suite_3_mandatory/tcss3001/tcss3001.py b/test_collections/sample_tests/sample_suite_3_mandatory/tcss3001/tcss3001.py new file mode 100644 index 00000000..fb95fce1 --- /dev/null +++ b/test_collections/sample_tests/sample_suite_3_mandatory/tcss3001/tcss3001.py @@ -0,0 +1,48 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from app.test_engine.logger import test_engine_logger as logger +from app.test_engine.models import TestCase, TestStep + + +class TCSS3001(TestCase): + metadata = { + "public_id": "TCSS3001", + "version": "1.2.3", + "title": "This is Test Case tcss3001", + "description": """This is Test Case tcss3001,\ + it will not get a very long description""", + "mandatory": True, # type: ignore + } + + def create_test_steps(self) -> None: + self.test_steps = [ + TestStep("Test Step 1"), + TestStep("Test Step 2"), + TestStep("Test Step 3"), + TestStep("Test Step 4"), + TestStep("Test Step 5"), + ] + + async def setup(self) -> None: + logger.info("This is a test case setup") + + async def execute(self) -> None: + for i in range(1, 5): + logger.info("Executing something in Step {}".format(i)) + self.next_step() + + async def cleanup(self) -> None: + logger.info("This is a test case cleanup") diff --git a/test_collections/tool_unit_tests/test_suite_expected/__init__.py b/test_collections/tool_unit_tests/test_suite_expected/__init__.py index 5b40c9a2..fbdca6e8 100644 --- a/test_collections/tool_unit_tests/test_suite_expected/__init__.py +++ b/test_collections/tool_unit_tests/test_suite_expected/__init__.py @@ -14,3 +14,4 @@ # limitations under the License. # from .test_suite_expected import TestSuiteExpected +from .test_suite_expected_2 import TestSuiteExpected2 diff --git a/test_collections/tool_unit_tests/test_suite_expected/tctr_expected_case_not_applicable/__init__.py b/test_collections/tool_unit_tests/test_suite_expected/tctr_expected_case_not_applicable/__init__.py new file mode 100644 index 00000000..13b2058f --- /dev/null +++ b/test_collections/tool_unit_tests/test_suite_expected/tctr_expected_case_not_applicable/__init__.py @@ -0,0 +1,16 @@ +# +# Copyright (c) 2024 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .tctr_expected_case_not_applicable import TCTRExpectedCaseNotApplicable diff --git a/test_collections/tool_unit_tests/test_suite_expected/tctr_expected_case_not_applicable/tctr_expected_case_not_applicable.py b/test_collections/tool_unit_tests/test_suite_expected/tctr_expected_case_not_applicable/tctr_expected_case_not_applicable.py new file mode 100644 index 00000000..ffa97540 --- /dev/null +++ b/test_collections/tool_unit_tests/test_suite_expected/tctr_expected_case_not_applicable/tctr_expected_case_not_applicable.py @@ -0,0 +1,47 @@ +# +# Copyright (c) 2024 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from app.test_engine.logger import test_engine_logger as logger +from app.test_engine.models import TestCase, TestStep + + +class TCTRExpectedCaseNotApplicable(TestCase): + metadata = { + "public_id": "TCTRExpectedCaseNotApplicable", + "version": "1.2.3", + "title": "This is Test Case tctr_expected_case_not_applicable", + "description": """This Test Case is built to test the test runner,\ + it is supposed to be marked as not applicable""", + } + + def create_test_steps(self) -> None: + self.test_steps = [ + TestStep("Test Step 1"), + TestStep("Test Step 2"), + TestStep("Test Step 3"), + ] + + async def setup(self) -> None: + logger.info("This is a test case setup") + + async def execute(self) -> None: + for step in self.test_steps: + logger.info("Executing something in " + step.name) + if step.name == "Test Step 2": + self.mark_as_not_applicable() + break + + async def cleanup(self) -> None: + logger.info("This is a test case cleanup") diff --git a/test_collections/tool_unit_tests/test_suite_expected/test_suite_expected_2.py b/test_collections/tool_unit_tests/test_suite_expected/test_suite_expected_2.py new file mode 100644 index 00000000..40c93d69 --- /dev/null +++ b/test_collections/tool_unit_tests/test_suite_expected/test_suite_expected_2.py @@ -0,0 +1,32 @@ +# +# Copyright (c) 2023 Project CHIP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from app.test_engine.logger import test_engine_logger as logger +from app.test_engine.models import TestSuite + + +class TestSuiteExpected2(TestSuite): + metadata = { + "public_id": "TestSuiteExpected2", + "version": "1.2.3", + "title": "This is Test Runner Test Suite", + "description": "This is Test Runner Test Suite", + } + + async def setup(self) -> None: + logger.info("This is a test setup") + + async def cleanup(self) -> None: + logger.info("This is a test cleanup")
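
Reviewer note (not part of the change set): a minimal sketch of how the new mandatory flag flows through PythonTestSuite.class_factory into the generated class metadata, assuming the harness packages are importable; the version string below is a placeholder.

# Illustrative sketch only; mirrors the new class_factory signature in models/test_suite.py.
from typing import Type

from test_collections.matter.sdk_tests.support.python_testing.models.test_suite import (
    PythonTestSuite,
    SuiteType,
)

# SuiteType.MANDATORY falls through to the base PythonTestSuite class; the
# factory stamps the flag into the dynamically generated class metadata.
suite_class: Type[PythonTestSuite] = PythonTestSuite.class_factory(
    suite_type=SuiteType.MANDATORY,
    name="Python Testing Suite - Mandatories",
    python_test_version="placeholder-sdk-sha",
    mandatory=True,
)

# This metadata field is what the mandatory-first ordering and the new
# mandatory collection rely on.
assert suite_class.metadata["mandatory"] is True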
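
Reviewer note (not part of the change set): a self-contained sketch of the manual-code rule added to generate_command_arguments in models/utils.py. build_pairing_args is a hypothetical stand-in used for illustration, and the manual code value is made up; the real function also handles the trace-log and commissioning-method flags.

from typing import Any, Optional


def build_pairing_args(
    discriminator: str, passcode: str, test_parameters: Optional[dict[str, Any]]
) -> list[str]:
    args: list[str] = []
    params = test_parameters or {}
    # A manual pairing code already encodes discriminator and passcode, so
    # passing all three would look like an attempt to commission two DUTs.
    if "manual-code" not in params:
        args.append(f"--discriminator {discriminator}")
        args.append(f"--passcode {passcode}")
    for name, value in params.items():
        arg_value = str(value) if value is not None else ""
        args.append(f"--{name} {arg_value}")
    return args


# With a manual code, the discriminator/passcode pair is omitted.
assert "--passcode 1234" not in build_pairing_args(
    "123", "1234", {"manual-code": "00000000000"}
)
# Without test parameters, both are kept.
assert "--passcode 1234" in build_pairing_args("123", "1234", None)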