diff --git a/test_collections/matter/sdk_tests/support/performance_tests/__init__.py b/test_collections/matter/sdk_tests/support/performance_tests/__init__.py
index 4314cdb6..b9ceb2b8 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/__init__.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/__init__.py
@@ -13,8 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Optional
-
 from app.test_engine.models.test_declarations import TestCollectionDeclaration

 from .sdk_performance_tests import sdk_performance_test_collection
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/models/__init__.py b/test_collections/matter/sdk_tests/support/performance_tests/models/__init__.py
index d15bf56b..23ea8022 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/models/__init__.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/models/__init__.py
@@ -13,5 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from .test_case import PythonTestCase
-from .test_suite import PythonTestSuite, SuiteType
+from .test_case import PerformanceTest
+from .test_suite import PerformanceSuiteType, PerformanceTestSuite
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/models/python_testing_hooks_proxy.py b/test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_hooks_proxy.py
similarity index 55%
rename from test_collections/matter/sdk_tests/support/performance_tests/models/python_testing_hooks_proxy.py
rename to test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_hooks_proxy.py
index b74dd7a2..5ed2a5ba 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/models/python_testing_hooks_proxy.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_hooks_proxy.py
@@ -21,7 +21,7 @@
 from pydantic import BaseModel


-class SDKPythonTestResultEnum(str, Enum):
+class SDKPerformanceResultEnum(str, Enum):
     START = "start"
     STOP = "stop"
     TEST_START = "test_start"
@@ -36,64 +36,64 @@ class SDKPythonTestResultEnum(str, Enum):
     SHOW_PROMPT = "show_prompt"


-class SDKPythonTestResultBase(BaseModel):
-    type: SDKPythonTestResultEnum
+class SDKPerformanceResultBase(BaseModel):
+    type: SDKPerformanceResultEnum

     def params_dict(self) -> dict:
         return self.dict(exclude={"type"})


-class SDKPythonTestResultStart(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.START
+class SDKPerformanceResultStart(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.START
     count: int


-class SDKPythonTestResultStop(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STOP
+class SDKPerformanceResultStop(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STOP
     duration: int


-class SDKPythonTestResultTestStart(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.TEST_START
+class SDKPerformanceResultTestStart(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.TEST_START
     filename: Optional[str]
     name: Optional[str]
     count: Optional[int]
     steps: Optional[list[str]]


-class SDKPythonTestResultTestStop(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.TEST_STOP
+class SDKPerformanceResultTestStop(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.TEST_STOP
     duration: Optional[int]
     exception: Any


-class SDKPythonTestResultTestSkipped(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.TEST_SKIPPED
+class SDKPerformanceResultTestSkipped(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.TEST_SKIPPED
     filename: Optional[str]
     name: Optional[str]


-class SDKPythonTestResultStepSkipped(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_SKIPPED
+class SDKPerformanceResultStepSkipped(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_SKIPPED
     name: Optional[str]
     expression: Optional[str]


-class SDKPythonTestResultStepStart(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_START
+class SDKPerformanceResultStepStart(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_START
     name: Optional[str]


-class SDKPythonTestResultStepSuccess(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_SUCCESS
+class SDKPerformanceResultStepSuccess(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_SUCCESS
     logger: Any
     logs: Any
     duration: int
     request: Any


-class SDKPythonTestResultStepFailure(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_FAILURE
+class SDKPerformanceResultStepFailure(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_FAILURE
     logger: Any
     logs: Any
     duration: int
@@ -101,28 +101,28 @@ class SDKPythonTestResultStepFailure(SDKPythonTestResultBase):
     received: Any


-class SDKPythonTestResultStepUnknown(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_UNKNOWN
+class SDKPerformanceResultStepUnknown(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_UNKNOWN


-class SDKPythonTestResultStepManual(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_MANUAL
+class SDKPerformanceResultStepManual(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_MANUAL


-class SDKPythonTestResultShowPrompt(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.SHOW_PROMPT
+class SDKPerformanceResultShowPrompt(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.SHOW_PROMPT
     msg: str
     placeholder: Optional[str]
     default_value: Optional[str]


-class SDKPythonTestRunnerHooks(TestRunnerHooks):
+class SDKPerformanceRunnerHooks(TestRunnerHooks):
     finished = False
     results: Queue

     def __init__(self) -> None:
-        SDKPythonTestRunnerHooks.finished = False
-        SDKPythonTestRunnerHooks.results = Queue()
+        SDKPerformanceRunnerHooks.finished = False
+        SDKPerformanceRunnerHooks.results = Queue()

     def update_test(self) -> Union[dict, None]:
         try:
@@ -132,41 +132,43 @@ def update_test(self) -> Union[dict, None]:
         return None

     def is_finished(self) -> bool:
-        return SDKPythonTestRunnerHooks.finished
+        return SDKPerformanceRunnerHooks.finished

     def start(self, count: int) -> None:
-        self.results.put(SDKPythonTestResultStart(count=count))
+        self.results.put(SDKPerformanceResultStart(count=count))

     def stop(self, duration: int) -> None:
-        self.results.put(SDKPythonTestResultStop(duration=duration))
-        SDKPythonTestRunnerHooks.finished = True
+        self.results.put(SDKPerformanceResultStop(duration=duration))
+        SDKPerformanceRunnerHooks.finished = True

     def test_start(
         self, filename: str, name: str, count: int, steps: list[str] = []
     ) -> None:
         self.results.put(
-            SDKPythonTestResultTestStart(
+            SDKPerformanceResultTestStart(
                 filename=filename, name=name, count=count, steps=steps
             )
         )

     def test_stop(self, exception: Exception, duration: int) -> None:
         self.results.put(
-            SDKPythonTestResultTestStop(exception=exception, duration=duration)
+            SDKPerformanceResultTestStop(exception=exception, duration=duration)
         )

     def test_skipped(self, filename: str, name: str) -> None:
-        self.results.put(SDKPythonTestResultTestSkipped(filename=filename, name=name))
+        self.results.put(SDKPerformanceResultTestSkipped(filename=filename, name=name))

     def step_skipped(self, name: str, expression: str) -> None:
-        self.results.put(SDKPythonTestResultStepSkipped(expression=expression))
+        self.results.put(
+            SDKPerformanceResultStepSkipped(name=name, expression=expression)
+        )

     def step_start(self, name: str) -> None:
-        self.results.put(SDKPythonTestResultStepStart(name=name))
+        self.results.put(SDKPerformanceResultStepStart(name=name))

     def step_success(self, logger: Any, logs: Any, duration: int, request: Any) -> None:
         self.results.put(
-            SDKPythonTestResultStepSuccess(
+            SDKPerformanceResultStepSuccess(
                 logger=logger,
                 logs=logs,
                 duration=duration,
@@ -178,7 +180,7 @@ def step_failure(
         self, logger: Any, logs: Any, duration: int, request: Any, received: Any
     ) -> None:
         self.results.put(
-            SDKPythonTestResultStepFailure(
+            SDKPerformanceResultStepFailure(
                 logger=logger,
                 logs=logs,
                 duration=duration,
@@ -188,19 +190,20 @@ def step_failure(
         )

     def step_unknown(self) -> None:
-        self.results.put(SDKPythonTestResultStepUnknown())
+        self.results.put(SDKPerformanceResultStepUnknown())

-    def step_manual(self) -> None:
-        self.results.put(SDKPythonTestResultStepManual())
+    async def step_manual(self) -> None:
+        self.results.put(SDKPerformanceResultStepManual())

     def show_prompt(
         self,
         msg: str,
         placeholder: Optional[str] = None,
         default_value: Optional[str] = None,
+        endpoint_id: Optional[int] = None,
     ) -> None:
         self.results.put(
-            SDKPythonTestResultShowPrompt(
+            SDKPerformanceResultShowPrompt(
                 msg=msg, placeholder=placeholder, default_value=default_value
             )
         )
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/models/python_test_models.py b/test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_models.py
similarity index 67%
rename from test_collections/matter/sdk_tests/support/performance_tests/models/python_test_models.py
rename to test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_models.py
index c5e6991e..675db85a 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/models/python_test_models.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_models.py
@@ -23,20 +23,14 @@
 ###


-class PythonTestType(Enum):
-    # - PythonTestType.COMMISSIONING: test cases that have a commissioning first step
-    # - PythonTestType.NO_COMMISSIONING: test cases that follow the expected template
-    #   but don't have a commissioning first step
-    # - PythonTestType.LEGACY: test cases that don't follow the expected template
-    COMMISSIONING = 1
-    NO_COMMISSIONING = 2
-    LEGACY = 3
+class PerformanceTestType(Enum):
+    PERFORMANCE = 1


-class PythonTest(MatterTest):
+class PerformanceTest(MatterTest):
     description: str
     class_name: str
-    python_test_type: PythonTestType
+    performance_test_type: PerformanceTestType

     def __init__(self, **kwargs: Any) -> None:
         super().__init__(**kwargs)
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/models/python_test_parser.py b/test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_parser.py
similarity index 83%
rename from test_collections/matter/sdk_tests/support/performance_tests/models/python_test_parser.py
rename to test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_parser.py
index 73002b4c..9ee0f80b 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/models/python_test_parser.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/models/performance_tests_parser.py
@@ -19,7 +19,7 @@
 from typing import Any, List, Optional, Union

 from ...models.matter_test_models import MatterTestStep, MatterTestType
-from .python_test_models import PythonTest, PythonTestType
+from .performance_tests_models import PerformanceTest, PerformanceTestType

 ARG_STEP_DESCRIPTION_INDEX = 1
 KEYWORD_IS_COMISSIONING_INDEX = 0
@@ -31,8 +31,8 @@
 FunctionDefType = Union[ast.FunctionDef, ast.AsyncFunctionDef]


-def parse_python_script(path: Path) -> list[PythonTest]:
-    """Parse a python file into a list of PythonTest models.
+def parse_performance_tests(path: Path) -> list[PerformanceTest]:
+    """Parse a python file into a list of PerformanceTest models.

     This will also annotate parsed python tests with their file path and test
     type.
@@ -46,15 +46,15 @@
     * steps_[test_name] - (optional)
         This method should return a list with the steps' descriptions

-    Example: file TC_ACE_1_3.py has the methods test_TC_ACE_1_3, desc_TC_ACE_1_3,
-    pics_TC_ACE_1_3 and steps_TC_ACE_1_3.
+    Example: file TC_COMMISSIONING_1_0.py has the methods test_TC_COMMISSIONING_1_0,
+    desc_TC_COMMISSIONING_1_0, and steps_TC_COMMISSIONING_1_0.
     """
     with open(path, "r") as python_file:
         parsed_python_file = ast.parse(python_file.read())

     test_classes = __test_classes(parsed_python_file)

-    test_cases: list[PythonTest] = []
+    test_cases: list[PerformanceTest] = []
     for c in test_classes:
         test_methods = __test_methods(c)
         test_names = __test_case_names(test_methods)
@@ -132,7 +132,7 @@ def __test_case_names(methods: list[FunctionDefType]) -> list[str]:

 def __parse_test_case(
     tc_name: str, methods: list[FunctionDefType], class_name: str, path: Path
-) -> PythonTest:
+) -> PerformanceTest:
     # Currently config is not configured in Python Testing
     tc_config: dict = {}

@@ -148,9 +148,6 @@ def __parse_test_case(
     if desc_method:
         tc_desc = __retrieve_description(desc_method)

-    # If the python test does not implement the steps template method,
-    # the test case will be presented in UI and the whole test case will be
-    # executed as one step
     steps_method = __get_method_by_name(steps_method_name, methods)
     if steps_method:
         tc_steps = __retrieve_steps(steps_method)
@@ -159,19 +156,7 @@ def __parse_test_case(
     if pics_method:
         tc_pics = __retrieve_pics(pics_method)

-    # - PythonTestType.COMMISSIONING: test cases that have a commissioning first step
-    # - PythonTestType.NO_COMMISSIONING: test cases that follow the expected template
-    #   but don't have a commissioning first step
-    # - PythonTestType.LEGACY: test cases that don't follow the expected template
-    # We use the desc_[test_name] method as an indicator that the test case follows the
-    # expected template
-    python_test_type = PythonTestType.LEGACY
-    if len(tc_steps) > 0 and tc_steps[0].is_commissioning:
-        python_test_type = PythonTestType.COMMISSIONING
-    elif desc_method:
-        python_test_type = PythonTestType.NO_COMMISSIONING
-
-    return PythonTest(
+    return PerformanceTest(
         name=tc_name,
         description=tc_desc,
         steps=tc_steps,
@@ -180,7 +165,7 @@ def __parse_test_case(
         path=path,
         type=MatterTestType.AUTOMATED,
         class_name=class_name,
-        python_test_type=python_test_type,
+        performance_test_type=PerformanceTestType.PERFORMANCE,
     )


@@ -209,7 +194,12 @@ def __retrieve_steps(method: FunctionDefType) -> List[MatterTestStep]:
         ].value.value

         python_steps.append(
-            MatterTestStep(label=step_name, is_commissioning=arg_is_commissioning)
+            MatterTestStep(
+                label=step_name,
+                command=None,
+                arguments=None,
+                is_commissioning=arg_is_commissioning,
+            )
         )

     return python_steps
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/models/test_case.py b/test_collections/matter/sdk_tests/support/performance_tests/models/test_case.py
index 86f152f5..27e73f3a 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/models/test_case.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/models/test_case.py
@@ -24,19 +24,18 @@
 from app.models import TestCaseExecution
 from app.test_engine.logger import PYTHON_TEST_LEVEL
 from app.test_engine.logger import test_engine_logger as logger
-from app.test_engine.models import TestStep
+from app.test_engine.models import TestCase, TestStep
 from app.test_engine.models.test_case import CUSTOM_TEST_IDENTIFIER
-from test_collections.matter.sdk_tests.support.python_testing.models.test_case import (
-    PythonTestCase,
-)
+from app.user_prompt_support.user_prompt_support import UserPromptSupport
 from test_collections.matter.test_environment_config import TestEnvironmentConfigMatter

 from ...pics import PICS_FILE_PATH
-from .python_test_models import PythonTest
-from .python_testing_hooks_proxy import (
-    SDKPythonTestResultBase,
-    SDKPythonTestRunnerHooks,
+from ...sdk_container import SDKContainer
+from .performance_tests_hooks_proxy import (
+    SDKPerformanceResultBase,
+    SDKPerformanceRunnerHooks,
 )
+from .performance_tests_models import PerformanceTest
 from .utils import EXECUTABLE, RUNNER_CLASS_PATH, generate_command_arguments


@@ -53,7 +52,7 @@ class PerformanceTestCaseError(Exception):
     pass


-class PerformanceTestCase(PythonTestCase):
+class PerformanceTestCase(TestCase, UserPromptSupport):
     """Base class for all Python Test based test cases.

     This class provides a class factory that will dynamically declare a new sub-class
@@ -63,18 +62,37 @@ class PerformanceTestCase(PythonTestCase):
     in all instances of such subclass.
""" + sdk_container: SDKContainer = SDKContainer() + performance_test: PerformanceTest + performance_test_version: str + def __init__(self, test_case_execution: TestCaseExecution) -> None: super().__init__(test_case_execution=test_case_execution) + self.test_stop_called = False self.step_execution_times = [] # type: ignore[var-annotated] + def start(self, count: int) -> None: + pass + + def stop(self, duration: int) -> None: + if not self.test_stop_called: + self.current_test_step.mark_as_completed() + def test_start( self, filename: str, name: str, count: int, steps: list[str] = [] ) -> None: - self.step_over() + self.next_step() + + def test_stop(self, exception: Exception, duration: int) -> None: + self.test_stop_called = True + + def test_skipped(self, filename: str, name: str) -> None: + self.mark_as_not_applicable() + self.skip_to_last_step() def step_skipped(self, name: str, expression: str) -> None: self.current_test_step.mark_as_not_applicable("Test step skipped") - self.step_over() + self.next_step() def step_start(self, name: str) -> None: pass @@ -83,7 +101,17 @@ def step_success(self, logger: Any, logs: str, duration: int, request: Any) -> N duration_ms = int(duration / 1000) self.step_execution_times.append(duration_ms) self.analytics = self.generate_analytics_data() - self.step_over() + self.next_step() + + def step_failure( + self, logger: Any, logs: str, duration: int, request: Any, received: Any + ) -> None: + failure_msg = "Performance test step failure" + if logs: + failure_msg += f": {logs}" + + self.mark_step_failure(failure_msg) + self.skip_to_last_step() def generate_analytics_data(self) -> dict[str, str]: print(self.step_execution_times) @@ -108,19 +136,23 @@ def generate_analytics_data(self) -> dict[str, str]: @classmethod def pics(cls) -> set[str]: """Test Case level PICS. 
         Read directly from parsed Python Test."""
-        return cls.python_test.PICS
+        return cls.performance_test.PICS

     @classmethod
-    def class_factory(cls, test: PythonTest, python_test_version: str) -> Type[T]:
+    def class_factory(
+        cls, test: PerformanceTest, performance_test_version: str, mandatory: bool
+    ) -> Type[T]:
         """Dynamically declares a subclass based on the type of Python test."""
         case_class: Type[PerformanceTestCase] = PerformanceTestCase

         return case_class.__class_factory(
-            test=test, python_test_version=python_test_version
+            test=test, performance_test_version=performance_test_version
         )

     @classmethod
-    def __class_factory(cls, test: PythonTest, python_test_version: str) -> Type[T]:
+    def __class_factory(
+        cls, test: PerformanceTest, performance_test_version: str
+    ) -> Type[T]:
         """class factory method for PerformanceTestCase."""
         title = cls.__title(test.name)
         class_name = cls.__class_name(test.name)
@@ -129,12 +161,12 @@ def __class_factory(cls, test: PythonTest, python_test_version: str) -> Type[T]:
             class_name,
             (cls,),
             {
-                "python_test": test,
-                "python_test_version": python_test_version,
+                "performance_test": test,
+                "performance_test_version": performance_test_version,
                 "metadata": {
                     "public_id": (
                         test.name
-                        if python_test_version != CUSTOM_TEST_IDENTIFIER
+                        if performance_test_version != CUSTOM_TEST_IDENTIFIER
                         else test.name + "-" + CUSTOM_TEST_IDENTIFIER
                     ),
                     "version": "0.0.1",
@@ -162,6 +194,9 @@ def __title(identifier: str) -> str:

         return title

+    async def setup(self) -> None:
+        logger.info("Test Setup")
+
     async def cleanup(self) -> None:
         logger.info("Test Cleanup")
         try:
@@ -195,31 +230,32 @@ def handle_logs_temp(self) -> None:
             for line in f:
                 if any(specific_string in line for specific_string in filter_entries):
                     logger.log(PYTHON_TEST_LEVEL, line)
-            # lines = f.read()
-            # logger.log(PYTHON_TEST_LEVEL, lines)

     async def execute(self) -> None:
         try:
-            logger.info("Running Stress & Stability Test: " + self.python_test.name)
+            logger.info(
+                "Running Stress & Stability Test: " + self.performance_test.name
+            )

-            BaseManager.register("TestRunnerHooks", SDKPythonTestRunnerHooks)
+            BaseManager.register("TestRunnerHooks", SDKPerformanceRunnerHooks)
             manager = BaseManager(address=("0.0.0.0", 50000), authkey=b"abc")
             manager.start()
             test_runner_hooks = manager.TestRunnerHooks()  # type: ignore

-            if not self.python_test.path:
+            if not self.performance_test.path:
                 raise PerformanceTestCaseError(
-                    f"Missing file path for python test {self.python_test.name}"
+                    f"Missing file path for python test {self.performance_test.name}"
                 )

             # get script path including folder (sdk or custom) and excluding extension
             test_script_relative_path = Path(
-                *self.python_test.path.parts[-2:]
+                *self.performance_test.path.parts[-2:]
             ).with_suffix("")

             command = [
                 f"{RUNNER_CLASS_PATH} {test_script_relative_path}"
-                f" {self.python_test.class_name} --tests test_{self.python_test.name}"
+                f" {self.performance_test.class_name}"
+                f" --tests test_{self.performance_test.name}"
             ]

             # Generate the command argument by getting the test_parameters from
@@ -255,14 +291,6 @@ async def execute(self) -> None:

             # Step: Show test logs

-            # Python tests that don't follow the template only have the 2 default steps
-            # and, at this point, will still be in the first step because of the
-            # step_over method. So we have to explicitly move on to the next step here.
-            # The tests that do follow the template will have additional steps and will
-            # have already been moved to the correct step by the hooks' step methods.
-            if len(self.test_steps) == 2:
-                self.next_step()
-
             logger.info("---- Start of Performance test logs ----")
             self.handle_logs_temp()
             # Uncomment line bellow when the workaround has a definitive solution
@@ -274,7 +302,12 @@ async def execute(self) -> None:
         finally:
             pass

-    async def __handle_update(self, update: SDKPythonTestResultBase) -> None:
+    def skip_to_last_step(self) -> None:
+        self.current_test_step.mark_as_completed()
+        self.current_test_step_index = len(self.test_steps) - 1
+        self.current_test_step.mark_as_executing()
+
+    async def __handle_update(self, update: SDKPerformanceResultBase) -> None:
         await self.__call_function_from_name(update.type.value, update.params_dict())

     async def __call_function_from_name(self, func_name: str, kwargs: Any) -> None:
@@ -291,7 +324,7 @@ async def __call_function_from_name(self, func_name: str, kwargs: Any) -> None:

     def create_test_steps(self) -> None:
         self.test_steps = [TestStep("Start Performance test")]
-        for step in self.python_test.steps:
-            python_test_step = TestStep(step.label)
-            self.test_steps.append(python_test_step)
+        for step in self.performance_test.steps:
+            performance_test_step = TestStep(step.label)
+            self.test_steps.append(performance_test_step)
         self.test_steps.append(TestStep("Show test logs"))
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/models/test_declarations.py b/test_collections/matter/sdk_tests/support/performance_tests/models/test_declarations.py
index 16f5bb2b..ebdf3f64 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/models/test_declarations.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/models/test_declarations.py
@@ -22,44 +22,48 @@
 )

 from ...models.sdk_test_folder import SDKTestFolder
-from .python_test_models import MatterTestType, PythonTest
+from .performance_tests_models import MatterTestType, PerformanceTest
 from .test_case import PerformanceTestCase
-from .test_suite import PythonTestSuite, SuiteType
+from .test_suite import PerformanceSuiteType, PerformanceTestSuite


-class PythonCollectionDeclaration(TestCollectionDeclaration):
+class PerformanceCollectionDeclaration(TestCollectionDeclaration):
     def __init__(self, folder: SDKTestFolder, name: str) -> None:
         super().__init__(path=str(folder.path), name=name)
-        self.python_test_version = folder.version
+        self.performance_test_version = folder.version


-class PythonSuiteDeclaration(TestSuiteDeclaration):
+class PerformanceSuiteDeclaration(TestSuiteDeclaration):
     """Direct initialization for Python Test Suite."""

-    class_ref: Type[PythonTestSuite]
+    class_ref: Type[PerformanceTestSuite]

-    def __init__(self, name: str, suite_type: SuiteType, version: str) -> None:
+    def __init__(
+        self, name: str, suite_type: PerformanceSuiteType, version: str
+    ) -> None:
         super().__init__(
-            PythonTestSuite.class_factory(
+            PerformanceTestSuite.class_factory(
                 name=name,
                 suite_type=suite_type,
-                python_test_version=version,
+                performance_test_version=version,
             )
         )


-class PythonCaseDeclaration(TestCaseDeclaration):
+class PerformanceCaseDeclaration(TestCaseDeclaration):
     """Direct initialization for Python Test Case."""

     class_ref: Type[PerformanceTestCase]

-    def __init__(self, test: PythonTest, python_test_version: str) -> None:
+    def __init__(self, test: PerformanceTest, performance_test_version: str) -> None:
         super().__init__(
             PerformanceTestCase.class_factory(
-                test=test, python_test_version=python_test_version
+                test=test,
+                performance_test_version=performance_test_version,
+                mandatory=False,
             )
         )

     @property
     def test_type(self) -> MatterTestType:
-        return self.class_ref.python_test.type
+        return self.class_ref.performance_test.type
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/models/test_suite.py b/test_collections/matter/sdk_tests/support/performance_tests/models/test_suite.py
index da4d5376..e73a3d94 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/models/test_suite.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/models/test_suite.py
@@ -18,52 +18,42 @@
 from app.test_engine.logger import test_engine_logger as logger
 from app.test_engine.models import TestSuite
-from app.user_prompt_support.user_prompt_support import UserPromptSupport

 from ...sdk_container import SDKContainer
-from ...utils import prompt_for_commissioning_mode
-from .utils import commission_device


-class SuiteType(Enum):
-    COMMISSIONING = 1
-    NO_COMMISSIONING = 2
-    LEGACY = 3
+class PerformanceSuiteType(Enum):
+    PERFORMANCE = 1


 # Custom Type variable used to annotate the factory methods of classmethod.
-T = TypeVar("T", bound="PythonTestSuite")
+T = TypeVar("T", bound="PerformanceTestSuite")


-class PythonTestSuite(TestSuite):
-    """Base class for all Python tests based test suites.
+class PerformanceTestSuite(TestSuite):
+    """Base class for all Performance tests based test suites.

     This class provides a class factory that will dynamically declare a new sub-class
     based on the suite-type.
     """

-    python_test_version: str
+    performance_test_version: str
     suite_name: str
-    sdk_container: SDKContainer = SDKContainer(logger)
+    sdk_container: SDKContainer = SDKContainer()

     @classmethod
     def class_factory(
-        cls, suite_type: SuiteType, name: str, python_test_version: str
+        cls, suite_type: PerformanceSuiteType, name: str, performance_test_version: str
     ) -> Type[T]:
         """Dynamically declares a subclass based on the type of test suite."""
-        suite_class: Type[PythonTestSuite]
-
-        if suite_type == SuiteType.COMMISSIONING:
-            suite_class = CommissioningPythonTestSuite
-        else:
-            suite_class = PythonTestSuite
+        suite_class: Type[PerformanceTestSuite] = PerformanceTestSuite

         return suite_class.__class_factory(
-            name=name, python_test_version=python_test_version
+            name=name, performance_test_version=performance_test_version
         )

     @classmethod
-    def __class_factory(cls, name: str, python_test_version: str) -> Type[T]:
+    def __class_factory(cls, name: str, performance_test_version: str) -> Type[T]:
         """Common class factory method for all subclasses of PythonTestSuite."""

         return type(
@@ -71,10 +61,12 @@ def __class_factory(cls, name: str, python_test_version: str) -> Type[T]:
             (cls,),
             {
                 "name": name,
-                "python_test_version": python_test_version,
+                "performance_test_version": performance_test_version,
                 "metadata": {
                     "public_id": (
-                        name if python_test_version != "custom" else name + "-custom"
+                        name
+                        if performance_test_version != "custom"
+                        else name + "-custom"
                     ),
                     "version": "0.0.1",
                     "title": name,
@@ -86,17 +78,11 @@ async def setup(self) -> None:
         """Override Setup to log Python Test version and set PICS."""
         logger.info("Suite Setup")
-        logger.info(f"Python Test Version: {self.python_test_version}")
+        logger.info(f"Python Test Version: {self.performance_test_version}")

         logger.info("Setting up SDK container")
         await self.sdk_container.start()

-        if len(self.pics.clusters) > 0:
-            logger.info("Create PICS file for DUT")
-            self.sdk_container.set_pics(pics=self.pics)
-        else:
-            self.sdk_container.reset_pics_state()
-
     async def cleanup(self) -> None:
         logger.info("Suite Cleanup")
@@ -105,13 +91,3 @@ async def cleanup(self) -> None:
             self.sdk_container.destroy()
         except Exception:
             pass
-
-
-class CommissioningPythonTestSuite(PythonTestSuite, UserPromptSupport):
-    async def setup(self) -> None:
-        await super().setup()
-
-        await prompt_for_commissioning_mode(self, logger, None, self.cancel)
-
-        logger.info("Commission DUT")
-        commission_device(self.config, logger)  # type: ignore
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/models/utils.py b/test_collections/matter/sdk_tests/support/performance_tests/models/utils.py
index 6573eba2..92f4509e 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/models/utils.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/models/utils.py
@@ -77,7 +77,7 @@ def commission_device(
     config: TestEnvironmentConfig,
     logger: loguru.Logger,
 ) -> None:
-    sdk_container = SDKContainer(logger)
+    sdk_container: SDKContainer = SDKContainer()

     command = [f"{RUNNER_CLASS_PATH} commission"]
     command_arguments = generate_command_arguments(config)
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/sdk_performance_tests.py b/test_collections/matter/sdk_tests/support/performance_tests/sdk_performance_tests.py
index d737ee52..32275b11 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/sdk_performance_tests.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/sdk_performance_tests.py
@@ -16,18 +16,17 @@
 from pathlib import Path

 from ..models.sdk_test_folder import SDKTestFolder
-from .models.python_test_models import PythonTestType
-from .models.python_test_parser import parse_python_script
+from .models.performance_tests_parser import parse_performance_tests
 from .models.test_declarations import (
-    PythonCaseDeclaration,
-    PythonCollectionDeclaration,
-    PythonSuiteDeclaration,
+    PerformanceCaseDeclaration,
+    PerformanceCollectionDeclaration,
+    PerformanceSuiteDeclaration,
 )
-from .models.test_suite import SuiteType
+from .models.test_suite import PerformanceSuiteType

 ###
 # This file hosts logic to load and parse Stress/Stability test cases, located in
-# `./models/rpc_client/`.
+# `./scripts/sdk/`.
 #
 # This is a temporary solution since those tests should come from SDK.
 #
@@ -38,74 +37,60 @@


 def _init_test_suites(
-    python_test_version: str,
-) -> dict[SuiteType, PythonSuiteDeclaration]:
+    performance_test_version: str,
+) -> dict[PerformanceSuiteType, PerformanceSuiteDeclaration]:
     return {
-        SuiteType.COMMISSIONING: PythonSuiteDeclaration(
+        PerformanceSuiteType.PERFORMANCE: PerformanceSuiteDeclaration(
             name="Performance Test Suite",
-            suite_type=SuiteType.COMMISSIONING,
-            version=python_test_version,
-        ),
-        SuiteType.NO_COMMISSIONING: PythonSuiteDeclaration(
-            name="Performance Test Suite",
-            suite_type=SuiteType.NO_COMMISSIONING,
-            version=python_test_version,
-        ),
-        SuiteType.LEGACY: PythonSuiteDeclaration(
-            name="Performance Test Suite",
-            suite_type=SuiteType.LEGACY,
-            version=python_test_version,
+            suite_type=PerformanceSuiteType.PERFORMANCE,
+            version=performance_test_version,
         ),
     }


-def _parse_python_script_to_test_case_declarations(
-    python_test_path: Path, python_test_version: str
-) -> list[PythonCaseDeclaration]:
-    python_tests = parse_python_script(python_test_path)
+def _parse_performance_tests_to_test_case_declarations(
+    performance_test_path: Path, performance_test_version: str
+) -> list[PerformanceCaseDeclaration]:
+    performance_tests = parse_performance_tests(performance_test_path)

     return [
-        PythonCaseDeclaration(test=python_test, python_test_version=python_test_version)
-        for python_test in python_tests
+        PerformanceCaseDeclaration(
+            test=performance_test, performance_test_version=performance_test_version
+        )
+        for performance_test in performance_tests
     ]


 def _parse_all_sdk_python_tests(
-    python_test_files: list[Path], python_test_version: str
-) -> list[PythonSuiteDeclaration]:
+    performance_test_files: list[Path], performance_test_version: str
+) -> list[PerformanceSuiteDeclaration]:
     """Parse all python test files and add them into Automated Suite"""
-    suites = _init_test_suites(python_test_version)
+    suites = _init_test_suites(performance_test_version)

-    for python_test_file in python_test_files:
-        test_cases = _parse_python_script_to_test_case_declarations(
-            python_test_path=python_test_file,
-            python_test_version=python_test_version,
+    for performance_test_file in performance_test_files:
+        test_cases = _parse_performance_tests_to_test_case_declarations(
+            performance_test_path=performance_test_file,
+            performance_test_version=performance_test_version,
         )

         for test_case in test_cases:
-            python_test_type = test_case.class_ref.python_test.python_test_type
-            if python_test_type == PythonTestType.COMMISSIONING:
-                suites[SuiteType.COMMISSIONING].add_test_case(test_case)
-            elif python_test_type == PythonTestType.NO_COMMISSIONING:
-                suites[SuiteType.NO_COMMISSIONING].add_test_case(test_case)
-            else:
-                suites[SuiteType.LEGACY].add_test_case(test_case)
+            suites[PerformanceSuiteType.PERFORMANCE].add_test_case(test_case)

     return [s for s in list(suites.values()) if len(s.test_cases) != 0]


 def sdk_performance_test_collection(
-    python_test_folder: SDKTestFolder = STRESS_TEST_FOLDER,
-) -> PythonCollectionDeclaration:
+    performance_test_folder: SDKTestFolder = STRESS_TEST_FOLDER,
+) -> PerformanceCollectionDeclaration:
     """Declare a new collection of test suites."""
-    collection = PythonCollectionDeclaration(
-        name="SDK Performance Tests", folder=python_test_folder
+    collection = PerformanceCollectionDeclaration(
+        name="SDK Performance Tests", folder=performance_test_folder
     )

-    files = python_test_folder.file_paths(extension=".py")
-    version = python_test_folder.version
+    files = performance_test_folder.file_paths(extension=".py")
+    version = performance_test_folder.version
     suites = _parse_all_sdk_python_tests(
-        python_test_files=files, python_test_version=version
+        performance_test_files=files, performance_test_version=version
     )

     for suite in suites:
diff --git a/test_collections/matter/sdk_tests/support/performance_tests/utils.py b/test_collections/matter/sdk_tests/support/performance_tests/utils.py
index db930e9e..1759e694 100644
--- a/test_collections/matter/sdk_tests/support/performance_tests/utils.py
+++ b/test_collections/matter/sdk_tests/support/performance_tests/utils.py
@@ -30,6 +30,8 @@
 def create_summary_report(
     timestamp: str, log_lines: list, commissioning_method: str
 ) -> tuple[str, str]:
+    tc_name = ""
+    tc_suite = ""
     log_lines_list = "\n".join(log_lines)

     LOGS_FOLDER = "/test_collections/logs"
@@ -58,8 +60,6 @@ def create_summary_report(
     file_path = os.path.join(CONTAINER_OUT_FOLDER, file_name)
     commissioning_obj: Optional[Commissioning] = None
     file_execution_time = None
-    tc_name = ""
-    tc_suite = ""
     tc_result = None
     tc_execution_in_file = 0