Commit 56fb594

adjustments

gladystonfranca committed Nov 13, 2024
1 parent 0cfb15f commit 56fb594

Showing 11 changed files with 211 additions and 228 deletions.
@@ -13,8 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from typing import Optional
-
 from app.test_engine.models.test_declarations import TestCollectionDeclaration

 from .sdk_performance_tests import sdk_performance_test_collection
(next changed file)
@@ -13,5 +13,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from .test_case import PythonTestCase
-from .test_suite import PythonTestSuite, SuiteType
+from .test_case import PerformanceTest
+from .test_suite import PerformanceSuiteType, PerformanceTestSuite
(next changed file)
@@ -21,7 +21,7 @@
 from pydantic import BaseModel


-class SDKPythonTestResultEnum(str, Enum):
+class SDKPerformanceResultEnum(str, Enum):
     START = "start"
     STOP = "stop"
     TEST_START = "test_start"
@@ -36,93 +36,93 @@ class SDKPythonTestResultEnum(str, Enum):
     SHOW_PROMPT = "show_prompt"


-class SDKPythonTestResultBase(BaseModel):
-    type: SDKPythonTestResultEnum
+class SDKPerformanceResultBase(BaseModel):
+    type: SDKPerformanceResultEnum

     def params_dict(self) -> dict:
         return self.dict(exclude={"type"})


-class SDKPythonTestResultStart(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.START
+class SDKPerformanceResultStart(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.START
     count: int
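A note on the recurring `type` change above: every subclass default moves from a bare `type = ...` to an annotated `type: SDKPerformanceResultEnum = ...`. Pydantic v1 infers a field from an un-annotated default, but Pydantic v2 rejects un-annotated attributes on a `BaseModel` subclass, so the explicit annotation is what keeps these defaults working as field overrides. A minimal sketch of the pattern (assuming Pydantic v2; `Kind`, `ResultBase`, and `ResultStart` are hypothetical stand-ins for the models in this diff):

from enum import Enum
from pydantic import BaseModel


class Kind(str, Enum):
    START = "start"


class ResultBase(BaseModel):
    kind: Kind

    def params_dict(self) -> dict:
        # Same idea as params_dict above: drop the discriminator field,
        # keeping only the payload. model_dump is the Pydantic v2 API.
        return self.model_dump(exclude={"kind"})


class ResultStart(ResultBase):
    # The annotation is what makes this a valid field override in Pydantic v2;
    # a bare `kind = Kind.START` is rejected ("non-annotated attribute").
    kind: Kind = Kind.START
    count: int


print(ResultStart(count=3).kind)           # Kind.START
print(ResultStart(count=3).params_dict())  # {'count': 3}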


-class SDKPythonTestResultStop(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STOP
+class SDKPerformanceResultStop(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STOP
     duration: int


-class SDKPythonTestResultTestStart(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.TEST_START
+class SDKPerformanceResultTestStart(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.TEST_START
     filename: Optional[str]
     name: Optional[str]
     count: Optional[int]
     steps: Optional[list[str]]


-class SDKPythonTestResultTestStop(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.TEST_STOP
+class SDKPerformanceResultTestStop(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.TEST_STOP
     duration: Optional[int]
     exception: Any


-class SDKPythonTestResultTestSkipped(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.TEST_SKIPPED
+class SDKPerformanceResultTestSkipped(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.TEST_SKIPPED
     filename: Optional[str]
     name: Optional[str]


-class SDKPythonTestResultStepSkipped(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_SKIPPED
+class SDKPerformanceResultStepSkipped(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_SKIPPED
     name: Optional[str]
     expression: Optional[str]


-class SDKPythonTestResultStepStart(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_START
+class SDKPerformanceResultStepStart(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_START
     name: Optional[str]


-class SDKPythonTestResultStepSuccess(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_SUCCESS
+class SDKPerformanceResultStepSuccess(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_SUCCESS
     logger: Any
     logs: Any
     duration: int
     request: Any


-class SDKPythonTestResultStepFailure(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_FAILURE
+class SDKPerformanceResultStepFailure(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_FAILURE
     logger: Any
     logs: Any
     duration: int
     request: Any
     received: Any


-class SDKPythonTestResultStepUnknown(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_UNKNOWN
+class SDKPerformanceResultStepUnknown(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_UNKNOWN


-class SDKPythonTestResultStepManual(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.STEP_MANUAL
+class SDKPerformanceResultStepManual(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.STEP_MANUAL


-class SDKPythonTestResultShowPrompt(SDKPythonTestResultBase):
-    type = SDKPythonTestResultEnum.SHOW_PROMPT
+class SDKPerformanceResultShowPrompt(SDKPerformanceResultBase):
+    type: SDKPerformanceResultEnum = SDKPerformanceResultEnum.SHOW_PROMPT
     msg: str
     placeholder: Optional[str]
     default_value: Optional[str]


-class SDKPythonTestRunnerHooks(TestRunnerHooks):
+class SDKPerformanceRunnerHooks(TestRunnerHooks):
     finished = False
     results: Queue

     def __init__(self) -> None:
-        SDKPythonTestRunnerHooks.finished = False
-        SDKPythonTestRunnerHooks.results = Queue()
+        SDKPerformanceRunnerHooks.finished = False
+        SDKPerformanceRunnerHooks.results = Queue()

     def update_test(self) -> Union[dict, None]:
         try:
@@ -132,41 +132,43 @@ def update_test(self) -> Union[dict, None]:
             return None

     def is_finished(self) -> bool:
-        return SDKPythonTestRunnerHooks.finished
+        return SDKPerformanceRunnerHooks.finished
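For orientation: the hooks push each result model onto a class-level `Queue`, and a poller drains it via `update_test()` (the body is collapsed in this view, but the signature says it returns a dict or None) until `stop()` flips `finished`. A hypothetical consumer loop, purely illustrative of how these two methods fit together:

# Hypothetical polling loop (not part of this diff). `handle` is a stand-in
# for whatever dispatches on the result type.
hooks = SDKPerformanceRunnerHooks()
while not hooks.is_finished():
    update = hooks.update_test()  # presumably None while the queue is empty
    if update is not None:
        handle(update)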

     def start(self, count: int) -> None:
-        self.results.put(SDKPythonTestResultStart(count=count))
+        self.results.put(SDKPerformanceResultStart(count=count))

     def stop(self, duration: int) -> None:
-        self.results.put(SDKPythonTestResultStop(duration=duration))
-        SDKPythonTestRunnerHooks.finished = True
+        self.results.put(SDKPerformanceResultStop(duration=duration))
+        SDKPerformanceRunnerHooks.finished = True

     def test_start(
         self, filename: str, name: str, count: int, steps: list[str] = []
     ) -> None:
         self.results.put(
-            SDKPythonTestResultTestStart(
+            SDKPerformanceResultTestStart(
                 filename=filename, name=name, count=count, steps=steps
             )
         )

     def test_stop(self, exception: Exception, duration: int) -> None:
         self.results.put(
-            SDKPythonTestResultTestStop(exception=exception, duration=duration)
+            SDKPerformanceResultTestStop(exception=exception, duration=duration)
         )

     def test_skipped(self, filename: str, name: str) -> None:
-        self.results.put(SDKPythonTestResultTestSkipped(filename=filename, name=name))
+        self.results.put(SDKPerformanceResultTestSkipped(filename=filename, name=name))

     def step_skipped(self, name: str, expression: str) -> None:
-        self.results.put(SDKPythonTestResultStepSkipped(expression=expression))
+        self.results.put(
+            SDKPerformanceResultStepSkipped(name=name, expression=expression)
+        )

     def step_start(self, name: str) -> None:
-        self.results.put(SDKPythonTestResultStepStart(name=name))
+        self.results.put(SDKPerformanceResultStepStart(name=name))

     def step_success(self, logger: Any, logs: Any, duration: int, request: Any) -> None:
         self.results.put(
-            SDKPythonTestResultStepSuccess(
+            SDKPerformanceResultStepSuccess(
                 logger=logger,
                 logs=logs,
                 duration=duration,
@@ -178,7 +180,7 @@ def step_failure(
         self, logger: Any, logs: Any, duration: int, request: Any, received: Any
     ) -> None:
         self.results.put(
-            SDKPythonTestResultStepFailure(
+            SDKPerformanceResultStepFailure(
                 logger=logger,
                 logs=logs,
                 duration=duration,
@@ -188,19 +190,20 @@ def step_failure(
         )

     def step_unknown(self) -> None:
-        self.results.put(SDKPythonTestResultStepUnknown())
+        self.results.put(SDKPerformanceResultStepUnknown())

-    def step_manual(self) -> None:
-        self.results.put(SDKPythonTestResultStepManual())
+    async def step_manual(self) -> None:
+        self.results.put(SDKPerformanceResultStepManual())

     def show_prompt(
         self,
         msg: str,
         placeholder: Optional[str] = None,
         default_value: Optional[str] = None,
+        endpoint_id: Optional[int] = None,
     ) -> None:
         self.results.put(
-            SDKPythonTestResultShowPrompt(
+            SDKPerformanceResultShowPrompt(
                 msg=msg, placeholder=placeholder, default_value=default_value
             )
         )
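One behavioral change hides among the renames above: `step_manual` becomes a coroutine, so callers must now await it. A hypothetical call site, assuming the class above is in scope:

import asyncio

# step_manual is now `async def`, so it returns a coroutine that must be
# awaited; calling it without awaiting would enqueue nothing.
hooks = SDKPerformanceRunnerHooks()
asyncio.run(hooks.step_manual())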
(next changed file)
@@ -23,20 +23,14 @@
 ###


-class PythonTestType(Enum):
-    # - PythonTestType.COMMISSIONING: test cases that have a commissioning first step
-    # - PythonTestType.NO_COMMISSIONING: test cases that follow the expected template
-    #   but don't have a commissioning first step
-    # - PythonTestType.LEGACY: test cases that don't follow the expected template
-    COMMISSIONING = 1
-    NO_COMMISSIONING = 2
-    LEGACY = 3
+class PerformanceTestType(Enum):
+    PERFORMANCE = 1


-class PythonTest(MatterTest):
+class PerformanceTest(MatterTest):
     description: str
     class_name: str
-    python_test_type: PythonTestType
+    performance_test_type: PerformanceTestType

     def __init__(self, **kwargs: Any) -> None:
         super().__init__(**kwargs)
(next changed file)
@@ -19,7 +19,7 @@
 from typing import Any, List, Optional, Union

 from ...models.matter_test_models import MatterTestStep, MatterTestType
-from .python_test_models import PythonTest, PythonTestType
+from .performance_tests_models import PerformanceTest, PerformanceTestType

 ARG_STEP_DESCRIPTION_INDEX = 1
 KEYWORD_IS_COMISSIONING_INDEX = 0
@@ -31,8 +31,8 @@
 FunctionDefType = Union[ast.FunctionDef, ast.AsyncFunctionDef]

-def parse_python_script(path: Path) -> list[PythonTest]:
-    """Parse a python file into a list of PythonTest models.
+def parse_performance_tests(path: Path) -> list[PerformanceTest]:
+    """Parse a python file into a list of PerformanceTest models.

     This will also annotate parsed python tests with their file path and test type.
@@ -46,15 +46,15 @@ def parse_python_script(path: Path) -> list[PythonTest]:
     * steps_[test_name] - (optional) This method should return a list with the steps'
       descriptions

-    Example: file TC_ACE_1_3.py has the methods test_TC_ACE_1_3, desc_TC_ACE_1_3,
-    pics_TC_ACE_1_3 and steps_TC_ACE_1_3.
+    Example: file TC_COMMISSIONING_1_0.py has the methods test_TC_COMMISSIONING_1_0,
+    desc_TC_COMMISSIONING_1_0, and steps_TC_COMMISSIONING_1_0.
     """
     with open(path, "r") as python_file:
         parsed_python_file = ast.parse(python_file.read())

     test_classes = __test_classes(parsed_python_file)

-    test_cases: list[PythonTest] = []
+    test_cases: list[PerformanceTest] = []
     for c in test_classes:
         test_methods = __test_methods(c)
         test_names = __test_case_names(test_methods)
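The renamed parser keeps the same discovery approach: parse the file with `ast`, find the test classes, and collect the `test_`/`desc_`/`steps_`/`pics_` companion methods. A self-contained sketch of that idea (simplified; `discover_test_names` is hypothetical, not one of this module's private helpers):

import ast
from pathlib import Path


def discover_test_names(path: Path) -> list[str]:
    # Walk the module AST and collect methods that follow the
    # test_[name] convention described in the docstring above.
    tree = ast.parse(path.read_text())
    names: list[str] = []
    for node in ast.walk(tree):
        if not isinstance(node, ast.ClassDef):
            continue
        for method in node.body:
            if isinstance(method, (ast.FunctionDef, ast.AsyncFunctionDef)):
                if method.name.startswith("test_"):
                    names.append(method.name.removeprefix("test_"))
    return names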
@@ -132,7 +132,7 @@ def __test_case_names(methods: list[FunctionDefType]) -> list[str]:

 def __parse_test_case(
     tc_name: str, methods: list[FunctionDefType], class_name: str, path: Path
-) -> PythonTest:
+) -> PerformanceTest:
     # Currently config is not configured in Python Testing
     tc_config: dict = {}

@@ -148,9 +148,6 @@ def __parse_test_case(
     if desc_method:
         tc_desc = __retrieve_description(desc_method)

-    # If the python test does not implement the steps template method,
-    # the test case will be presented in UI and the whole test case will be
-    # executed as one step
     steps_method = __get_method_by_name(steps_method_name, methods)
     if steps_method:
         tc_steps = __retrieve_steps(steps_method)
@@ -159,19 +156,7 @@ def __parse_test_case(
     if pics_method:
         tc_pics = __retrieve_pics(pics_method)

-    # - PythonTestType.COMMISSIONING: test cases that have a commissioning first step
-    # - PythonTestType.NO_COMMISSIONING: test cases that follow the expected template
-    #   but don't have a commissioning first step
-    # - PythonTestType.LEGACY: test cases that don't follow the expected template
-    # We use the desc_[test_name] method as an indicator that the test case follows the
-    # expected template
-    python_test_type = PythonTestType.LEGACY
-    if len(tc_steps) > 0 and tc_steps[0].is_commissioning:
-        python_test_type = PythonTestType.COMMISSIONING
-    elif desc_method:
-        python_test_type = PythonTestType.NO_COMMISSIONING
-
-    return PythonTest(
+    return PerformanceTest(
         name=tc_name,
         description=tc_desc,
         steps=tc_steps,
@@ -180,7 +165,7 @@ def __parse_test_case(
         path=path,
         type=MatterTestType.AUTOMATED,
         class_name=class_name,
-        python_test_type=python_test_type,
+        performance_test_type=PerformanceTestType.PERFORMANCE,
     )


@@ -209,7 +194,12 @@ def __retrieve_steps(method: FunctionDefType) -> List[MatterTestStep]:
         ].value.value

         python_steps.append(
-            MatterTestStep(label=step_name, is_commissioning=arg_is_commissioning)
+            MatterTestStep(
+                label=step_name,
+                command=None,
+                arguments=None,
+                is_commissioning=arg_is_commissioning,
+            )
         )

     return python_steps
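The expanded `MatterTestStep(...)` call is consistent with a Pydantic v2 migration: in v2 an `Optional[...]` annotation no longer implies a default of `None`, so such fields become required and must be passed explicitly, even as `None`. A minimal sketch of the difference, assuming Pydantic v2 (`Step` is a hypothetical stand-in for `MatterTestStep`):

from typing import Optional
from pydantic import BaseModel, ValidationError


class Step(BaseModel):
    label: str
    command: Optional[str]    # v2: required, but may be None
    arguments: Optional[dict]


try:
    Step(label="read attribute")  # v2 raises: command and arguments missing
except ValidationError as err:
    print(err.error_count(), "missing fields")

step = Step(label="read attribute", command=None, arguments=None)  # ok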
(remaining file diffs not loaded)
