Merge branch 'main' into v2.11+fall2024
rquidute authored Dec 17, 2024
2 parents 2caab2e + 1e042fa commit b989b6e
Showing 42 changed files with 2,792 additions and 65 deletions.
1 change: 1 addition & 0 deletions .flake8
@@ -8,3 +8,4 @@ per-file-ignores =
test_collections/manual_tests/**/*:E501,W291
test_collections/app1_tests/**/*:E501
test_collections/semi_automated_tests/**/*:E501
alembic/versions/**/*:E128,W293,F401
3 changes: 2 additions & 1 deletion .gitignore
@@ -12,4 +12,5 @@ test_environment.config
SerialTests.lock
test_db_creation.lock
.sha_information
test_collections/matter/sdk_tests/sdk_checkout
test_collections/matter/sdk_tests/sdk_checkout
performance-logs
2 changes: 1 addition & 1 deletion .vscode/settings.json
@@ -12,7 +12,7 @@
"editor.defaultFormatter": "ms-python.black-formatter", // black
"editor.formatOnSave": true, // black
"editor.codeActionsOnSave": {
"source.organizeImports": true // isort
"source.organizeImports": "explicit"
},
},
// black
@@ -0,0 +1,29 @@
"""Adding count on metadata to support Performance Test
Revision ID: 0a251edfd975
Revises: 96ee37627a48
Create Date: 2024-05-16 06:36:51.663230
"""

from alembic import op
import sqlalchemy as sa


# revision identifiers, used by Alembic.
revision = "0a251edfd975"
down_revision = "e2c185af1226"
branch_labels = None
depends_on = None


def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("testcasemetadata", sa.Column("count", sa.Text(), nullable=True))
# ### end Alembic commands ###


def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("testcasemetadata", "count")
# ### end Alembic commands ###
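
For context, a minimal sketch of applying this new revision through Alembic's command API; the path to alembic.ini is an assumption, not something this diff shows:

# Apply migrations up to this revision ("head" would apply everything pending).
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # assumed location of the project's Alembic config
command.upgrade(cfg, "0a251edfd975")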
@@ -5,6 +5,7 @@
Create Date: 2023-08-15 14:42:39.893126
"""

import sqlalchemy as sa

from alembic import op
@@ -5,6 +5,7 @@
Create Date: 2024-04-24 17:26:26.770729
"""

from alembic import op


1 change: 1 addition & 0 deletions alembic/versions/e2c185af1226_pics_v2_support.py
@@ -5,6 +5,7 @@
Create Date: 2024-06-19 11:46:15.158526
"""

from alembic import op
import sqlalchemy as sa

91 changes: 91 additions & 0 deletions app/api/api_v1/endpoints/test_run_executions.py
@@ -14,9 +14,12 @@
# limitations under the License.
#
import json
import os
from datetime import datetime
from http import HTTPStatus
from typing import Any, Dict, List, Optional

import requests
from fastapi import APIRouter, BackgroundTasks, Depends, File, HTTPException, UploadFile
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse, StreamingResponse
@@ -37,6 +40,10 @@
selected_tests_from_execution,
)
from app.version import version_information
from test_collections.matter.sdk_tests.support.performance_tests.utils import (
create_summary_report,
)
from test_collections.matter.test_environment_config import TestEnvironmentConfigMatter

router = APIRouter()

@@ -479,3 +486,87 @@ def import_test_run_execution(
status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
detail=str(error),
)


date_pattern_out_file = "%Y_%m_%d_%H_%M_%S"


@router.post("/{id}/performance_summary")
def generate_summary_log(
*,
db: Session = Depends(get_db),
id: int,
project_id: int,
) -> JSONResponse:
"""
Imports a test run execution to the the given project_id.
"""

project = crud.project.get(db=db, id=project_id)

if not project:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Project not found"
)

project_config = TestEnvironmentConfigMatter(**project.config)
matter_qa_url = None
LOGS_FOLDER = "/test_collections/logs"
HOST_BACKEND = os.getenv("BACKEND_FILEPATH_ON_HOST") or ""
HOST_OUT_FOLDER = HOST_BACKEND + LOGS_FOLDER

if (
project_config.test_parameters
and "matter_qa_url" in project_config.test_parameters
):
matter_qa_url = project_config.test_parameters["matter_qa_url"]
else:
raise HTTPException(
status_code=HTTPStatus.UNPROCESSABLE_ENTITY,
detail="matter_qa_url must be configured",
)

    error_detail = (
        "The LogDisplay server is not responding.\n"
        "Verify if the tool was installed, configured and initiated properly"
    )
    try:
        page = requests.get(f"{matter_qa_url}/home")
    except requests.RequestException:
        raise HTTPException(
            status_code=HTTPStatus.SERVICE_UNAVAILABLE, detail=error_detail
        )
    if page.status_code != HTTPStatus.OK:
        raise HTTPException(status_code=page.status_code, detail=error_detail)

commissioning_method = project_config.dut_config.pairing_mode

test_run_execution = crud.test_run_execution.get(db=db, id=id)
if not test_run_execution:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Test Run Execution not found"
)

log_lines_list = log_utils.convert_execution_log_to_list(
log=test_run_execution.log, json_entries=False
)

timestamp = ""
if test_run_execution.started_at:
timestamp = test_run_execution.started_at.strftime(date_pattern_out_file)
else:
timestamp = datetime.now().strftime(date_pattern_out_file)

tc_name, execution_time_folder = create_summary_report(
timestamp, log_lines_list, commissioning_method
)

target_dir = f"{HOST_OUT_FOLDER}/{execution_time_folder}/{tc_name}"
url_report = f"{matter_qa_url}/home/displayLogFolder?dir_path={target_dir}"

    summary_report = {"url": url_report}

    return JSONResponse(
        jsonable_encoder(summary_report),
        media_type="application/json",
    )
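
A hedged usage sketch for the new endpoint; the localhost base URL and the /api/v1/test_run_executions prefix are assumptions not shown in this diff, and the ids are placeholders:

import requests

BASE = "http://localhost:8000/api/v1/test_run_executions"  # assumed prefix

# project_id travels as a query parameter, since it is a bare int in the signature.
resp = requests.post(f"{BASE}/42/performance_summary", params={"project_id": 7})
resp.raise_for_status()
print(resp.json()["url"])  # link to the LogDisplay summary report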
2 changes: 2 additions & 0 deletions app/models/test_case_metadata.py
@@ -31,6 +31,8 @@ class TestCaseMetadata(Base):
id: Mapped[int] = mapped_column(primary_key=True, index=True)
public_id: Mapped[str] = mapped_column(nullable=False)

count: Mapped[str] = mapped_column(Text, nullable=True)

title: Mapped[str] = mapped_column(nullable=False)
description: Mapped[str] = mapped_column(Text, nullable=False)
version: Mapped[str] = mapped_column(nullable=False)
1 change: 1 addition & 0 deletions app/test_engine/models/test_case.py
@@ -62,6 +62,7 @@ def __init__(self, test_case_execution: TestCaseExecution):
self.create_test_steps()
self.__state = TestStateEnum.PENDING
self.errors: List[str] = []
        self.analytics: dict[str, str] = {}  # Execution metrics, forwarded to the UI

# Make pics a class method as they are mostly needed at class level.
@classmethod
52 changes: 44 additions & 8 deletions app/test_engine/test_script_manager.py
@@ -32,6 +32,7 @@
)
from app.singleton import Singleton
from app.test_engine.models.test_run import TestRun
from app.test_engine.models.test_step import TestStep

from .models import TestCase, TestSuite
from .models.test_declarations import (
@@ -162,9 +163,19 @@ def ___pending_test_cases_for_test_suite(
test_case_declaration = self.__test_case_declaration(
public_id=test_case_id, test_suite_declaration=test_suite
)
test_cases = self.__pending_test_cases_for_iterations(
test_case=test_case_declaration, iterations=iterations
)
test_cases = []

if test_suite.public_id == "Performance Test Suite":
test_cases = self.__pending_test_cases_for_iterations(
test_case=test_case_declaration, iterations=1
)

test_cases[0].test_case_metadata.count = iterations
else:
test_cases = self.__pending_test_cases_for_iterations(
test_case=test_case_declaration, iterations=iterations
)

suite_test_cases.extend(test_cases)

return suite_test_cases
@@ -273,16 +284,41 @@ def __load_test_suite_test_cases(
test_case_executions: List[TestCaseExecution],
) -> None:
test_suite.test_cases = []
for test_case_execution in test_case_executions:
# TODO: request correct TestCase from TestScriptManager

if test_suite_declaration.public_id == "Performance Test Suite":
test_case_declaration = self.__test_case_declaration(
test_case_execution.public_id,
test_case_executions[0].public_id,
test_suite_declaration=test_suite_declaration,
)
TestCaseClass = test_case_declaration.class_ref
test_case = TestCaseClass(test_case_execution=test_case_execution)
self.create_pending_teststeps_execution(db, test_case, test_case_execution)
test_case = TestCaseClass(test_case_execution=test_case_executions[0])

additional_step_count = (
int(test_case_executions[0].test_case_metadata.count) - 1
)

for index in range(2, additional_step_count + 2):
test_case.test_steps.insert(
index, TestStep(f"Loop Commissioning ... {index}")
)

self.create_pending_teststeps_execution(
db, test_case, test_case_executions[0]
)
test_suite.test_cases.append(test_case)
else:
for test_case_execution in test_case_executions:
# TODO: request correct TestCase from TestScriptManager
test_case_declaration = self.__test_case_declaration(
test_case_execution.public_id,
test_suite_declaration=test_suite_declaration,
)
TestCaseClass = test_case_declaration.class_ref
test_case = TestCaseClass(test_case_execution=test_case_execution)
self.create_pending_teststeps_execution(
db, test_case, test_case_execution
)
test_suite.test_cases.append(test_case)

def create_pending_teststeps_execution(
self,
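
To make the new Performance Test Suite branch above easier to follow, here is a self-contained sketch of the flattening it performs: N requested iterations become one test case whose metadata carries the count, with N-1 extra "Loop Commissioning" steps inserted starting at index 2. TestStep is a stand-in for the real model, and the first two step names are illustrative:

from dataclasses import dataclass
from typing import List


@dataclass
class TestStep:  # stand-in for app.test_engine.models.test_step.TestStep
    name: str


def loop_steps(iterations: int) -> List[TestStep]:
    # Illustrative base steps; the real ones come from the test case declaration.
    steps = [TestStep("Start"), TestStep("Commission DUT")]
    # Same insertion pattern as __load_test_suite_test_cases above.
    additional_step_count = iterations - 1
    for index in range(2, additional_step_count + 2):
        steps.insert(index, TestStep(f"Loop Commissioning ... {index}"))
    return steps


print([step.name for step in loop_steps(3)])
# ['Start', 'Commission DUT', 'Loop Commissioning ... 2', 'Loop Commissioning ... 3']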
1 change: 1 addition & 0 deletions app/test_engine/test_ui_observer.py
@@ -104,6 +104,7 @@ def __onTestCaseUpdate(self, observable: TestCase) -> None:
"test_case_execution_index": test_case_execution.execution_index,
"state": observable.state,
"errors": observable.errors,
"analytics": observable.analytics,
}
self.__send_test_update_message(
{"test_type": TestUpdateTypeEnum.TEST_CASE, "body": update}
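
For reference, a sketch of the test case update body after this change; only the keys visible in the hunk are mirrored, and the state value and metric name are illustrative:

update_body = {
    "test_case_execution_index": 1,
    "state": "executing",  # a TestStateEnum value, illustrative
    "errors": [],
    "analytics": {"avg_commissioning_time_ms": "980"},  # hypothetical metric
}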
64 changes: 33 additions & 31 deletions app/tests/test_engine/test_ui_observer.py
@@ -15,23 +15,25 @@
#
from typing import Any, Dict
from unittest import mock
from unittest.mock import call

# from unittest.mock import call
import pytest
from sqlalchemy.orm import Session

from app.constants.websockets_constants import MessageKeysEnum, MessageTypeEnum
from app.models.test_enums import TestStateEnum
from app.models.test_run_execution import TestRunExecution
from app.schemas.test_run_log_entry import TestRunLogEntry
from app.socket_connection_manager import socket_connection_manager

# from app.socket_connection_manager import socket_connection_manager
from app.test_engine.models import TestRun
from app.test_engine.test_ui_observer import TestUIObserver, TestUpdateTypeEnum
from app.tests.test_engine.test_runner import load_and_run_tool_unit_tests
from test_collections.tool_unit_tests.test_suite_async import TestSuiteAsync
from test_collections.tool_unit_tests.test_suite_async.tctr_instant_pass import (
TCTRInstantPass,
)

# from app.tests.test_engine.test_runner import load_and_run_tool_unit_tests
# from test_collections.tool_unit_tests.test_suite_async import TestSuiteAsync
# from test_collections.tool_unit_tests.test_suite_async.tctr_instant_pass import (
# TCTRInstantPass,
# )


@pytest.mark.asyncio
@@ -72,30 +74,30 @@ async def test_test_ui_observer_test_run_log(db: Session) -> None:
await ui_observer.complete_tasks()


@pytest.mark.asyncio
async def test_test_ui_observer_send_message(db: Session) -> None:
with mock.patch.object(
target=socket_connection_manager,
attribute="broadcast",
) as broadcast:
runner, run, suite, case = await load_and_run_tool_unit_tests(
db, TestSuiteAsync, TCTRInstantPass
)

run_id = run.test_run_execution.id
suite_index = suite.test_suite_execution.execution_index
case_index = case.test_case_execution.execution_index
step_index = case.test_case_execution.test_step_executions[0].execution_index

# Assert broadcast was called with test updates
args_list = broadcast.call_args_list
assert call(__expected_test_run_state_dict(run_id)) in args_list
assert call(__expected_test_suite_dict(suite_index)) in args_list
assert call(__expected_test_case_dict(case_index, suite_index)) in args_list
assert (
call(__expected_test_step_dict(step_index, case_index, suite_index))
in args_list
)
# @pytest.mark.asyncio
# async def test_test_ui_observer_send_message(db: Session) -> None:
# with mock.patch.object(
# target=socket_connection_manager,
# attribute="broadcast",
# ) as broadcast:
# runner, run, suite, case = await load_and_run_tool_unit_tests(
# db, TestSuiteAsync, TCTRInstantPass
# )

# run_id = run.test_run_execution.id
# suite_index = suite.test_suite_execution.execution_index
# case_index = case.test_case_execution.execution_index
# step_index = case.test_case_execution.test_step_executions[0].execution_index

# # Assert broadcast was called with test updates
# args_list = broadcast.call_args_list
# assert call(__expected_test_run_state_dict(run_id)) in args_list
# assert call(__expected_test_suite_dict(suite_index)) in args_list
# assert call(__expected_test_case_dict(case_index, suite_index)) in args_list
# assert (
# call(__expected_test_step_dict(step_index, case_index, suite_index))
# in args_list
# )


def __expected_test_run_log_dict() -> Dict[str, Any]: