diff --git a/app/api/api_v1/endpoints/test_run_executions.py b/app/api/api_v1/endpoints/test_run_executions.py
index d992fe7a..4926b68d 100644
--- a/app/api/api_v1/endpoints/test_run_executions.py
+++ b/app/api/api_v1/endpoints/test_run_executions.py
@@ -163,7 +163,7 @@ def start_test_run_execution(
 
     if len(test_run_execution.project.pics.clusters) == 0:
         raise HTTPException(
-            status_code=HTTPStatus.NOT_FOUND, detail="No PICS were informed"
+            status_code=HTTPStatus.UNPROCESSABLE_ENTITY, detail="No PICS were informed."
         )
 
     test_runner = TestRunner()
diff --git a/app/tests/api/api_v1/test_test_run_executions.py b/app/tests/api/api_v1/test_test_run_executions.py
index cba0091f..ea798a19 100644
--- a/app/tests/api/api_v1/test_test_run_executions.py
+++ b/app/tests/api/api_v1/test_test_run_executions.py
@@ -796,6 +796,25 @@ async def test_test_run_execution_start(async_client: AsyncClient, db: Session)
     assert content["id"] == test_run_execution.id
 
 
+@pytest.mark.asyncio
+async def test_test_run_execution_start_no_pics(
+    async_client: AsyncClient, db: Session
+) -> None:
+    test_run_execution = create_test_run_execution_with_some_test_cases(db=db, pics={})
+
+    # Attempt to start a test run whose project has no PICS configured
+    response = await async_client.post(
+        f"{settings.API_V1_STR}/test_run_executions/{test_run_execution.id}/start",
+    )
+
+    # Assert 422 Unprocessable Entity and that the error detail is returned
+    assert response.status_code == HTTPStatus.UNPROCESSABLE_ENTITY
+    content = response.json()
+    assert isinstance(content, dict)
+    assert "detail" in content.keys()
+    assert content["detail"] == "No PICS were informed."
+
+
 @pytest.mark.asyncio
 async def test_test_run_execution_busy(async_client: AsyncClient, db: Session) -> None:
     test_run_execution = create_test_run_execution_with_some_test_cases(db=db)
diff --git a/app/tests/utils/test_run_execution.py b/app/tests/utils/test_run_execution.py
index e0e76123..77022417 100644
--- a/app/tests/utils/test_run_execution.py
+++ b/app/tests/utils/test_run_execution.py
@@ -23,11 +23,21 @@
 from app.models import TestRunExecution
 from app.models.test_enums import TestStateEnum
 from app.schemas import TestSelection
+from app.schemas.pics import PICS
 from app.schemas.test_run_execution import TestRunExecutionCreate
 from app.tests.utils.project import create_random_project
 
 fake = Faker()
 
+default_pics = {
+    "clusters": {
+        "Access Control cluster": {
+            "name": "Access Control cluster",
+            "items": {"ACL.S": {"number": "ACL.S", "enabled": True}},
+        }
+    }
+}
+
 
 def random_test_run_execution_dict(
     state: Optional[TestStateEnum] = None,
@@ -85,12 +95,15 @@ def create_random_test_run_execution_archived(
 
 
 def create_random_test_run_execution(
-    db: Session, selected_tests: Optional[TestSelection] = {}, **kwargs: Any
+    db: Session,
+    selected_tests: Optional[TestSelection] = {},
+    pics: Optional[PICS] = PICS(),
+    **kwargs: Any
 ) -> models.TestRunExecution:
     test_run_execution_dict = random_test_run_execution_dict(**kwargs)
 
     if test_run_execution_dict.get("project_id") is None:
-        project = create_random_project(db, config={})
+        project = create_random_project(db, config={}, pics=pics)
         test_run_execution_dict["project_id"] = project.id
 
     test_run_execution_in = TestRunExecutionCreate(**test_run_execution_dict)
@@ -110,7 +123,7 @@ def create_random_test_run_execution_with_test_case_states(
         "sample_tests": {"SampleTestSuite1": {"TCSS1001": num_test_cases}}
     }
     test_run_execution = create_random_test_run_execution(
-        db=db, selected_tests=selected_tests
+        db=db, selected_tests=selected_tests, pics=default_pics
    )
 
     test_suite_execution = test_run_execution.test_suite_executions[0]
@@ -128,7 +141,7 @@ def create_random_test_run_execution_with_test_case_states(
 
 
 def create_test_run_execution_with_some_test_cases(
-    db: Session, **kwargs: Any
+    db: Session, pics: Optional[PICS] = default_pics, **kwargs: Any
 ) -> TestRunExecution:
     return create_random_test_run_execution(
         db=db,
@@ -137,6 +150,7 @@
                 "SampleTestSuite1": {"TCSS1001": 1, "TCSS1002": 2, "TCSS1003": 3}
             }
         },
+        pics=pics,
         **kwargs
     )
 