Improve test run report validation
BenjaminPelletier committed Oct 30, 2023
1 parent 9ca9031 commit 6cf64ca
Showing 23 changed files with 902 additions and 118 deletions.
60 changes: 56 additions & 4 deletions monitoring/uss_qualifier/common_data_definitions.py
@@ -4,7 +4,7 @@
 class Severity(str, Enum):
     Critical = "Critical"
     """The system under test has a critical problem that justifies the discontinuation of testing.
     This kind of issue not only makes the current test scenario unable to
     succeed, but is likely to cause spurious failures in other separate test
     scenarios as well. This may occur, for instance, if the system was left
@@ -15,21 +15,73 @@ class Severity(str, Enum):

     High = "High"
     """The system under test has a problem that prevents the current test scenario from continuing.
     Error interrupts a test scenario but likely doesn't impact other, separate
     test scenarios. For instance, the test step necessary to enable later test
     steps in the test scenario did not complete successfully.
     """
 
     Medium = "Medium"
     """The system does not meet requirements, but the current test scenario can continue.
     Further test steps will likely still result in reasonable evaluations.
     """
 
     Low = "Low"
     """The system meets requirements but could be improved.
     Further test steps can be executed without impact. A test run with only
     Low-Severity issues will be considered successful.
     """

+    def __eq__(self, other):
+        if isinstance(other, Severity):
+            other_str = other.value
+        elif isinstance(other, str):
+            other_str = other
+        else:
+            raise ValueError(f"Cannot compare Severity to {type(other)}")
+        return self.value == other_str
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __gt__(self, other):
+        if isinstance(other, Severity):
+            pass
+        elif isinstance(other, str):
+            other = Severity(other)
+        else:
+            raise ValueError(f"Cannot compare Severity to {type(other)}")
+
+        if self == Severity.Critical:
+            return other != Severity.Critical
+        elif self == Severity.High:
+            return other == Severity.Medium or other == Severity.Low
+        elif self == Severity.Medium:
+            return other == Severity.Low
+        elif self == Severity.Low:
+            return False
+        else:
+            raise ValueError(f"Unknown Severity type: '{self}'")
+
+    def __ge__(self, other):
+        return self == other or self > other
+
+    def __lt__(self, other):
+        if isinstance(other, Severity):
+            pass
+        elif isinstance(other, str):
+            other = Severity(other)
+        else:
+            raise ValueError(f"Cannot compare Severity to {type(other)}")
+        return other > self
+
+    def __le__(self, other):
+        if isinstance(other, Severity):
+            pass
+        elif isinstance(other, str):
+            other = Severity(other)
+        else:
+            raise ValueError(f"Cannot compare Severity to {type(other)}")
+        return other >= self
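
For reference, the ordering these new operators implement behaves as below (a quick sketch, not part of the commit; assumes the module path shown):

from monitoring.uss_qualifier.common_data_definitions import Severity

# Members compare against both other Severity members and plain strings.
assert Severity.Critical > Severity.High > Severity.Medium > Severity.Low
assert Severity.High >= "High" and Severity.High > "Low"
assert Severity.Low <= "Medium"

# A string that is not a valid Severity value raises ValueError, because
# Severity("Trivial") fails the Enum lookup.
try:
    Severity.High > "Trivial"
except ValueError:
    pass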
4 changes: 1 addition & 3 deletions monitoring/uss_qualifier/configurations/dev/dss_probing.yaml
@@ -41,6 +41,4 @@ v1:
         uss1: all_astm_dss_requirements
         uss2: all_astm_dss_requirements
   validation:
-    criteria:
-      - full_success: {}
-      - no_skipped_actions: {}
+    $ref: ./library/validation.yaml#/normal_test
@@ -113,3 +113,21 @@ v1:
 
     # Write out a human-readable report showing the sequence of events of the test
     sequence_view: {}
+
+  # This block defines whether to return an error code from the execution of uss_qualifier, based on the content of
+  # the test run report. All of the criteria must be met to return a successful code.
+  validation:
+    criteria:
+      # applicability indicates which test report elements the pass_condition applies to
+      - applicability:
+          # We want to make sure there are no failed checks...
+          failed_checks:
+            # ...at least, no failed checks with severity higher than "Low".
+            has_severity:
+              higher_than: Low
+        pass_condition:
+          # When considering all of the applicable elements...
+          elements:
+            # ...the number of applicable elements should be zero.
+            count:
+              equal_to: 0
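
In effect, this block passes validation only when the report contains zero failed checks more severe than Low. A rough sketch of that logic in Python (hypothetical helper name; the real evaluation lives in the report validation module):

from monitoring.uss_qualifier.common_data_definitions import Severity

def passes_normal_test(failed_checks) -> bool:
    # applicability: FailedChecks whose severity is strictly higher than Low
    applicable = [fc for fc in failed_checks if fc.severity > Severity.Low]
    # pass_condition: the count of applicable elements must equal zero
    return len(applicable) == 0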
monitoring/uss_qualifier/configurations/dev/library/validation.yaml
@@ -1,5 +1,11 @@
 normal_test:
+  $content_schema: monitoring/uss_qualifier/reports/validation/report_validation/ValidationConfiguration.json
   criteria:
-    - full_success: {}
-    - no_skipped_actions: {}
+    - applicability:
+        failed_checks:
+          has_severity:
+            higher_than: Low
+      pass_condition:
+        elements:
+          count:
+            equal_to: 0
2 changes: 1 addition & 1 deletion monitoring/uss_qualifier/reports/report.py
@@ -273,7 +273,7 @@ def query_passed_checks(
 
     def query_failed_checks(
         self, participant_id: Optional[str] = None
-    ) -> Iterator[Tuple[JSONPathExpression, PassedCheck]]:
+    ) -> Iterator[Tuple[JSONPathExpression, FailedCheck]]:
         for i, case in enumerate(self.cases):
            for path, fc in case.query_failed_checks(participant_id):
                yield f"cases[{i}].{path}", fc
139 changes: 132 additions & 7 deletions monitoring/uss_qualifier/reports/validation/definitions.py
@@ -1,28 +1,153 @@
+from __future__ import annotations
 from typing import Optional, List
 
 from implicitdict import ImplicitDict
+from monitoring.monitorlib.dicts import JSONAddress
+from monitoring.uss_qualifier.common_data_definitions import Severity
 
 
-class FullSuccessCriterion(ImplicitDict):
-    """Validation criterion that every element of the report must indicate success."""
+# ===== Shared logic =====
+
+
+class SeverityComparison(ImplicitDict):
+    """Exactly one field must be specified."""
+
+    equal_to: Optional[Severity]
+    at_least: Optional[Severity]
+    higher_than: Optional[Severity]
+    no_higher_than: Optional[Severity]
+    lower_than: Optional[Severity]
+
+
+class NumericComparison(ImplicitDict):
+    """Exactly one field must be specified."""
+
+    equal_to: Optional[float]
+    at_least: Optional[float]
+    more_than: Optional[float]
+    no_more_than: Optional[float]
+    less_than: Optional[float]
 
-    pass
 
+# ===== Applicability =====
 
-class NoSkippedActionsCriterion(ImplicitDict):
-    """Validation criterion that no actions in the entire test run may be skipped."""
 
+class FailedCheckApplicability(ImplicitDict):
+    """FailedCheck test report elements are applicable according to this specification."""
+
+    has_severity: Optional[SeverityComparison]
+    """If specified, only FailedChecks with specified severity are applicable."""
+
+
+class SkippedCheckApplicability(ImplicitDict):
+    """SkippedCheckReport test report elements are applicable according to this specification."""
 
     pass
 
 
+class AllCriteriaApplicability(ImplicitDict):
+    """All criteria must be met for an element to be applicable."""
+
+    criteria: List[ValidationCriterionApplicability]
+    """Criteria that must all be met."""
+
+
+class AnyCriteriaApplicability(ImplicitDict):
+    """Any criterion or criteria must be met for an element to be applicable."""
+
+    criteria: List[ValidationCriterionApplicability]
+    """Options for criterion/criteria to meet."""
+
+
+class ValidationCriterionApplicability(ImplicitDict):
+    """A single criterion for determining whether a test report element is applicable.
+    Exactly one field must be specified."""
+
+    failed_checks: Optional[FailedCheckApplicability]
+    """Only this kind of FailedCheck elements are applicable."""
+
+    skipped_actions: Optional[SkippedCheckApplicability]
+    """Only this kind of SkippedCheckReport elements are applicable."""
+
+    address_is: Optional[JSONAddress]
+    """Only the element at this JSONAddress in the test report is applicable."""
+
+    does_not_satisfy: Optional[ValidationCriterionApplicability]
+    """Only elements that do not satisfy this criterion are applicable."""
+
+    satisfies_all: Optional[AllCriteriaApplicability]
+    """Only elements which satisfy all these criteria are applicable."""
+
+    satisfies_any: Optional[AnyCriteriaApplicability]
+    """Elements which satisfy any of these criteria are applicable."""
+
+
+# ===== Pass conditions =====
+
+
+class EachElementCondition(ImplicitDict):
+    """A single applicable element must meet this condition. Exactly one field must be specified."""
+
+    has_severity: Optional[SeverityComparison]
+    """The element must be a FailedCheck that has this specified kind of severity."""
+
+
+class ElementGroupCondition(ImplicitDict):
+    """A group of applicable elements must meet this condition. Exactly one field must be specified."""
+
+    count: Optional[NumericComparison]
+    """The number of applicable elements must have this specified kind of count."""
+
+
+class AllPassConditions(ImplicitDict):
+    """All specific conditions must be met."""
+
+    conditions: List[PassCondition]
+    """Conditions that all must be met."""
+
+
+class AnyPassCondition(ImplicitDict):
+    """Any specific condition must be met."""
+
+    conditions: List[PassCondition]
+    """Options for conditions to meet."""
+
+
+class PassCondition(ImplicitDict):
+    """Condition for passing validation. Exactly one field must be specified."""
+
+    each_element: Optional[EachElementCondition]
+    """Condition applies to each applicable element."""
+
+    elements: Optional[ElementGroupCondition]
+    """Condition applies to the group of applicable elements."""
+
+    does_not_pass: Optional[PassCondition]
+    """Overall condition is met only if this specified condition is not met."""
+
+    all_of: Optional[AllPassConditions]
+    """Overall condition is met only if all of these specified conditions are met."""
+
+    any_of: Optional[AnyPassCondition]
+    """Overall condition is met if any of these specified conditions are met."""
+
+
+# ===== Configuration =====
 
 
 class ValidationCriterion(ImplicitDict):
     """Wrapper for all the potential types of validation criteria."""
 
-    full_success: Optional[FullSuccessCriterion] = None
-    no_skipped_actions: Optional[NoSkippedActionsCriterion] = None
+    applicability: ValidationCriterionApplicability
+    """Definition of the test report elements to which the `pass_condition` is applicable."""
+
+    pass_condition: PassCondition
+    """Condition that must be met by the applicable test report element(s) in order to pass validation."""
 
 
 class ValidationConfiguration(ImplicitDict):
     """Complete set of validation criteria that a test run report must satisfy."""
 
     criteria: List[ValidationCriterion]
+    """Set of criteria which must all pass in order to pass validation."""