class Severity(str, Enum):
    """Severity of a finding against the system under test.

    Ordered from least to most severe: Low < Medium < High < Critical.
    Comparison operators accept either Severity members or plain severity
    strings (e.g. `"Low"`).
    """

    Critical = "Critical"
    """The system under test has a critical problem that justifies the
    discontinuation of testing.

    This kind of issue not only makes the current test scenario unable to
    succeed, but is likely to cause spurious failures in other separate test
    scenarios as well.  This may occur, for instance, if the system was left
    in a dirty state likely to prevent the success of later test scenarios."""

    High = "High"
    """The system under test has a problem that prevents the current test
    scenario from continuing.

    Error interrupts a test scenario but likely doesn't impact other,
    separate test scenarios.  For instance, the test step necessary to enable
    later test steps in the test scenario did not complete successfully."""

    Medium = "Medium"
    """The system does not meet requirements, but the current test scenario
    can continue.

    Further test steps will likely still result in reasonable evaluations."""

    Low = "Low"
    """The system meets requirements but could be improved.

    Further test steps can be executed without impact.  A test run with only
    Low-Severity issues will be considered successful."""

    def __hash__(self) -> int:
        # Overriding __eq__ (below) would otherwise implicitly set __hash__
        # to None, making Severity unhashable and unusable in sets/dict keys.
        # Hash by value so equal severities (and equal plain strings) hash
        # identically.
        return str.__hash__(self.value)

    def __eq__(self, other) -> bool:
        """Severities compare equal by value; plain strings are accepted.

        Raises:
            ValueError: if `other` is neither a Severity nor a str.
        """
        if isinstance(other, Severity):
            other_str = other.value
        elif isinstance(other, str):
            other_str = other
        else:
            raise ValueError(f"Cannot compare Severity to {type(other)}")
        return self.value == other_str

    def __ne__(self, other) -> bool:
        return not (self == other)

    def __gt__(self, other) -> bool:
        """Return True iff self is strictly more severe than `other`.

        Raises:
            ValueError: if `other` is neither a Severity nor a str.
        """
        if isinstance(other, Severity):
            pass
        elif isinstance(other, str):
            other = Severity(other)
        else:
            raise ValueError(f"Cannot compare Severity to {type(other)}")

        if self == Severity.Critical:
            # Critical is the highest severity: more severe than everything
            # except another Critical.
            return other != Severity.Critical
        elif self == Severity.High:
            return other == Severity.Medium or other == Severity.Low
        elif self == Severity.Medium:
            return other == Severity.Low
        elif self == Severity.Low:
            # Low is the lowest severity: never strictly more severe.
            return False
        else:
            raise ValueError(f"Unknown Severity type: '{self}'")

    def __ge__(self, other) -> bool:
        return self == other or self > other

    def __lt__(self, other) -> bool:
        if isinstance(other, Severity):
            pass
        elif isinstance(other, str):
            other = Severity(other)
        else:
            raise ValueError(f"Cannot compare Severity to {type(other)}")
        return other > self

    def __le__(self, other) -> bool:
        if isinstance(other, Severity):
            pass
        elif isinstance(other, str):
            other = Severity(other)
        else:
            raise ValueError(f"Cannot compare Severity to {type(other)}")
        return other >= self
100644 --- a/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml +++ b/monitoring/uss_qualifier/configurations/dev/f3548_self_contained.yaml @@ -113,3 +113,21 @@ v1: # Write out a human-readable report showing the sequence of events of the test sequence_view: {} + + # This block defines whether to return an error code from the execution of uss_qualifier, based on the content of the + # test run report. All of the criteria must be met to return a successful code. + validation: + criteria: + # applicability indicates which test report elements the pass_condition applies to + - applicability: + # We want to make sure there are no failed checks... + failed_checks: + # ...at least, no failed checks with severity higher than "Low". + has_severity: + higher_than: Low + pass_condition: + # When considering all of the applicable elements... + elements: + # ...the number of applicable elements should be zero. + count: + equal_to: 0 diff --git a/monitoring/uss_qualifier/configurations/dev/library/validation.yaml b/monitoring/uss_qualifier/configurations/dev/library/validation.yaml index b049c2247c..16db8582c3 100644 --- a/monitoring/uss_qualifier/configurations/dev/library/validation.yaml +++ b/monitoring/uss_qualifier/configurations/dev/library/validation.yaml @@ -1,5 +1,11 @@ normal_test: $content_schema: monitoring/uss_qualifier/reports/validation/report_validation/ValidationConfiguration.json criteria: - - full_success: {} - - no_skipped_actions: {} + - applicability: + failed_checks: + has_severity: + higher_than: Low + pass_condition: + elements: + count: + equal_to: 0 diff --git a/monitoring/uss_qualifier/reports/report.py b/monitoring/uss_qualifier/reports/report.py index 3f3eebbe41..59c4afd001 100644 --- a/monitoring/uss_qualifier/reports/report.py +++ b/monitoring/uss_qualifier/reports/report.py @@ -273,7 +273,7 @@ def query_passed_checks( def query_failed_checks( self, participant_id: Optional[str] = None - ) -> Iterator[Tuple[JSONPathExpression, 
"""Data types declaring report validation criteria.

These ImplicitDict types are deserialized from test configuration and are
consumed by report_validation.py to decide whether a test run report passes
validation.  NOTE: class and attribute docstrings here are embedded verbatim
into the generated JSON schemas; keep them in sync with the schema files.
"""
from __future__ import annotations
from typing import Optional, List

from implicitdict import ImplicitDict

from monitoring.monitorlib.dicts import JSONAddress
from monitoring.uss_qualifier.common_data_definitions import Severity


# ===== Shared logic =====


class SeverityComparison(ImplicitDict):
    """Exactly one field must be specified."""

    equal_to: Optional[Severity]
    at_least: Optional[Severity]
    higher_than: Optional[Severity]
    no_higher_than: Optional[Severity]
    lower_than: Optional[Severity]


class NumericComparison(ImplicitDict):
    """Exactly one field must be specified."""

    equal_to: Optional[float]
    at_least: Optional[float]
    more_than: Optional[float]
    no_more_than: Optional[float]
    less_than: Optional[float]


# ===== Applicability =====


class FailedCheckApplicability(ImplicitDict):
    """FailedCheck test report elements are applicable according to this specification."""

    has_severity: Optional[SeverityComparison]
    """If specified, only FailedChecks with specified severity are applicable."""


class SkippedCheckApplicability(ImplicitDict):
    """SkippedCheckReport test report elements are applicable according to this specification."""

    pass


class AllCriteriaApplicability(ImplicitDict):
    """All criteria must be met for an element to be applicable."""

    criteria: List[ValidationCriterionApplicability]
    """Criteria that must all be met."""


class AnyCriteriaApplicability(ImplicitDict):
    """Any criterion or criteria must be met for an element to be applicable."""

    criteria: List[ValidationCriterionApplicability]
    """Options for criterion/criteria to meet."""


class ValidationCriterionApplicability(ImplicitDict):
    """A single criterion for determining whether a test report element is applicable.

    Exactly one field must be specified."""

    failed_checks: Optional[FailedCheckApplicability]
    """Only this kind of FailedCheck elements are applicable."""

    skipped_actions: Optional[SkippedCheckApplicability]
    """Only this kind of SkippedCheckReport elements are applicable."""

    address_is: Optional[JSONAddress]
    """Only the element at this JSONAddress in the test report is applicable."""

    does_not_satisfy: Optional[ValidationCriterionApplicability]
    """Only elements that do not satisfy this criterion are applicable."""

    satisfies_all: Optional[AllCriteriaApplicability]
    """Only elements which satisfy all these criteria are applicable."""

    satisfies_any: Optional[AnyCriteriaApplicability]
    """Elements which satisfy any of these criteria are applicable."""


# ===== Pass conditions =====


class EachElementCondition(ImplicitDict):
    """A single applicable element must meet this condition. Exactly one field must be specified."""

    has_severity: Optional[SeverityComparison]
    """The element must be a FailedCheck that has this specified kind of severity."""


class ElementGroupCondition(ImplicitDict):
    """A group of applicable elements must meet this condition. Exactly one field must be specified."""

    count: Optional[NumericComparison]
    """The number of applicable elements must have this specified kind of count."""


class AllPassConditions(ImplicitDict):
    """All specific conditions must be met."""

    conditions: List[PassCondition]
    """Conditions that all must be met."""


class AnyPassCondition(ImplicitDict):
    """Any specific condition must be met."""

    conditions: List[PassCondition]
    """Options for conditions to meet."""


class PassCondition(ImplicitDict):
    """Condition for passing validation. Exactly one field must be specified."""

    each_element: Optional[EachElementCondition]
    """Condition applies to each applicable element."""

    elements: Optional[ElementGroupCondition]
    """Condition applies to the group of applicable elements."""

    does_not_pass: Optional[PassCondition]
    """Overall condition is met only if this specified condition is not met."""

    all_of: Optional[AllPassConditions]
    """Overall condition is met only if all of these specified conditions are met."""

    any_of: Optional[AnyPassCondition]
    """Overall condition is met if any of these specified conditions are met."""


# ===== Configuration =====


class ValidationCriterion(ImplicitDict):
    """Wrapper for all the potential types of validation criteria."""

    applicability: ValidationCriterionApplicability
    """Definition of the test report elements to which the `pass_condition` is applicable."""

    pass_condition: PassCondition
    """Condition that must be met by the applicable test report element(s) in order to pass validation."""


class ValidationConfiguration(ImplicitDict):
    """Complete set of validation criteria that a test run report must satisfy."""

    criteria: List[ValidationCriterion]
    """Set of criteria which must all pass in order to pass validation."""
"""Validation of a test run report against declarative validation criteria.

Each ValidationCriterion selects a set of applicable report elements
(FailedChecks and/or SkippedActionReports) and evaluates a PassCondition
against them; the report passes only if every criterion passes.
"""
import json
from dataclasses import dataclass
from typing import Iterator, List, Union

from loguru import logger
import yaml

from monitoring.monitorlib.dicts import JSONAddress
from monitoring.monitorlib.inspection import fullname
from monitoring.uss_qualifier.common_data_definitions import Severity
from monitoring.uss_qualifier.reports.report import (
    TestRunReport,
    TestSuiteActionReport,
    FailedCheck,
    TestSuiteReport,
    ActionGeneratorReport,
    SkippedActionReport,
    TestScenarioReport,
)
from monitoring.uss_qualifier.reports.validation.definitions import (
    ValidationConfiguration,
    ValidationCriterion,
    ValidationCriterionApplicability,
    SeverityComparison,
    PassCondition,
    EachElementCondition,
    ElementGroupCondition,
    NumericComparison,
)


# ===== Shared logic =====


@dataclass
class TestReportElement(object):
    """A single report element paired with its JSON location in the report."""

    element: Union[FailedCheck, SkippedActionReport]
    location: JSONAddress


def _compare_number(value: float, comparison: NumericComparison) -> bool:
    """Return True iff `value` satisfies the single specified comparison.

    Raises:
        ValueError: if no comparison option is specified in `comparison`.
    """
    if "equal_to" in comparison and comparison.equal_to is not None:
        return value == comparison.equal_to
    elif "at_least" in comparison and comparison.at_least is not None:
        return value >= comparison.at_least
    elif "more_than" in comparison and comparison.more_than is not None:
        return value > comparison.more_than
    elif "no_more_than" in comparison and comparison.no_more_than is not None:
        return value <= comparison.no_more_than
    elif "less_than" in comparison and comparison.less_than is not None:
        return value < comparison.less_than
    else:
        raise ValueError(
            "Invalid NumericComparison; must specify exactly one of the comparison options"
        )


def _compare_severity(severity: Severity, comparison: SeverityComparison) -> bool:
    """Return True iff `severity` satisfies the single specified comparison.

    Raises:
        ValueError: if no comparison option is specified in `comparison`.
    """
    if "equal_to" in comparison and comparison.equal_to:
        return severity == comparison.equal_to
    elif "at_least" in comparison and comparison.at_least:
        return severity >= comparison.at_least
    elif "higher_than" in comparison and comparison.higher_than:
        return severity > comparison.higher_than
    elif "no_higher_than" in comparison and comparison.no_higher_than:
        return severity <= comparison.no_higher_than
    elif "lower_than" in comparison and comparison.lower_than:
        return severity < comparison.lower_than
    else:
        raise ValueError(
            "Invalid SeverityComparison; must specify exactly one of the comparison options"
        )


# ===== Enumeration of applicable elements =====


def _is_applicable(
    element: TestReportElement, applicability: ValidationCriterionApplicability
) -> bool:
    """Determine whether `element` is applicable according to `applicability`.

    Raises:
        ValueError: if no applicability option is specified.
    """
    if "failed_checks" in applicability and applicability.failed_checks is not None:
        if not isinstance(element.element, FailedCheck):
            return False
        if (
            "has_severity" in applicability.failed_checks
            and applicability.failed_checks.has_severity is not None
        ):
            if not _compare_severity(
                element.element.severity, applicability.failed_checks.has_severity
            ):
                return False
        return True

    elif (
        "skipped_actions" in applicability and applicability.skipped_actions is not None
    ):
        return isinstance(element.element, SkippedActionReport)

    elif "address_is" in applicability and applicability.address_is is not None:
        return applicability.address_is == element.location

    elif (
        "does_not_satisfy" in applicability
        and applicability.does_not_satisfy is not None
    ):
        return not _is_applicable(element, applicability.does_not_satisfy)

    elif "satisfies_all" in applicability and applicability.satisfies_all is not None:
        # Iterate the `criteria` field; iterating the AllCriteriaApplicability
        # (an ImplicitDict) itself would yield its field names, not criteria.
        return all(
            _is_applicable(element, criterion)
            for criterion in applicability.satisfies_all.criteria
        )

    elif "satisfies_any" in applicability and applicability.satisfies_any is not None:
        # Same as above: iterate the `criteria` field of the container.
        return any(
            _is_applicable(element, criterion)
            for criterion in applicability.satisfies_any.criteria
        )

    else:
        raise ValueError(
            "Invalid ValidationCriterionApplicability; must specify exactly one of the applicability criteria"
        )


def _get_applicable_elements_from_test_scenario(
    applicability: ValidationCriterionApplicability,
    report: TestScenarioReport,
    location: JSONAddress,
) -> Iterator[TestReportElement]:
    """Yield applicable FailedCheck elements found in a test scenario report."""
    for fc_location, fc in report.query_failed_checks():
        element = TestReportElement(
            element=fc, location=JSONAddress(location + "." + fc_location)
        )
        if _is_applicable(element, applicability):
            yield element


def _get_applicable_elements_from_test_suite(
    applicability: ValidationCriterionApplicability,
    report: TestSuiteReport,
    location: JSONAddress,
) -> Iterator[TestReportElement]:
    """Yield applicable elements from every action of a test suite report."""
    for a, action in enumerate(report.actions):
        for e in _get_applicable_elements_from_action(
            applicability, action, JSONAddress(location + f".actions[{a}]")
        ):
            yield e


def _get_applicable_elements_from_action_generator(
    applicability: ValidationCriterionApplicability,
    report: ActionGeneratorReport,
    location: JSONAddress,
) -> Iterator[TestReportElement]:
    """Yield applicable elements from every action of an action generator report."""
    for a, action in enumerate(report.actions):
        for e in _get_applicable_elements_from_action(
            applicability, action, JSONAddress(location + f".actions[{a}]")
        ):
            yield e


def _get_applicable_elements_from_skipped_action(
    applicability: ValidationCriterionApplicability,
    report: SkippedActionReport,
    location: JSONAddress,
) -> Iterator[TestReportElement]:
    """Yield the skipped action itself, if it is applicable."""
    element = TestReportElement(element=report, location=location)
    if _is_applicable(element, applicability):
        yield element


def _get_applicable_elements_from_action(
    applicability: ValidationCriterionApplicability,
    report: TestSuiteActionReport,
    location: JSONAddress,
) -> Iterator[TestReportElement]:
    """Dispatch element enumeration to whichever sub-report this action holds."""
    test_suite, test_scenario, action_generator = report.get_applicable_report()
    if test_scenario:
        return _get_applicable_elements_from_test_scenario(
            applicability,
            report.test_scenario,
            JSONAddress(location + ".test_scenario"),
        )
    elif test_suite:
        return _get_applicable_elements_from_test_suite(
            applicability, report.test_suite, JSONAddress(location + ".test_suite")
        )
    elif action_generator:
        return _get_applicable_elements_from_action_generator(
            applicability,
            report.action_generator,
            JSONAddress(location + ".action_generator"),
        )
    else:
        # None of the other report types present implies a skipped action.
        return _get_applicable_elements_from_skipped_action(
            applicability,
            report.skipped_action,
            JSONAddress(location + ".skipped_action"),
        )


# ===== Evaluation of conditions =====


def _evaluate_element_condition(
    condition: EachElementCondition, element: TestReportElement
) -> bool:
    """Evaluate a per-element condition against a single applicable element.

    Raises:
        ValueError: if no condition option is specified.
    """
    if "has_severity" in condition and condition.has_severity is not None:
        if isinstance(element.element, FailedCheck):
            return _compare_severity(element.element.severity, condition.has_severity)
        else:
            logger.warning(
                f"has_severity condition applied to non-FailedCheck element type {fullname(type(element.element))}"
            )
            return False
    else:
        raise ValueError(
            "Invalid EachElementCondition; must specify exactly one of the options"
        )


def _evaluate_elements_condition(
    condition: ElementGroupCondition, elements: List[TestReportElement]
) -> bool:
    """Evaluate a group condition against all applicable elements.

    Raises:
        ValueError: if no condition option is specified.
    """
    if "count" in condition and condition.count is not None:
        return _compare_number(len(elements), condition.count)
    else:
        raise ValueError(
            "Invalid ElementGroupCondition; must specify exactly one of the options"
        )


def _evaluate_condition(
    condition: PassCondition, elements: List[TestReportElement]
) -> bool:
    """Evaluate a pass condition against the applicable elements.

    Raises:
        ValueError: if no condition option is specified.
    """
    if "each_element" in condition and condition.each_element is not None:
        return all(
            _evaluate_element_condition(condition.each_element, element)
            for element in elements
        )

    elif "elements" in condition and condition.elements is not None:
        return _evaluate_elements_condition(condition.elements, elements)

    elif "does_not_pass" in condition and condition.does_not_pass is not None:
        return not _evaluate_condition(condition.does_not_pass, elements)

    elif "all_of" in condition and condition.all_of is not None:
        # Iterate the `conditions` field; iterating the AllPassConditions
        # (an ImplicitDict) itself would yield its field names, not conditions.
        return all(
            _evaluate_condition(c, elements) for c in condition.all_of.conditions
        )

    elif "any_of" in condition and condition.any_of is not None:
        # Same as above: iterate the `conditions` field of the container.
        return any(
            _evaluate_condition(c, elements) for c in condition.any_of.conditions
        )

    else:
        raise ValueError(
            "Invalid PassCondition; must specify exactly one of the options"
        )


# ===== Validation =====


def _criterion_validated(criterion: ValidationCriterion, report: TestRunReport) -> bool:
    """Return True iff the report's applicable elements satisfy the criterion's pass condition."""
    elements = list(
        _get_applicable_elements_from_action(
            criterion.applicability, report.report, "$.report"
        )
    )
    return _evaluate_condition(criterion.pass_condition, elements)


def validate_report(report: TestRunReport, validation: ValidationConfiguration) -> bool:
    """Validate that the provided report meets all the specified validation criteria.

    Args:
        report: Report to validate.
        validation: Validation criteria the report must satisfy.

    Returns: True if the report satisfies all criteria, False otherwise.
    """
    success = True
    for c, criterion in enumerate(validation.criteria):
        if not _criterion_validated(criterion, report):
            success = False
            logger.error(
                f"Validation criterion {c} failed to validate. Criterion definition:\n"
                + yaml.dump(json.loads(json.dumps(criterion)))
            )
    return success
as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "conditions": { + "description": "Conditions that all must be met.", + "items": { + "$ref": "PassCondition.json" + }, + "type": "array" + } + }, + "required": [ + "conditions" + ], + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/AnyCriteriaApplicability.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/AnyCriteriaApplicability.json new file mode 100644 index 0000000000..79872c84eb --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/AnyCriteriaApplicability.json @@ -0,0 +1,22 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/AnyCriteriaApplicability.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "Any criterion or criteria must be met for an element to be applicable.\n\nmonitoring.uss_qualifier.reports.validation.definitions.AnyCriteriaApplicability, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "criteria": { + "description": "Options for criterion/criteria to meet.", + "items": { + "$ref": "ValidationCriterionApplicability.json" + }, + "type": "array" + } + }, + "required": [ + "criteria" + ], + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/AnyPassCondition.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/AnyPassCondition.json new file mode 100644 index 0000000000..f0c3e36dd5 --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/AnyPassCondition.json @@ -0,0 +1,22 
@@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/AnyPassCondition.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "Any specific condition must be met.\n\nmonitoring.uss_qualifier.reports.validation.definitions.AnyPassCondition, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "conditions": { + "description": "Options for conditions to meet.", + "items": { + "$ref": "PassCondition.json" + }, + "type": "array" + } + }, + "required": [ + "conditions" + ], + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/EachElementCondition.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/EachElementCondition.json new file mode 100644 index 0000000000..3f6c9030c5 --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/EachElementCondition.json @@ -0,0 +1,23 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/EachElementCondition.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "A single applicable element must meet this condition. 
Exactly one field must be specified.\n\nmonitoring.uss_qualifier.reports.validation.definitions.EachElementCondition, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "has_severity": { + "description": "The element must be a FailedCheck that has this specified kind of severity.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "SeverityComparison.json" + } + ] + } + }, + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/ElementGroupCondition.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/ElementGroupCondition.json new file mode 100644 index 0000000000..959af47501 --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/ElementGroupCondition.json @@ -0,0 +1,23 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/ElementGroupCondition.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "A group of applicable elements must meet this condition. 
Exactly one field must be specified.\n\nmonitoring.uss_qualifier.reports.validation.definitions.ElementGroupCondition, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "count": { + "description": "The number of applicable elements must have this specified kind of count.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "NumericComparison.json" + } + ] + } + }, + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/FailedCheckApplicability.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/FailedCheckApplicability.json new file mode 100644 index 0000000000..caeff8e0cf --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/FailedCheckApplicability.json @@ -0,0 +1,23 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/FailedCheckApplicability.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "FailedCheck test report elements are applicable according to this specification.\n\nmonitoring.uss_qualifier.reports.validation.definitions.FailedCheckApplicability, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "has_severity": { + "description": "If specified, only FailedChecks with specified severity are applicable.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "SeverityComparison.json" + } + ] + } + }, + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/FullSuccessCriterion.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/FullSuccessCriterion.json deleted file 
mode 100644 index 9644ce991a..0000000000 --- a/schemas/monitoring/uss_qualifier/reports/validation/definitions/FullSuccessCriterion.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/FullSuccessCriterion.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "description": "Validation criterion that every element of the report must indicate success.\n\nmonitoring.uss_qualifier.reports.validation.definitions.FullSuccessCriterion, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", - "properties": { - "$ref": { - "description": "Path to content that replaces the $ref", - "type": "string" - } - }, - "type": "object" -} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/NoSkippedActionsCriterion.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/NoSkippedActionsCriterion.json deleted file mode 100644 index 29ca95eff0..0000000000 --- a/schemas/monitoring/uss_qualifier/reports/validation/definitions/NoSkippedActionsCriterion.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/NoSkippedActionsCriterion.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "description": "Validation criterion that no actions in the entire test run may be skipped.\n\nmonitoring.uss_qualifier.reports.validation.definitions.NoSkippedActionsCriterion, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", - "properties": { - "$ref": { - "description": "Path to content that replaces the $ref", - "type": "string" - } - }, - "type": "object" -} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/NumericComparison.json 
b/schemas/monitoring/uss_qualifier/reports/validation/definitions/NumericComparison.json new file mode 100644 index 0000000000..ca8910f445 --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/NumericComparison.json @@ -0,0 +1,42 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/NumericComparison.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "Exactly one field must be specified.\n\nmonitoring.uss_qualifier.reports.validation.definitions.NumericComparison, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "at_least": { + "type": [ + "number", + "null" + ] + }, + "equal_to": { + "type": [ + "number", + "null" + ] + }, + "less_than": { + "type": [ + "number", + "null" + ] + }, + "more_than": { + "type": [ + "number", + "null" + ] + }, + "no_more_than": { + "type": [ + "number", + "null" + ] + } + }, + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/PassCondition.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/PassCondition.json new file mode 100644 index 0000000000..4f2cabb82e --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/PassCondition.json @@ -0,0 +1,67 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/PassCondition.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "Condition for passing validation. 
Exactly one field must be specified.\n\nmonitoring.uss_qualifier.reports.validation.definitions.PassCondition, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "all_of": { + "description": "Overall condition is met only if all of these specified conditions are met.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "AllPassConditions.json" + } + ] + }, + "any_of": { + "description": "Overall condition is met if any of these specified conditions are met.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "AnyPassCondition.json" + } + ] + }, + "does_not_pass": { + "description": "Overall condition is met only if this specified condition is not met.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "PassCondition.json" + } + ] + }, + "each_element": { + "description": "Condition applies to each applicable element.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "EachElementCondition.json" + } + ] + }, + "elements": { + "description": "Condition applies to the group of applicable elements.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "ElementGroupCondition.json" + } + ] + } + }, + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/SeverityComparison.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/SeverityComparison.json new file mode 100644 index 0000000000..0012e90c5d --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/SeverityComparison.json @@ -0,0 +1,72 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/SeverityComparison.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "Exactly one field must be 
specified.\n\nmonitoring.uss_qualifier.reports.validation.definitions.SeverityComparison, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "at_least": { + "enum": [ + "Critical", + "High", + "Medium", + "Low" + ], + "type": [ + "string", + "null" + ] + }, + "equal_to": { + "enum": [ + "Critical", + "High", + "Medium", + "Low" + ], + "type": [ + "string", + "null" + ] + }, + "higher_than": { + "enum": [ + "Critical", + "High", + "Medium", + "Low" + ], + "type": [ + "string", + "null" + ] + }, + "lower_than": { + "enum": [ + "Critical", + "High", + "Medium", + "Low" + ], + "type": [ + "string", + "null" + ] + }, + "no_higher_than": { + "enum": [ + "Critical", + "High", + "Medium", + "Low" + ], + "type": [ + "string", + "null" + ] + } + }, + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/SkippedCheckApplicability.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/SkippedCheckApplicability.json new file mode 100644 index 0000000000..d7773bba30 --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/SkippedCheckApplicability.json @@ -0,0 +1,12 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/SkippedCheckApplicability.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "SkippedCheckReport test report elements are applicable according to this specification.\n\nmonitoring.uss_qualifier.reports.validation.definitions.SkippedCheckApplicability, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + } + }, + "type": "object" +} \ No newline at end of file diff --git 
a/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationConfiguration.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationConfiguration.json index 2d74bc1075..d23c3eef32 100644 --- a/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationConfiguration.json +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationConfiguration.json @@ -8,6 +8,7 @@ "type": "string" }, "criteria": { + "description": "Set of criteria which must all pass in order to pass validation.", "items": { "$ref": "ValidationCriterion.json" }, diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationCriterion.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationCriterion.json index 86a503e3b4..6f103e7160 100644 --- a/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationCriterion.json +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationCriterion.json @@ -7,26 +7,18 @@ "description": "Path to content that replaces the $ref", "type": "string" }, - "full_success": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "FullSuccessCriterion.json" - } - ] + "applicability": { + "$ref": "ValidationCriterionApplicability.json", + "description": "Definition of the test report elements to which the `pass_condition` is applicable." }, - "no_skipped_actions": { - "oneOf": [ - { - "type": "null" - }, - { - "$ref": "NoSkippedActionsCriterion.json" - } - ] + "pass_condition": { + "$ref": "PassCondition.json", + "description": "Condition that must be met by the applicable test report element(s) in order to pass validation." 
} }, + "required": [ + "applicability", + "pass_condition" + ], "type": "object" } \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationCriterionApplicability.json b/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationCriterionApplicability.json new file mode 100644 index 0000000000..c55b8a790a --- /dev/null +++ b/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationCriterionApplicability.json @@ -0,0 +1,74 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/validation/definitions/ValidationCriterionApplicability.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "A single criterion for determining whether a test report element is applicable.\n\nExactly one field must be specified.\n\nmonitoring.uss_qualifier.reports.validation.definitions.ValidationCriterionApplicability, as defined in monitoring/uss_qualifier/reports/validation/definitions.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "address_is": { + "description": "Only the element at this JSONAddress in the test report is applicable.", + "type": [ + "string", + "null" + ] + }, + "does_not_satisfy": { + "description": "Only elements that do not satisfy this criterion are applicable.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "ValidationCriterionApplicability.json" + } + ] + }, + "failed_checks": { + "description": "Only this kind of FailedCheck elements are applicable.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "FailedCheckApplicability.json" + } + ] + }, + "satisfies_all": { + "description": "Only elements which satisfy all these criteria are applicable.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "AllCriteriaApplicability.json" + } + ] + }, + "satisfies_any": { + "description": "Elements which satisfy any of 
these criteria are applicable.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "AnyCriteriaApplicability.json" + } + ] + }, + "skipped_actions": { + "description": "Only this kind of SkippedCheckReport elements are applicable.", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "SkippedCheckApplicability.json" + } + ] + } + }, + "type": "object" +} \ No newline at end of file