From 77be89fdc8c7d302e5158fb3d3beca59da8b15bf Mon Sep 17 00:00:00 2001 From: Benjamin Pelletier Date: Fri, 15 Sep 2023 08:24:12 +0000 Subject: [PATCH] Add tested_requirements artifact --- .../configurations/configuration.py | 10 +- .../configurations/dev/f3548.yaml | 2 + .../configurations/dev/netrid_v22a.yaml | 2 + .../configurations/dev/uspace.yaml | 2 + monitoring/uss_qualifier/main.py | 8 + .../participant_tested_requirements.html | 111 +++++ .../tested_requirements/test_run_report.html | 12 + .../reports/tested_requirements.py | 406 ++++++++++++++++++ schemas/manage_type_schemas.py | 9 +- .../configuration/ArtifactsConfiguration.json | 11 + .../TestedRequirementsConfiguration.json | 19 + .../TestedRolesConfiguration.json | 16 +- .../uss_qualifier/reports/graphs/Node.json | 44 -- .../templates/InjectedConfiguration.json | 19 - 14 files changed, 595 insertions(+), 76 deletions(-) create mode 100644 monitoring/uss_qualifier/reports/templates/tested_requirements/participant_tested_requirements.html create mode 100644 monitoring/uss_qualifier/reports/templates/tested_requirements/test_run_report.html create mode 100644 monitoring/uss_qualifier/reports/tested_requirements.py create mode 100644 schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json delete mode 100644 schemas/monitoring/uss_qualifier/reports/graphs/Node.json delete mode 100644 schemas/monitoring/uss_qualifier/reports/templates/InjectedConfiguration.json diff --git a/monitoring/uss_qualifier/configurations/configuration.py b/monitoring/uss_qualifier/configurations/configuration.py index f62ea774f1..a8749e6ec8 100644 --- a/monitoring/uss_qualifier/configurations/configuration.py +++ b/monitoring/uss_qualifier/configurations/configuration.py @@ -27,7 +27,12 @@ class TestConfiguration(ImplicitDict): class TestedRolesConfiguration(ImplicitDict): report_path: str - """Path of HTML file to contain a fulfilled-requirements-based view of the test report""" + """Path of folder to write HTML files containing a fulfilled-requirements-based view of the test report""" + + +class TestedRequirementsConfiguration(ImplicitDict): + output_path: str + """Path of a folder into which report HTML files should be written""" class ReportHTMLConfiguration(ImplicitDict): @@ -79,6 +84,9 @@ class ArtifactsConfiguration(ImplicitDict): tested_roles: Optional[TestedRolesConfiguration] = None """If specified, configuration describing a desired report summarizing tested requirements for each specified participant and role""" + tested_requirements: Optional[TestedRequirementsConfiguration] = None + """If specified, configuration describing a desired report summarizing all tested requirements for each participant""" + class USSQualifierConfigurationV1(ImplicitDict): test_run: Optional[TestConfiguration] = None diff --git a/monitoring/uss_qualifier/configurations/dev/f3548.yaml b/monitoring/uss_qualifier/configurations/dev/f3548.yaml index 3007e038f9..a807b0b9c9 100644 --- a/monitoring/uss_qualifier/configurations/dev/f3548.yaml +++ b/monitoring/uss_qualifier/configurations/dev/f3548.yaml @@ -18,3 +18,5 @@ v1: report_path: output/tested_roles_f3548 report: report_path: output/report_f3548.json + tested_requirements: + output_path: output/tested_requirements_f3548 diff --git a/monitoring/uss_qualifier/configurations/dev/netrid_v22a.yaml b/monitoring/uss_qualifier/configurations/dev/netrid_v22a.yaml index da0feb02aa..f8b2223104 100644 --- a/monitoring/uss_qualifier/configurations/dev/netrid_v22a.yaml +++ 
b/monitoring/uss_qualifier/configurations/dev/netrid_v22a.yaml @@ -20,3 +20,5 @@ v1: report_path: output/report_netrid_v22a.json tested_roles: report_path: output/tested_roles_netrid_v22a + tested_requirements: + output_path: output/tested_requirements_f3411v22a diff --git a/monitoring/uss_qualifier/configurations/dev/uspace.yaml b/monitoring/uss_qualifier/configurations/dev/uspace.yaml index 95721504f1..43c8d5343a 100644 --- a/monitoring/uss_qualifier/configurations/dev/uspace.yaml +++ b/monitoring/uss_qualifier/configurations/dev/uspace.yaml @@ -31,3 +31,5 @@ v1: templated_reports: - template_url: https://github.com/Orbitalize/reports/releases/download/v0.0.17/app-v0.0.17.zip output_path: output/capabilities_uspace.html + tested_requirements: + output_path: output/tested_requirements_uspace diff --git a/monitoring/uss_qualifier/main.py b/monitoring/uss_qualifier/main.py index c499a3c603..0aa919e982 100644 --- a/monitoring/uss_qualifier/main.py +++ b/monitoring/uss_qualifier/main.py @@ -19,6 +19,9 @@ ) from monitoring.uss_qualifier.fileio import load_dict_with_references from monitoring.uss_qualifier.reports.documents import make_report_html +from monitoring.uss_qualifier.reports.tested_requirements import ( + generate_tested_requirements, +) from monitoring.uss_qualifier.reports.tested_roles import generate_tested_roles from monitoring.uss_qualifier.reports.graphs import make_graph from monitoring.uss_qualifier.reports.report import TestRunReport, redact_access_tokens @@ -196,6 +199,11 @@ def main() -> int: logger.info("Writing tested roles view to {}", path) generate_tested_roles(report, path) + if config.artifacts.tested_requirements: + path = config.artifacts.tested_requirements.output_path + logger.info(f"Writing tested requirements view to {path}") + generate_tested_requirements(report, path) + return os.EX_OK diff --git a/monitoring/uss_qualifier/reports/templates/tested_requirements/participant_tested_requirements.html b/monitoring/uss_qualifier/reports/templates/tested_requirements/participant_tested_requirements.html new file mode 100644 index 0000000000..0ff0a9111d --- /dev/null +++ b/monitoring/uss_qualifier/reports/templates/tested_requirements/participant_tested_requirements.html @@ -0,0 +1,111 @@ + + + + + +
+    {{ participant_id }} tested requirements
+
+        Package
+        Requirement
+        Result
+        Scenario
+        Case
+        Step
+        Check
+      {% set first_row = namespace(package=True, requirement=True, scenario=True, case=True, step=True) %}
+      {% for package in breakdown.packages %}
+        {% set first_row.package = True %}
+        {% for requirement in package.requirements %}
+          {% set first_row.requirement = True %}
+          {% for test_scenario in requirement.scenarios %}
+            {% set first_row.scenario = True %}
+            {% for test_case in test_scenario.cases %}
+              {% set first_row.case = True %}
+              {% for test_step in test_case.steps %}
+                {% set first_row.step = True %}
+                {% for check in test_step.checks %}
+                  {% if first_row.package %}
+                    {{ package.name }}
+                  {% endif %}
+                  {% if first_row.requirement %}
+                    {{ requirement.id }}
+                  {% endif %}
+                  {{ check.result }}
+                  {% if first_row.scenario %}
+                    {% if test_scenario.url %}
+                      {{ test_scenario.name }}
+                    {% else %}
+                      {{ test_scenario.name }}
+                    {% endif %}
+                  {% endif %}
+                  {% if first_row.case %}
+                    {% if test_case.url %}
+                      {{ test_case.name }}
+                    {% else %}
+                      {{ test_case.name }}
+                    {% endif %}
+                  {% endif %}
+                  {% if first_row.step %}
+                    {% if test_step.url %}
+                      {{ test_step.name }}
+                    {% else %}
+                      {{ test_step.name }}
+                    {% endif %}
+                  {% endif %}
+                  {% if check.url %}
+                    {{ check.name }}
+                  {% else %}
+                    {{ check.name }}
+                  {% endif %}
+                  {% if check.successes + check.failures > 1 %}
+                    ({{ check.successes + check.failures }}x)
+                  {% endif %}
+                  {% set first_row.package = False %}
+                  {% set first_row.requirement = False %}
+                  {% set first_row.scenario = False %}
+                  {% set first_row.case = False %}
+                  {% set first_row.step = False %}
+                {% endfor %}
+              {% endfor %}
+            {% endfor %}
+          {% endfor %}
+        {% endfor %}
+      {% endfor %}
diff --git a/monitoring/uss_qualifier/reports/templates/tested_requirements/test_run_report.html b/monitoring/uss_qualifier/reports/templates/tested_requirements/test_run_report.html
new file mode 100644
index 0000000000..172ed333ec
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/templates/tested_requirements/test_run_report.html
@@ -0,0 +1,12 @@
+    Participants
diff --git a/monitoring/uss_qualifier/reports/tested_requirements.py b/monitoring/uss_qualifier/reports/tested_requirements.py
new file mode 100644
index 0000000000..426ff4bbec
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/tested_requirements.py
@@ -0,0 +1,406 @@
+import os
+import shutil
+from typing import List, Union
+
+from implicitdict import ImplicitDict
+from monitoring.monitorlib.inspection import import_submodules
+from monitoring.uss_qualifier import scenarios, suites, action_generators
+from monitoring.uss_qualifier.action_generators.documentation.definitions import (
+    PotentialGeneratedAction,
+)
+from monitoring.uss_qualifier.action_generators.documentation.documentation import (
+    list_potential_actions_for_action_generator_definition,
+)
+from monitoring.uss_qualifier.configurations.configuration import ParticipantID
+from monitoring.uss_qualifier.fileio import load_dict_with_references
+from monitoring.uss_qualifier.reports import jinja_env
+from monitoring.uss_qualifier.reports.report import (
+    TestRunReport,
+    TestSuiteActionReport,
+    TestScenarioReport,
+    PassedCheck,
+    FailedCheck,
+)
+from monitoring.uss_qualifier.scenarios.definitions import TestScenarioTypeName
+from monitoring.uss_qualifier.scenarios.documentation.parsing import get_documentation
+from monitoring.uss_qualifier.scenarios.scenario import get_scenario_type_by_name
+from monitoring.uss_qualifier.suites.definitions import (
+    TestSuiteActionDeclaration,
+    ActionType,
+    TestSuiteDefinition,
+)
+
+
+class TestedCheck(ImplicitDict):
+    name: str
+    url: str
+    successes: int = 0
+    failures: int = 0
+
+    @property
+    def result(self) -> str:
+        if self.failures > 0:
+            return "Fail"
+        if self.not_tested:
+            return "Not tested"
+        else:
+            return "Pass"
+
+    @property
+    def classname(self) -> str:
+        if self.failures > 0:
+            return "fail_result"
+        if self.successes + self.failures == 0:
+            return "not_tested"
+        else:
+            return "pass_result"
+
+    @property
+    def not_tested(self) -> bool:
+        return self.successes + self.failures == 0
+
+
+class TestedStep(ImplicitDict):
+    name: str
+    url: str
+    checks: List[TestedCheck]
+
+    @property
+    def rows(self) -> int:
+        return len(self.checks)
+
+    @property
+    def no_failures(self) -> bool:
+        return all(c.failures == 0 for c in self.checks)
+
+    @property
+    def not_tested(self) -> bool:
+        return all(c.not_tested for c in self.checks)
+
+
+class TestedCase(ImplicitDict):
+    name: str
+    url: str
+    steps: List[TestedStep]
+
+    @property
+    def rows(self) -> int:
+        return sum(s.rows for s in self.steps)
+
+    @property
+    def no_failures(self) -> bool:
+        return all(s.no_failures for s in self.steps)
+
+    @property
+    def not_tested(self) -> bool:
+        return all(s.not_tested for s in self.steps)
+
+
+class TestedScenario(ImplicitDict):
+    type: TestScenarioTypeName
+    name: str
+    url: str
+    cases: List[TestedCase]
+
+    @property
+    def rows(self) -> int:
+        return sum(c.rows for c in self.cases)
+
+    @property
+    def no_failures(self) -> bool:
+        return all(c.no_failures for c in self.cases)
+
+    @property
+    def not_tested(self) -> bool:
+        return all(c.not_tested for c in self.cases)
+
+
+class TestedRequirement(ImplicitDict):
+    id: str
+    scenarios: List[TestedScenario]
+
+    @property
+    def rows(self) -> int:
+        return sum(s.rows for s in self.scenarios)
+
+    @property
+    def classname(self) -> str:
+        if not all(s.no_failures for s in self.scenarios):
+            return "fail_result"
+        elif all(s.not_tested for s in self.scenarios):
+            return "not_tested"
+        else:
+            return "pass_result"
+
+
+class 
TestedPackage(ImplicitDict): + id: str + name: str + requirements: List[TestedRequirement] + + @property + def rows(self) -> int: + return sum(r.rows for r in self.requirements) + + +class TestedBreakdown(ImplicitDict): + packages: List[TestedPackage] + + +def generate_tested_requirements(report: TestRunReport, output_path: str) -> None: + import_submodules(scenarios) + import_submodules(suites) + import_submodules(action_generators) + + if os.path.exists(output_path): + shutil.rmtree(output_path) + index_file = os.path.join(output_path, "index.html") + os.makedirs(os.path.dirname(index_file), exist_ok=True) + + participant_ids = report.report.participant_ids() + template = jinja_env.get_template("tested_requirements/test_run_report.html") + with open(index_file, "w") as f: + f.write(template.render(participant_ids=participant_ids)) + + template = jinja_env.get_template( + "tested_requirements/participant_tested_requirements.html" + ) + for participant_id in participant_ids: + participant_breakdown = TestedBreakdown(packages=[]) + _populate_breakdown_with_action_report( + participant_breakdown, report.report, participant_id + ) + _populate_breakdown_with_action_declaration( + participant_breakdown, report.configuration.action + ) + _sort_breakdown(participant_breakdown) + participant_file = os.path.join(output_path, f"{participant_id}.html") + with open(participant_file, "w") as f: + f.write( + template.render( + participant_id=participant_id, breakdown=participant_breakdown + ) + ) + + +def _sort_breakdown(breakdown: TestedBreakdown) -> None: + breakdown.packages.sort(key=lambda p: p.id) + for package in breakdown.packages: + package.requirements.sort(key=lambda r: r.id) + for requirement in package.requirements: + requirement.scenarios.sort(key=lambda s: s.name) + + +def _populate_breakdown_with_action_report( + breakdown: TestedBreakdown, + action: TestSuiteActionReport, + participant_id: ParticipantID, +) -> None: + test_suite, test_scenario, action_generator = action.get_applicable_report() + if test_scenario: + return _populate_breakdown_with_scenario_report( + breakdown, action.test_scenario, participant_id + ) + elif test_suite: + for subaction in action.test_suite.actions: + _populate_breakdown_with_action_report(breakdown, subaction, participant_id) + elif action_generator: + for subaction in action.action_generator.actions: + _populate_breakdown_with_action_report(breakdown, subaction, participant_id) + else: + raise ValueError(f"Unsupported test suite report type") + + +def _populate_breakdown_with_scenario_report( + breakdown: TestedBreakdown, + scenario_report: TestScenarioReport, + participant_id: ParticipantID, +) -> None: + scenario_type_name = scenario_report.scenario_type + for case in scenario_report.cases: + for step in case.steps: + for check in step.passed_checks + step.failed_checks: + if participant_id not in check.participants: + continue + for req_id in check.requirements: + package_id = ".".join(req_id.split(".")[0:-1]) + package_name = "
.".join(package_id.split(".")) + matches = [p for p in breakdown.packages if p.id == package_id] + if matches: + tested_package = matches[0] + else: + # TODO: Improve name of package by using title of page + tested_package = TestedPackage( + id=package_id, name=package_name, requirements=[] + ) + breakdown.packages.append(tested_package) + + short_req_id = req_id.split(".")[-1] + matches = [ + r for r in tested_package.requirements if r.id == short_req_id + ] + if matches: + tested_requirement = matches[0] + else: + tested_requirement = TestedRequirement( + id=short_req_id, scenarios=[] + ) + tested_package.requirements.append(tested_requirement) + + matches = [ + s + for s in tested_requirement.scenarios + if s.type == scenario_type_name + ] + if matches: + tested_scenario = matches[0] + else: + tested_scenario = TestedScenario( + type=scenario_type_name, + name=scenario_report.name, + url=scenario_report.documentation_url, + cases=[], + ) + tested_requirement.scenarios.append(tested_scenario) + + matches = [c for c in tested_scenario.cases if c.name == case.name] + if matches: + tested_case = matches[0] + else: + tested_case = TestedCase( + name=case.name, url=case.documentation_url, steps=[] + ) + tested_scenario.cases.append(tested_case) + + matches = [s for s in tested_case.steps if s.name == step.name] + if matches: + tested_step = matches[0] + else: + tested_step = TestedStep( + name=step.name, url=step.documentation_url, checks=[] + ) + tested_case.steps.append(tested_step) + + matches = [c for c in tested_step.checks if c.name == check.name] + if matches: + tested_check = matches[0] + else: + tested_check = TestedCheck(name=check.name, url="") + if isinstance(check, FailedCheck): + tested_check.url = check.documentation_url + tested_step.checks.append(tested_check) + if isinstance(check, PassedCheck): + tested_check.successes += 1 + elif isinstance(check, FailedCheck): + tested_check.failures += 1 + else: + raise ValueError("Check is neither PassedCheck nor FailedCheck") + + +def _populate_breakdown_with_action_declaration( + breakdown: TestedBreakdown, + action: Union[TestSuiteActionDeclaration, PotentialGeneratedAction], +) -> None: + action_type = action.get_action_type() + if action_type == ActionType.TestScenario: + _populate_breakdown_with_scenario(breakdown, action.test_scenario.scenario_type) + elif action_type == ActionType.TestSuite: + if "suite_type" in action.test_suite and action.test_suite.suite_type: + suite_def: TestSuiteDefinition = ImplicitDict.parse( + load_dict_with_references(action.test_suite.suite_type), + TestSuiteDefinition, + ) + for action in suite_def.actions: + _populate_breakdown_with_action_declaration(breakdown, action) + elif ( + "suite_definition" in action.test_suite + and action.test_suite.suite_definition + ): + for action in action.test_suite.suite_definition: + _populate_breakdown_with_action_declaration(breakdown, action) + else: + raise ValueError(f"Test suite action missing suite type or definition") + elif action_type == ActionType.ActionGenerator: + potential_actions = list_potential_actions_for_action_generator_definition( + action.action_generator + ) + for action in potential_actions: + _populate_breakdown_with_action_declaration(breakdown, action) + else: + raise NotImplementedError(f"Unsupported test suite action type: {action_type}") + + +def _populate_breakdown_with_scenario( + breakdown: TestedBreakdown, scenario_type_name: TestScenarioTypeName +) -> None: + scenario_type = get_scenario_type_by_name(scenario_type_name) + 
scenario_doc = get_documentation(scenario_type) + for case in scenario_doc.cases: + for step in case.steps: + for check in step.checks: + for req_id in check.applicable_requirements: + package_id = ".".join(req_id.split(".")[0:-1]) + package_name = "
.".join(package_id.split(".")) + matches = [p for p in breakdown.packages if p.id == package_id] + if matches: + tested_package = matches[0] + else: + # TODO: Improve name of package by using title of page + tested_package = TestedPackage( + id=package_id, name=package_name, requirements=[] + ) + breakdown.packages.append(tested_package) + + short_req_id = req_id.split(".")[-1] + matches = [ + r for r in tested_package.requirements if r.id == short_req_id + ] + if matches: + tested_requirement = matches[0] + else: + tested_requirement = TestedRequirement( + id=short_req_id, scenarios=[] + ) + tested_package.requirements.append(tested_requirement) + + matches = [ + s + for s in tested_requirement.scenarios + if s.type == scenario_type_name + ] + if matches: + tested_scenario = matches[0] + else: + tested_scenario = TestedScenario( + type=scenario_type_name, + name=scenario_doc.name, + url=scenario_doc.url, + cases=[], + ) + tested_requirement.scenarios.append(tested_scenario) + + matches = [c for c in tested_scenario.cases if c.name == case.name] + if matches: + tested_case = matches[0] + else: + tested_case = TestedCase(name=case.name, url=case.url, steps=[]) + tested_scenario.cases.append(tested_case) + + matches = [s for s in tested_case.steps if s.name == step.name] + if matches: + tested_step = matches[0] + else: + tested_step = TestedStep( + name=step.name, url=step.url, checks=[] + ) + tested_case.steps.append(tested_step) + + matches = [c for c in tested_step.checks if c.name == check.name] + if matches: + tested_check = matches[0] + else: + tested_check = TestedCheck(name=check.name, url=check.url) + if not check.has_todo: + tested_step.checks.append(tested_check) + if not tested_check.url: + tested_check.url = check.url diff --git a/schemas/manage_type_schemas.py b/schemas/manage_type_schemas.py index 8b0c3a503a..db3c23ab86 100644 --- a/schemas/manage_type_schemas.py +++ b/schemas/manage_type_schemas.py @@ -12,8 +12,6 @@ from loguru import logger import monitoring -from monitoring.uss_qualifier import configurations, reports -from monitoring.monitorlib.inspection import import_submodules class Action(str, enum.Enum): @@ -116,8 +114,11 @@ def path_to(t_dest: Type, t_src: Type) -> str: description=f"{full_name(schema_type)}, as defined in {path_of_py_file(schema_type)}", ) - for module in (configurations, reports): - import_submodules(module) + from monitoring.uss_qualifier.reports.report import TestRunReport + from monitoring.uss_qualifier.configurations.configuration import ( + USSQualifierConfiguration, + ) + schemas = _find_type_schemas(monitoring, schema_vars_resolver) repo_root = os.path.abspath( diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json index ab24fcf792..67fb992e24 100644 --- a/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json +++ b/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json @@ -51,6 +51,17 @@ }, "type": "array" }, + "tested_requirements": { + "description": "If specified, configuration describing a desired report summarizing all tested requirements for each participant", + "oneOf": [ + { + "type": "null" + }, + { + "$ref": "TestedRequirementsConfiguration.json" + } + ] + }, "tested_roles": { "description": "If specified, configuration describing a desired report summarizing tested requirements for each specified participant and role", 
"oneOf": [ diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json new file mode 100644 index 0000000000..87ced53307 --- /dev/null +++ b/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json @@ -0,0 +1,19 @@ +{ + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRequirementsConfiguration.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "description": "monitoring.uss_qualifier.configurations.configuration.TestedRequirementsConfiguration, as defined in monitoring/uss_qualifier/configurations/configuration.py", + "properties": { + "$ref": { + "description": "Path to content that replaces the $ref", + "type": "string" + }, + "output_path": { + "description": "Path of a folder into which report HTML files should be written", + "type": "string" + } + }, + "required": [ + "output_path" + ], + "type": "object" +} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRolesConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRolesConfiguration.json index ea3f234b56..af8cf7a590 100644 --- a/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRolesConfiguration.json +++ b/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRolesConfiguration.json @@ -1,19 +1,19 @@ { + "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRolesConfiguration.json", "$schema": "https://json-schema.org/draft/2020-12/schema", - "type": "object", + "description": "monitoring.uss_qualifier.configurations.configuration.TestedRolesConfiguration, as defined in monitoring/uss_qualifier/configurations/configuration.py", "properties": { "$ref": { - "type": "string", - "description": "Path to content that replaces the $ref" + "description": "Path to content that replaces the $ref", + "type": "string" }, "report_path": { - "type": "string", - "description": "Path of HTML file to contain a fulfilled-requirements-based view of the test report" + "description": "Path of folder to write HTML files containing a fulfilled-requirements-based view of the test report", + "type": "string" } }, - "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/configurations/configuration/TestedRolesConfiguration.json", - "description": "monitoring.uss_qualifier.configurations.configuration.TestedRolesConfiguration, as defined in monitoring/uss_qualifier/configurations/configuration.py", "required": [ "report_path" - ] + ], + "type": "object" } \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/graphs/Node.json b/schemas/monitoring/uss_qualifier/reports/graphs/Node.json deleted file mode 100644 index fb0e789cd8..0000000000 --- a/schemas/monitoring/uss_qualifier/reports/graphs/Node.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "$schema": "https://json-schema.org/draft/2020-12/schema", - "type": "object", - "properties": { - "$ref": { - "type": "string", - "description": "Path to content that replaces the $ref" - }, - "label": { - "type": [ - "string", - "null" - ] - }, - "name": { - "type": "string" - }, - "children": { - "type": "array", - "items": { - "type": "string" - } - }, - "attributes": { - "type": 
"object", - "properties": { - "$ref": { - "type": "string", - "description": "Path to content that replaces the $ref" - } - }, - "additionalProperties": { - "type": "string" - } - } - }, - "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/graphs/Node.json", - "description": "Represents a node to be used in a GraphViz graph.\n\nmonitoring.uss_qualifier.reports.graphs.Node, as defined in monitoring/uss_qualifier/reports/graphs.py", - "required": [ - "attributes", - "children", - "name" - ] -} \ No newline at end of file diff --git a/schemas/monitoring/uss_qualifier/reports/templates/InjectedConfiguration.json b/schemas/monitoring/uss_qualifier/reports/templates/InjectedConfiguration.json deleted file mode 100644 index 2b97b74ba3..0000000000 --- a/schemas/monitoring/uss_qualifier/reports/templates/InjectedConfiguration.json +++ /dev/null @@ -1,19 +0,0 @@ -{ - "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/templates/InjectedConfiguration.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "description": "monitoring.uss_qualifier.reports.templates.InjectedConfiguration, as defined in monitoring/uss_qualifier/reports/templates.py", - "properties": { - "$ref": { - "description": "Path to content that replaces the $ref", - "type": "string" - }, - "report": { - "$ref": "../report/TestRunReport.json", - "description": "Report instance to inject in the templated report" - } - }, - "required": [ - "report" - ], - "type": "object" -} \ No newline at end of file