diff --git a/monitoring/mock_uss/templates/tracer/log.html b/monitoring/mock_uss/templates/tracer/log.html
index 5fac93169e..dd0158b975 100644
--- a/monitoring/mock_uss/templates/tracer/log.html
+++ b/monitoring/mock_uss/templates/tracer/log.html
@@ -3,6 +3,6 @@
 {% block content %}
   {{ explorer_header() }}
-  {{ explorer_content(log) }}
-  {{ explorer_footer() }}
+  {{ explorer_content("top_node", log) }}
+  {{ explorer_footer(["top_node"]) }}
 {% endblock %}
diff --git a/monitoring/monitorlib/fetch/__init__.py b/monitoring/monitorlib/fetch/__init__.py
index f9b1cadcdf..b99816b69a 100644
--- a/monitoring/monitorlib/fetch/__init__.py
+++ b/monitoring/monitorlib/fetch/__init__.py
@@ -6,6 +6,7 @@
 from typing import Dict, Optional, List
 from enum import Enum
+from urllib.parse import urlparse

 import flask
 from loguru import logger
@@ -56,6 +57,10 @@ def timestamp(self) -> datetime.datetime:
                 "RequestDescription missing both initiated_at and received_at"
             )

+    @property
+    def url_hostname(self) -> str:
+        return urlparse(self.url).hostname
+

 yaml.add_representer(RequestDescription, Representer.represent_dict)
diff --git a/monitoring/monitorlib/html/templates/explorer.html b/monitoring/monitorlib/html/templates/explorer.html
index 9b6b1ee03c..af2e2d2bfa 100644
--- a/monitoring/monitorlib/html/templates/explorer.html
+++ b/monitoring/monitorlib/html/templates/explorer.html
@@ -1,7 +1,7 @@
 {# Renders the provided `obj` dict as interactive HTML #}
 {# Content of explorer_header() should be added to the header of the page #}
-{# Content of explorer_content(obj) represents the `obj` dict as interactive HTML content #}
-{# Content of explorer_footer() should be added to the page such that it is loaded after draw_node #}
+{# Content of explorer_content(div_id, obj) represents the `obj` dict as interactive HTML content #}
+{# Content of explorer_footer(div_ids) should be added to the page such that it is loaded after explorer_content/draw_node #}

 {% macro collapseable(v) %}{% if v is mapping or (v is iterable and v is not string) %}collapseable{% else %}not_collapseable{% endif %}{% endmacro %}

@@ -66,13 +66,13 @@
 {% endmacro %}

-{% macro explorer_content(obj) %}
-<div id="top_node">
+{% macro explorer_content(div_id, obj) %}
+<div id="{{ div_id }}">
   {{ draw_node(obj) }}
 </div>
 {% endmacro %}

-{% macro explorer_footer() %}
+{% macro explorer_footer(div_ids) %}
 {% endmacro %}
diff --git a/monitoring/uss_qualifier/configurations/configuration.py b/monitoring/uss_qualifier/configurations/configuration.py
index 398d4374fc..e653164609 100644
--- a/monitoring/uss_qualifier/configurations/configuration.py
+++ b/monitoring/uss_qualifier/configurations/configuration.py
@@ -50,9 +50,14 @@ class TestedRequirementsConfiguration(ImplicitDict):
     """If a requirement collection is specified for a participant, only the requirements in the specified collection will be listed on that participant's report."""


+class SequenceViewConfiguration(ImplicitDict):
+    output_path: str
+    """Path of a folder into which report HTML files should be written"""
+
+
 class ReportHTMLConfiguration(ImplicitDict):
     html_path: str
-    """Path of HTML file to contain an HTML rendering of the test report"""
+    """Path of HTML file to contain an HTML rendering of the raw test report object"""


 class TemplatedReportInjectedConfiguration(ImplicitDict):
@@ -102,6 +107,9 @@ class ArtifactsConfiguration(ImplicitDict):
     tested_requirements: Optional[TestedRequirementsConfiguration] = None
     """If specified, configuration describing a desired report summarizing all tested requirements for each participant"""

+    sequence_view: Optional[SequenceViewConfiguration] = None
+    """If specified, configuration describing a desired report describing the sequence of events that occurred during the test"""
+

 class USSQualifierConfigurationV1(ImplicitDict):
     test_run: Optional[TestConfiguration] = None
diff --git a/monitoring/uss_qualifier/configurations/dev/uspace.yaml b/monitoring/uss_qualifier/configurations/dev/uspace.yaml
index 45f32f6913..e7b54c7711 100644
--- a/monitoring/uss_qualifier/configurations/dev/uspace.yaml
+++ b/monitoring/uss_qualifier/configurations/dev/uspace.yaml
@@ -44,3 +44,5 @@ v1:
       participant_requirements:
         uss1: uspace
         uss2: uspace
+    sequence_view:
+      output_path: output/sequence_uspace
diff --git a/monitoring/uss_qualifier/main.py b/monitoring/uss_qualifier/main.py
index 240435f433..a8e9afd2a7 100644
--- a/monitoring/uss_qualifier/main.py
+++ b/monitoring/uss_qualifier/main.py
@@ -19,6 +19,7 @@
 )
 from monitoring.uss_qualifier.fileio import load_dict_with_references
 from monitoring.uss_qualifier.reports.documents import make_report_html
+from monitoring.uss_qualifier.reports.sequence_view import generate_sequence_view
 from monitoring.uss_qualifier.reports.tested_requirements import (
     generate_tested_requirements,
 )
@@ -204,6 +205,11 @@ def main() -> int:
             logger.info(f"Writing tested requirements view to {path}")
             generate_tested_requirements(report, config.artifacts.tested_requirements)

+        if config.artifacts.sequence_view:
+            path = config.artifacts.sequence_view.output_path
+            logger.info(f"Writing sequence view to {path}")
+            generate_sequence_view(report, config.artifacts.sequence_view)
+
     return os.EX_OK
diff --git a/monitoring/uss_qualifier/reports/report.py b/monitoring/uss_qualifier/reports/report.py
index 818f274119..b0e1a2bef4 100644
--- a/monitoring/uss_qualifier/reports/report.py
+++ b/monitoring/uss_qualifier/reports/report.py
@@ -59,6 +59,9 @@ class PassedCheck(ImplicitDict):
     name: str
     """Name of the check that passed"""

+    timestamp: StringBasedDateTime
+    """Time the check was performed"""
+
     requirements: List[RequirementID]
     """Requirements that would not have been met if this check had failed"""
diff --git a/monitoring/uss_qualifier/reports/sequence_view.py b/monitoring/uss_qualifier/reports/sequence_view.py
new file mode 100644
index 0000000000..40c19c774d
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/sequence_view.py
@@ -0,0 +1,491 @@
+from __future__ import annotations
+import os
+from dataclasses import dataclass
+from datetime import datetime
+from enum import Enum
+from typing import List, Dict, Optional, Iterator
+
+from implicitdict import ImplicitDict
+
+from monitoring.monitorlib.fetch import Query
+from monitoring.uss_qualifier.configurations.configuration import (
+    ParticipantID,
+    SequenceViewConfiguration,
+)
+from monitoring.uss_qualifier.reports import jinja_env
+from monitoring.uss_qualifier.reports.report import (
+    TestRunReport,
+    TestSuiteActionReport,
+    TestScenarioReport,
+    PassedCheck,
+    FailedCheck,
+)
+from monitoring.uss_qualifier.scenarios.definitions import TestScenarioTypeName
+
+
+class NoteEvent(ImplicitDict):
+    key: str
+    message: str
+    timestamp: datetime
+
+
+class EventType(str, Enum):
+    PassedCheck = "PassedCheck"
+    FailedCheck = "FailedCheck"
+    Query = "Query"
+    Note = "Note"
+
+
+class Event(ImplicitDict):
+    event_index: int = 0
+    passed_check: Optional[PassedCheck] = None
+    failed_check: Optional[FailedCheck] = None
+    query: Optional[Query] = None
+    note: Optional[NoteEvent] = None
+
+    @property
+    def type(self) -> EventType:
+        if self.passed_check:
+            return EventType.PassedCheck
+        elif self.failed_check:
+            return EventType.FailedCheck
+        elif self.query:
+            return EventType.Query
+        elif self.note:
+            return EventType.Note
+        else:
+            raise ValueError("Invalid Event type")
+
+    @property
+    def timestamp(self) -> datetime:
+        if self.passed_check:
+            return self.passed_check.timestamp.datetime
+        elif self.failed_check:
+            return self.failed_check.timestamp.datetime
+        elif self.query:
+            return self.query.request.timestamp
+        elif self.note:
+            return self.note.timestamp
+        else:
+            raise ValueError("Invalid Event type")
+
+
+class TestedStep(ImplicitDict):
+    name: str
+    url: str
+    events: List[Event]
+
+    @property
+    def rows(self) -> int:
+        return len(self.events)
+
+
+class TestedCase(ImplicitDict):
+    name: str
+    url: str
+    steps: List[TestedStep]
+
+    @property
+    def rows(self) -> int:
+        return sum(s.rows for s in self.steps)
+
+
+class EpochType(str, Enum):
+    Case = "Case"
+    Events = "Events"
+
+
+class Epoch(ImplicitDict):
+    case: Optional[TestedCase] = None
+    events: Optional[List[Event]] = None
+
+    @property
+    def type(self) -> EpochType:
+        if self.case:
+            return EpochType.Case
+        elif self.events:
+            return EpochType.Events
+        else:
+            raise ValueError("Invalid Epoch did not specify case or events")
+
+    @property
+    def rows(self) -> int:
+        if self.case:
+            return self.case.rows
+        elif self.events:
+            return len(self.events)
+        else:
+            raise ValueError("Invalid Epoch did not specify case or events")
+
+
+@dataclass
+class TestedParticipant(object):
+    has_failures: bool
+
+
+class TestedScenario(ImplicitDict):
+    type: TestScenarioTypeName
+    name: str
+    url: str
+    scenario_index: int
+    epochs: List[Epoch]
+    participants: Dict[ParticipantID, TestedParticipant]
+
+    @property
+    def rows(self) -> int:
+        return sum(c.rows for c in self.epochs)
+
+
+class ActionNodeType(str, Enum):
+    Scenario = "Scenario"
+    Suite = "Suite"
+    ActionGenerator = "ActionGenerator"
+
+
+class ActionNode(ImplicitDict):
+    name: str
+    node_type: ActionNodeType
+    children: List[ActionNode]
+    scenario: Optional[TestedScenario] = None
+
+    @property
+    def rows(self) -> int:
+        return sum(c.rows for c in self.children) if self.children else 1
+
+    @property
+    def cols(self) -> int:
+        return 1 + max(c.cols for c in self.children) if self.children else 1
+
+
+@dataclass
+class Indexer(object):
+    scenario_index: int = 1
+
+
+@dataclass
+class SuiteCell(object):
+    node: Optional[ActionNode]
+    first_row: bool
+    rowspan: int = 1
+    colspan: int = 1
+
+
+@dataclass
+class OverviewRow(object):
+    suite_cells: List[SuiteCell]
+    scenario_node: ActionNode
+    filled: bool = False
+
+
+def _compute_tested_scenario(
+    report: TestScenarioReport, indexer: Indexer
+) -> TestedScenario:
+    epochs = []
+    event_index = 1
+
+    def append_notes(new_notes):
+        nonlocal event_index
+        events = []
+        for k, v in new_notes.items():
+            events.append(
+                Event(
+                    note=NoteEvent(
+                        key=k, message=v.message, timestamp=v.timestamp.datetime
+                    ),
+                    event_index=event_index,
+                )
+            )
+            event_index += 1
+        events.sort(key=lambda e: e.timestamp)
+        epochs.append(Epoch(events=events))
+
+    # Add any notes that occurred before the first test step
+    if "notes" in report and report.notes:
+        if len(report.cases) >= 1 and len(report.cases[0].steps) >= 1:
+            first_step_start = report.cases[0].steps[0].start_time.datetime
+            pre_notes = {
+                k: v
+                for k, v in report.notes.items()
+                if v.timestamp.datetime < first_step_start
+            }
+        else:
+            pre_notes = report.notes
+        if pre_notes:
+            append_notes(pre_notes)
+
+    scenario_participants: Dict[ParticipantID, TestedParticipant] = {}
+
+    latest_step_time = None
+    for case in report.cases:
+        steps = []
+        last_step = None
+        for step in case.steps:
+            if "notes" in report and report.notes:
+                # Add events (notes) that happened in between the previous step and this one
+                if last_step is not None:
+                    inter_notes = {
+                        k: v
+                        for k, v in report.notes.items()
+                        if last_step.end_time.datetime
+                        < v.timestamp.datetime
+                        < step.start_time.datetime
+                    }
+                    if inter_notes:
+                        append_notes(inter_notes)
+                # Track the most recent step so the next iteration can find notes
+                # that fall between it and the following step
+                last_step = step
+
+            # Enumerate the events of this step
+            events = []
+            for passed_check in step.passed_checks:
+                events.append(Event(passed_check=passed_check))
+                for pid in passed_check.participants:
+                    p = scenario_participants.get(
+                        pid, TestedParticipant(has_failures=False)
+                    )
+                    scenario_participants[pid] = p
+            for failed_check in step.failed_checks:
+                events.append(Event(failed_check=failed_check))
+                for pid in failed_check.participants:
+                    p = scenario_participants.get(
+                        pid, TestedParticipant(has_failures=True)
+                    )
+                    p.has_failures = True
+                    scenario_participants[pid] = p
+            if "queries" in step and step.queries:
+                for query in step.queries:
+                    events.append(Event(query=query))
+                    if "server_id" in query and query.server_id:
+                        p = scenario_participants.get(
+                            query.server_id, TestedParticipant(has_failures=False)
+                        )
+                        scenario_participants[query.server_id] = p
+            if "notes" in report and report.notes:
+                for key, note in report.notes.items():
+                    if step.start_time.datetime <= note.timestamp.datetime:
+                        if (
+                            "end_time" not in step
+                            or note.timestamp.datetime <= step.end_time.datetime
+                        ):
+                            events.append(
+                                Event(
+                                    note=NoteEvent(
+                                        key=key,
+                                        message=note.message,
+                                        timestamp=note.timestamp.datetime,
+                                    )
+                                )
+                            )
+
+            # Sort this step's events by time
+            events.sort(key=lambda e: e.timestamp)
+
+            # Label this step's events with event_index
+            for e in events:
+                e.event_index = event_index
+                event_index += 1
+
+            # Look for the latest time something happened
+            for e in events:
+                if latest_step_time is None or e.timestamp > latest_step_time:
+                    latest_step_time = e.timestamp
+            if "end_time" in step and step.end_time:
+                if (
+                    latest_step_time is None
+                    or step.end_time.datetime > latest_step_time
+                ):
+                    latest_step_time = step.end_time.datetime
+
+            # Add this step
+            steps.append(
+                TestedStep(
+                    name=step.name,
+                    url=step.documentation_url,
+                    events=events,
+                )
+            )
+        epochs.append(
+            Epoch(
+                case=TestedCase(name=case.name, url=case.documentation_url, steps=steps)
+            )
+        )
+
+    # Add any notes that occurred after the last test step
+    if "notes" in report and report.notes:
+        if len(report.cases) >= 1 and len(report.cases[0].steps) >= 1:
+            post_notes = {
+                k: v
+                for k, v in report.notes.items()
+                if v.timestamp.datetime > latest_step_time
+            }
+        else:
+            post_notes = {}
+        if post_notes:
+            append_notes(post_notes)
+
+    scenario = TestedScenario(
+        type=report.scenario_type,
+        name=report.name,
+        url=report.documentation_url,
+        epochs=epochs,
+        scenario_index=indexer.scenario_index,
+        participants=scenario_participants,
+    )
+    indexer.scenario_index += 1
+    return scenario
+
+
+def _compute_action_node(report: TestSuiteActionReport, indexer: Indexer) -> ActionNode:
+    (
+        is_test_suite,
+        is_test_scenario,
+        is_action_generator,
+    ) = report.get_applicable_report()
+    if is_test_scenario:
+        return ActionNode(
+            name=report.test_scenario.name,
+            node_type=ActionNodeType.Scenario,
+            children=[],
+            scenario=_compute_tested_scenario(report.test_scenario, indexer),
+        )
+    elif is_test_suite:
+        return ActionNode(
+            name=report.test_suite.name,
+            node_type=ActionNodeType.Suite,
+            children=[
+                _compute_action_node(a, indexer) for a in report.test_suite.actions
+            ],
+        )
+    elif is_action_generator:
+        return ActionNode(
+            name=report.action_generator.generator_type,
+            node_type=ActionNodeType.ActionGenerator,
+            children=[
+                _compute_action_node(a, indexer)
+                for a in report.action_generator.actions
+            ],
+        )
+    else:
+        raise ValueError(
+            "Invalid TestSuiteActionReport; doesn't specify scenario, suite, or action generator"
+        )
+
+
+def _compute_overview_rows(node: ActionNode) -> Iterator[OverviewRow]:
+    if node.node_type == ActionNodeType.Scenario:
+        yield OverviewRow(suite_cells=[], scenario_node=node)
+    else:
+        first_row = True
+        for child in node.children:
+            for row in _compute_overview_rows(child):
+                yield OverviewRow(
+                    suite_cells=[SuiteCell(node=node, first_row=first_row)]
+                    + row.suite_cells,
+                    scenario_node=row.scenario_node,
+                )
+                first_row = False
+
+
+def _align_overview_rows(rows: List[OverviewRow]) -> None:
+    max_suite_cols = max(len(r.suite_cells) for r in rows)
+    to_fill = 0
+    for row in rows:
+        if to_fill > 0:
+            row.filled = True
+            to_fill -= 1
+        elif len(row.suite_cells) < max_suite_cols:
+            if row.suite_cells[-1].first_row and all(
+                c.node_type == ActionNodeType.Scenario
+                for c in row.suite_cells[-1].node.children
+            ):
+                row.suite_cells[-1].colspan += max_suite_cols - len(row.suite_cells)
+                row.filled = True
+                to_fill = row.suite_cells[-1].node.rows - 1
+
+    r0 = 0
+    while r0 < len(rows):
+        if len(rows[r0].suite_cells) < max_suite_cols and not rows[r0].filled:
+            r1 = r0 + 1
+            while r1 < len(rows):
+                if (
+                    len(rows[r1].suite_cells) != len(rows[r0].suite_cells)
+                    or rows[r1].suite_cells[-1].node != rows[r0].suite_cells[-1].node
+                ):
+                    break
+                r1 += 1
+            rows[r0].suite_cells.append(
+                SuiteCell(
+                    node=None,
+                    first_row=True,
+                    rowspan=r1 - r0,
+                    colspan=max_suite_cols - len(rows[r0].suite_cells),
+                )
+            )
+            rows[r0].filled = True
+            r0 = r1
+        else:
+            r0 += 1
+
+
+def _enumerate_all_participants(node: ActionNode) -> List[ParticipantID]:
+    if node.node_type == ActionNodeType.Scenario:
+        return list(node.scenario.participants)
+    else:
+        result = set()
+        for child in node.children:
+            for p in _enumerate_all_participants(child):
+                result.add(p)
+        return list(result)
+
+
+def _generate_scenario_pages(
+    node: ActionNode, config: SequenceViewConfiguration
+) -> None:
+    if node.node_type == ActionNodeType.Scenario:
+        all_participants = list(node.scenario.participants)
+        all_participants.sort()
+        scenario_file = os.path.join(
+            config.output_path, f"s{node.scenario.scenario_index}.html"
+        )
+        template = jinja_env.get_template("sequence_view/scenario.html")
+        with open(scenario_file, "w") as f:
+            f.write(
+                template.render(
+                    test_scenario=node.scenario,
+                    all_participants=all_participants,
+                    EpochType=EpochType,
+                    EventType=EventType,
+                    len=len,
+                    str=str,
+                )
+            )
+    else:
+        for child in node.children:
+            _generate_scenario_pages(child, config)
+
+
+def generate_sequence_view(
+    report: TestRunReport, config: SequenceViewConfiguration
+) -> None:
+    node = _compute_action_node(report.report, Indexer())
+
+    os.makedirs(config.output_path, exist_ok=True)
+    _generate_scenario_pages(node, config)
+
+    overview_rows = list(_compute_overview_rows(node))
+    _align_overview_rows(overview_rows)
+    max_suite_cols = max(len(r.suite_cells) for r in overview_rows)
+    all_participants = _enumerate_all_participants(node)
+    all_participants.sort()
+    overview_file = os.path.join(config.output_path, "index.html")
+    template = jinja_env.get_template("sequence_view/overview.html")
+    with open(overview_file, "w") as f:
+        f.write(
+            template.render(
+                overview_rows=overview_rows,
+                max_suite_cols=max_suite_cols,
+                all_participants=all_participants,
+                ActionNodeType=ActionNodeType,
+                len=len,
+            )
+        )
diff --git a/monitoring/uss_qualifier/reports/templates/report.html b/monitoring/uss_qualifier/reports/templates/report.html
index 18f2ef083f..03b0dad560 100644
--- a/monitoring/uss_qualifier/reports/templates/report.html
+++ b/monitoring/uss_qualifier/reports/templates/report.html
@@ -8,8 +8,8 @@
     {{ explorer_header() }}
-    {{ explorer_content(report) }}
-    {{ explorer_footer() }}
+    {{ explorer_content("top_node", report) }}
+    {{ explorer_footer(["top_node"]) }}
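
A quick sketch of the new url_hostname helper in action; the scenario template below uses it to label query rows. The URL and the minimal construction of RequestDescription here are illustrative assumptions, not part of this patch:

# Hypothetical usage of RequestDescription.url_hostname; assumes a
# RequestDescription can be built with just method and url for this purpose.
from monitoring.monitorlib.fetch import RequestDescription

rd = RequestDescription(
    method="GET",
    url="https://dss.example.com/rid/v2/dss/identification_service_areas",
)
print(rd.url_hostname)  # dss.example.com
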
diff --git a/monitoring/uss_qualifier/reports/templates/sequence_view/overview.html b/monitoring/uss_qualifier/reports/templates/sequence_view/overview.html
new file mode 100644
index 0000000000..963cda2fc5
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/templates/sequence_view/overview.html
@@ -0,0 +1,104 @@
+<!DOCTYPE html>
+<html>
+<head>
+</head>
+<body>
+<table>
+  <tr>
+    {% if max_suite_cols > 0 %}
+      <th colspan="{{ max_suite_cols }}">Suite / action generator</th>
+    {% endif %}
+    <th>Scenario</th>
+    {% for participant_id in all_participants %}
+      <th>{{ participant_id }}</th>
+    {% endfor %}
+  </tr>
+  {% for row in overview_rows %}
+    <tr>
+      {% for suite_cell in row.suite_cells %}
+        {% if suite_cell.first_row %}
+          {% if suite_cell.node != None %}
+            <td rowspan="{{ suite_cell.node.rows }}" colspan="{{ suite_cell.colspan }}">{{ suite_cell.node.name }}</td>
+          {% else %}
+            <td rowspan="{{ suite_cell.rowspan }}" colspan="{{ suite_cell.colspan }}"></td>
+          {% endif %}
+        {% endif %}
+      {% endfor %}
+      <td><a href="s{{ row.scenario_node.scenario.scenario_index }}.html">{{ row.scenario_node.scenario.name }}</a></td>
+      {% for participant_id in all_participants %}
+        {% if participant_id in row.scenario_node.scenario.participants %}
+          {% if row.scenario_node.scenario.participants[participant_id].has_failures %}
+            <td class="fail_result"></td>
+          {% else %}
+            <td class="pass_result"></td>
+          {% endif %}
+        {% else %}
+          <td></td>
+        {% endif %}
+      {% endfor %}
+    </tr>
+  {% endfor %}
+</table>
+</body>
+</html>
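
To make the overview-table geometry concrete, here is an illustrative-only sketch (the toy node names are made up) of how ActionNode.rows and ActionNode.cols, defined in sequence_view.py above, combine for a suite containing two scenarios:

from monitoring.uss_qualifier.reports.sequence_view import ActionNode, ActionNodeType

# Each leaf scenario contributes one overview row; the suite adds one
# column of nesting on top of its deepest child.
scenario_a = ActionNode(name="Scenario A", node_type=ActionNodeType.Scenario, children=[])
scenario_b = ActionNode(name="Scenario B", node_type=ActionNodeType.Scenario, children=[])
suite = ActionNode(name="Suite", node_type=ActionNodeType.Suite, children=[scenario_a, scenario_b])
print(suite.rows, suite.cols)  # 2 rows, 2 columns
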
diff --git a/monitoring/uss_qualifier/reports/templates/sequence_view/scenario.html b/monitoring/uss_qualifier/reports/templates/sequence_view/scenario.html
new file mode 100644
index 0000000000..c2fafc2f73
--- /dev/null
+++ b/monitoring/uss_qualifier/reports/templates/sequence_view/scenario.html
@@ -0,0 +1,188 @@
+<!DOCTYPE html>
+{% from "explorer.html" import explorer_header, explorer_content, explorer_footer %}
+<html>
+<head>
+  <title>s{{ test_scenario.scenario_index }} - {{ test_scenario.name }}</title>
+  {{ explorer_header() }}
+</head>
+<body>
+{% set collapsible = namespace(queries=[]) %}
+{% if test_scenario.url %}
+  <h1><a href="{{ test_scenario.url }}">{{ test_scenario.name }}</a></h1>
+{% else %}
+  <h1>{{ test_scenario.name }}</h1>
+{% endif %}
+<h2>{{ test_scenario.type }}</h2>
+<table>
+  <tr>
+    <th>Case</th>
+    <th>Step</th>
+    <th colspan="2">Event</th>
+    {% for participant_id in all_participants %}
+      <th>{{ participant_id }}</th>
+    {% endfor %}
+  </tr>
+  {% set first_row = namespace(epoch=True, step=True) %}
+  {% for epoch in test_scenario.epochs %}
+    {% set first_row.epoch = True %}
+    {% if epoch.type == EpochType.Case %}
+      {% for test_step in epoch.case.steps %}
+        {% set first_row.step = True %}
+        {% for event in test_step.events %}
+          <tr>
+            {% if first_row.epoch %}
+              <td rowspan="{{ epoch.rows }}">
+                {% if epoch.case.url %}
+                  <a href="{{ epoch.case.url }}">{{ epoch.case.name }}</a>
+                {% else %}
+                  {{ epoch.case.name }}
+                {% endif %}
+              </td>
+            {% endif %}
+            {% if first_row.step %}
+              <td rowspan="{{ test_step.rows }}">
+                {% if test_step.url %}
+                  <a href="{{ test_step.url }}">{{ test_step.name }}</a>
+                {% else %}
+                  {{ test_step.name }}
+                {% endif %}
+              </td>
+            {% endif %}
+            <td>{{ event.event_index }}</td>
+            {% if event.type == EventType.PassedCheck %}
+              <td class="pass_check">{{ event.passed_check.name }}</td>
+              {% for participant_id in all_participants %}
+                {% if participant_id in event.passed_check.participants %}
+                  <td class="pass_result"></td>
+                {% else %}
+                  <td></td>
+                {% endif %}
+              {% endfor %}
+            {% elif event.type == EventType.FailedCheck %}
+              <td class="fail_check">
+                {% if event.failed_check.documentation_url %}
+                  <a href="{{ event.failed_check.documentation_url }}">{{ event.failed_check.name }}</a>
+                {% else %}
+                  {{ event.failed_check.name }}
+                {% endif %}
+              </td>
+              {% for participant_id in all_participants %}
+                {% if participant_id in event.failed_check.participants %}
+                  <td class="fail_result"></td>
+                {% else %}
+                  <td></td>
+                {% endif %}
+              {% endfor %}
+            {% elif event.type == EventType.Query %}
+              <td>
+                🌐 {{ event.query.request.method }} {{ event.query.request.url_hostname }}
+                {% set query_id = "e" + str(event.event_index) + "query" %}
+                {{ explorer_content(query_id, event.query) }}
+                {% set collapsible.queries = collapsible.queries + [query_id] %}
+              </td>
+              {% for participant_id in all_participants %}
+                {% if participant_id == event.query.get("server_id", None) %}
+                  <td>🌐</td>
+                {% else %}
+                  <td></td>
+                {% endif %}
+              {% endfor %}
+            {% elif event.type == EventType.Note %}
+              <td>📓</td>
+              <td colspan="{{ len(all_participants) }}">{{ event.note.key }}: {{ event.note.message }}</td>
+            {% else %}
+              <td>???</td>
+              <td colspan="{{ len(all_participants) }}">Render error: unknown EventType '{{ event.type }}'</td>
+            {% endif %}
+          </tr>
+          {% set first_row.epoch = False %}
+          {% set first_row.step = False %}
+        {% endfor %}
+      {% endfor %}
+    {% elif epoch.type == EpochType.Events %}
+      {% for event in epoch.events %}
+        <tr>
+          {% if first_row.epoch %}
+            <td colspan="2" rowspan="{{ epoch.rows }}"></td>
+          {% endif %}
+          <td>{{ event.event_index }}</td>
+          <td>📓</td>
+          <td colspan="{{ len(all_participants) }}">{{ event.note.key }}: {{ event.note.message }}</td>
+        </tr>
+        {% set first_row.epoch = False %}
+      {% endfor %}
+    {% endif %}
+  {% endfor %}
+</table>
+
+{{ explorer_footer(collapsible.queries) }}
+</body>
+</html>
diff --git a/monitoring/uss_qualifier/scenarios/scenario.py b/monitoring/uss_qualifier/scenarios/scenario.py
index b04f702d0f..2ee5aec44b 100644
--- a/monitoring/uss_qualifier/scenarios/scenario.py
+++ b/monitoring/uss_qualifier/scenarios/scenario.py
@@ -122,7 +122,7 @@ def record_failed(
         kwargs = {
             "name": self._documentation.name,
             "documentation_url": self._documentation.url,
-            "timestamp": StringBasedDateTime(datetime.utcnow()),
+            "timestamp": StringBasedDateTime(arrow.utcnow()),
             "summary": summary,
             "details": details,
             "requirements": requirements,
@@ -157,6 +157,7 @@ def record_passed(
         passed_check = PassedCheck(
             name=self._documentation.name,
+            timestamp=StringBasedDateTime(arrow.utcnow()),
             participants=participants,
             requirements=requirements,
         )
diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json
index 67fb992e24..65a548b622 100644
--- a/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json
+++ b/schemas/monitoring/uss_qualifier/configurations/configuration/ArtifactsConfiguration.json
@@ -44,6 +44,17 @@
       }
     ]
   },
+  "sequence_view": {
+    "description": "If specified, configuration describing a desired report describing the sequence of events that occurred during the test",
+    "oneOf": [
+      {
+        "type": "null"
+      },
+      {
+        "$ref": "SequenceViewConfiguration.json"
+      }
+    ]
+  },
   "templated_reports": {
     "description": "List of report templates to be rendered",
     "items": {
diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/ReportHTMLConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/ReportHTMLConfiguration.json
index 4ad3d1fafc..1877035482 100644
--- a/schemas/monitoring/uss_qualifier/configurations/configuration/ReportHTMLConfiguration.json
+++ b/schemas/monitoring/uss_qualifier/configurations/configuration/ReportHTMLConfiguration.json
@@ -1,19 +1,19 @@
 {
+  "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/configurations/configuration/ReportHTMLConfiguration.json",
   "$schema": "https://json-schema.org/draft/2020-12/schema",
-  "type": "object",
+  "description": "monitoring.uss_qualifier.configurations.configuration.ReportHTMLConfiguration, as defined in monitoring/uss_qualifier/configurations/configuration.py",
   "properties": {
     "$ref": {
-      "type": "string",
-      "description": "Path to content that replaces the $ref"
+      "description": "Path to content that replaces the $ref",
+      "type": "string"
     },
     "html_path": {
-      "type": "string",
-      "description": "Path of HTML file to contain an HTML rendering of the test report"
+      "description": "Path of HTML file to contain an HTML rendering of the raw test report object",
+      "type": "string"
     }
   },
-  "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/configurations/configuration/ReportHTMLConfiguration.json",
-  "description": "monitoring.uss_qualifier.configurations.configuration.ReportHTMLConfiguration, as defined in monitoring/uss_qualifier/configurations/configuration.py",
   "required": [
     "html_path"
-  ]
+  ],
+  "type": "object"
 }
\ No newline at end of file
diff --git a/schemas/monitoring/uss_qualifier/configurations/configuration/SequenceViewConfiguration.json b/schemas/monitoring/uss_qualifier/configurations/configuration/SequenceViewConfiguration.json
new file mode 100644
index 0000000000..f1428d02a2
--- /dev/null
+++ b/schemas/monitoring/uss_qualifier/configurations/configuration/SequenceViewConfiguration.json
@@ -0,0 +1,19 @@
+{
+  "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/configurations/configuration/SequenceViewConfiguration.json",
+  "$schema": "https://json-schema.org/draft/2020-12/schema",
+  "description": "monitoring.uss_qualifier.configurations.configuration.SequenceViewConfiguration, as defined in monitoring/uss_qualifier/configurations/configuration.py",
+  "properties": {
+    "$ref": {
+      "description": "Path to content that replaces the $ref",
+      "type": "string"
+    },
+    "output_path": {
+      "description": "Path of a folder into which report HTML files should be written",
+      "type": "string"
+    }
+  },
+  "required": [
+    "output_path"
+  ],
+  "type": "object"
+}
\ No newline at end of file
diff --git a/schemas/monitoring/uss_qualifier/reports/report/PassedCheck.json b/schemas/monitoring/uss_qualifier/reports/report/PassedCheck.json
index 30ec39ed0a..efa921727c 100644
--- a/schemas/monitoring/uss_qualifier/reports/report/PassedCheck.json
+++ b/schemas/monitoring/uss_qualifier/reports/report/PassedCheck.json
@@ -1,35 +1,41 @@
 {
+  "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/report/PassedCheck.json",
   "$schema": "https://json-schema.org/draft/2020-12/schema",
-  "type": "object",
+  "description": "monitoring.uss_qualifier.reports.report.PassedCheck, as defined in monitoring/uss_qualifier/reports/report.py",
   "properties": {
     "$ref": {
-      "type": "string",
-      "description": "Path to content that replaces the $ref"
+      "description": "Path to content that replaces the $ref",
+      "type": "string"
+    },
+    "name": {
+      "description": "Name of the check that passed",
+      "type": "string"
     },
     "participants": {
-      "type": "array",
+      "description": "Participants that may not have met the relevant requirements if this check had failed",
       "items": {
         "type": "string"
       },
-      "description": "Participants that may not have met the relevant requirements if this check had failed"
-    },
-    "name": {
-      "type": "string",
-      "description": "Name of the check that passed"
+      "type": "array"
     },
     "requirements": {
-      "type": "array",
+      "description": "Requirements that would not have been met if this check had failed",
       "items": {
         "type": "string"
       },
-      "description": "Requirements that would not have been met if this check had failed"
+      "type": "array"
+    },
+    "timestamp": {
+      "description": "Time the check was performed",
+      "format": "date-time",
+      "type": "string"
+    }
   },
-  "$id": "https://github.com/interuss/monitoring/blob/main/schemas/monitoring/uss_qualifier/reports/report/PassedCheck.json",
-  "description": "monitoring.uss_qualifier.reports.report.PassedCheck, as defined in monitoring/uss_qualifier/reports/report.py",
   "required": [
     "name",
     "participants",
-    "requirements"
-  ]
+    "requirements",
+    "timestamp"
+  ],
+  "type": "object"
 }
\ No newline at end of file
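
For reference, a hedged sketch of driving the new artifact outside of main.py, e.g. against a previously saved report; the report path is an assumption, while ImplicitDict.parse, SequenceViewConfiguration, and generate_sequence_view are used exactly as in this patch:

import json

from implicitdict import ImplicitDict

from monitoring.uss_qualifier.configurations.configuration import SequenceViewConfiguration
from monitoring.uss_qualifier.reports.report import TestRunReport
from monitoring.uss_qualifier.reports.sequence_view import generate_sequence_view

# Load a report produced by a previous uss_qualifier run (path is illustrative)
with open("output/report.json", "r") as f:
    report = ImplicitDict.parse(json.load(f), TestRunReport)

# Writes index.html plus one s<N>.html page per scenario into output_path
generate_sequence_view(report, SequenceViewConfiguration(output_path="output/sequence_view"))
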