From 254854623eb073f28c497d22fc6b703894bfe2c9 Mon Sep 17 00:00:00 2001 From: Julien Perrochet Date: Fri, 18 Aug 2023 13:51:20 +0200 Subject: [PATCH] NET0460 query the details endpoint to collect perf stats --- monitoring/monitorlib/rid.py | 18 +++++++ .../resources/netrid/observers.py | 5 +- .../astm/netrid/common/aggregate_checks.py | 50 +++++++++++++++++++ .../astm/netrid/display_data_evaluator.py | 9 ++++ .../astm/netrid/v19/aggregate_checks.md | 8 +++ .../astm/netrid/v22a/aggregate_checks.md | 8 +++ 6 files changed, 97 insertions(+), 1 deletion(-) diff --git a/monitoring/monitorlib/rid.py b/monitoring/monitorlib/rid.py index 3b43e39182..3dfea945bf 100644 --- a/monitoring/monitorlib/rid.py +++ b/monitoring/monitorlib/rid.py @@ -155,6 +155,24 @@ def dp_data_resp_percentile95_s(self) -> float: else: raise ValueError("Unsupported RID version '{}'".format(self)) + @property + def dp_details_resp_percentile95_s(self) -> float: + if self == RIDVersion.f3411_19: + return v19.constants.NetDpDetailsResponse95thPercentileSeconds + elif self == RIDVersion.f3411_22a: + return v22a.constants.NetDpDetailsResponse95thPercentileSeconds + else: + raise ValueError("Unsupported RID version '{}'".format(self)) + + @property + def dp_details_resp_percentile99_s(self) -> float: + if self == RIDVersion.f3411_19: + return v19.constants.NetDpDetailsResponse99thPercentileSeconds + elif self == RIDVersion.f3411_22a: + return v22a.constants.NetDpDetailsResponse99thPercentileSeconds + else: + raise ValueError("Unsupported RID version '{}'".format(self)) + @property def dp_data_resp_percentile99_s(self) -> float: if self == RIDVersion.f3411_19: diff --git a/monitoring/uss_qualifier/resources/netrid/observers.py b/monitoring/uss_qualifier/resources/netrid/observers.py index 70a8737d39..bd8aa79dcb 100644 --- a/monitoring/uss_qualifier/resources/netrid/observers.py +++ b/monitoring/uss_qualifier/resources/netrid/observers.py @@ -59,7 +59,10 @@ def observe_flight_details( self, flight_id: str ) 
-> Tuple[Optional[observation_api.GetDetailsResponse], fetch.Query]: query = fetch.query_and_describe( - self.session, "GET", f"/display_data/{flight_id}" + self.session, + "GET", + f"/display_data/{flight_id}", + scope=self.rid_version.read_scope, ) try: result = ( diff --git a/monitoring/uss_qualifier/scenarios/astm/netrid/common/aggregate_checks.py b/monitoring/uss_qualifier/scenarios/astm/netrid/common/aggregate_checks.py index 4c93634652..1dc1f5f262 100644 --- a/monitoring/uss_qualifier/scenarios/astm/netrid/common/aggregate_checks.py +++ b/monitoring/uss_qualifier/scenarios/astm/netrid/common/aggregate_checks.py @@ -71,9 +71,59 @@ def run(self): self._dp_display_data_times_step() self.end_test_step() + self.begin_test_step("Performance of /display_data/ requests") + self._dp_display_data_details_times_step() + self.end_test_step() + self.end_test_case() self.end_test_scenario() + def _dp_display_data_details_times_step(self): + """ + Check performance of /display_data/ requests and confirm they conform to NetDpDetailsResponse95thPercentile (2s) and NetDpDetailsResponse99thPercentile (6s) + """ + pattern = re.compile(r"/display_data/[-:\w]+") + for participant, all_queries in self._queries_by_participant.items(): + relevant_queries: List[fetch.Query] = list() + for query in all_queries: + match = pattern.search(query.request.url) + if match is not None and query.status_code == 200: + relevant_queries.append(query) + + if len(relevant_queries) == 0: + # this may be a service provider + self.record_note( + f"{participant}/display_data/", + "skipped check: no relevant queries", + ) + continue + + # compute percentiles + durations = [query.response.elapsed_s for query in relevant_queries] + [p95, p99] = evaluation.compute_percentiles(durations, [95, 99]) + with self.check( + "Performance of /display_data/ requests", [participant] + ) as check: + if p95 > self._rid_version.dp_details_resp_percentile95_s: + check.record_failed( + summary=f"95th percentile of 
durations for DP display_data details queries is higher than threshold", + severity=Severity.Medium, + participants=[participant], + details=f"threshold: {self._rid_version.dp_details_resp_percentile95_s}s, 95th percentile: {p95}s", + ) + if p99 > self._rid_version.dp_details_resp_percentile99_s: + check.record_failed( + summary=f"99th percentile of durations for DP display_data details queries is higher than threshold", + severity=Severity.Medium, + participants=[participant], + details=f"threshold: {self._rid_version.dp_details_resp_percentile99_s}s, 99th percentile: {p99}s", + ) + + self.record_note( + f"{participant}/display_data/ stats computed on {len(durations)} queries", + f"95th percentile: {p95}s, 99th percentile: {p99}s", + ) + def _dp_display_data_times_step(self): """ :return: the query durations of respectively the initial queries and the subsequent ones diff --git a/monitoring/uss_qualifier/scenarios/astm/netrid/display_data_evaluator.py b/monitoring/uss_qualifier/scenarios/astm/netrid/display_data_evaluator.py index c6ef82cfb5..c0f6c8e995 100644 --- a/monitoring/uss_qualifier/scenarios/astm/netrid/display_data_evaluator.py +++ b/monitoring/uss_qualifier/scenarios/astm/netrid/display_data_evaluator.py @@ -261,6 +261,15 @@ def evaluate_system_instantaneously( query, verified_sps, ) + # We also issue queries to the flight details endpoint in order to collect + # performance statistics, which are computed and checked at a later stage. + if query.status_code == 200: + # If there are multiple flights, we only issue a single details query for the first returned one, + # as we don't want to slow down the test we are piggy-backing on. 
+ # TODO: revisit if needed + if len(observation.flights) > 0: + (_, detailQuery) = observer.observe_flight_details(observation.flights[0].id) + self._test_scenario.record_query(detailQuery) # TODO: If bounding rect is smaller than cluster threshold, expand slightly above cluster threshold and re-observe # TODO: If bounding rect is smaller than area-too-large threshold, expand slightly above area-too-large threshold and re-observe diff --git a/monitoring/uss_qualifier/scenarios/astm/netrid/v19/aggregate_checks.md b/monitoring/uss_qualifier/scenarios/astm/netrid/v19/aggregate_checks.md index dead5ac209..7c107df89c 100644 --- a/monitoring/uss_qualifier/scenarios/astm/netrid/v19/aggregate_checks.md +++ b/monitoring/uss_qualifier/scenarios/astm/netrid/v19/aggregate_checks.md @@ -19,6 +19,14 @@ The observers to evaluate in the report. ## Performance of Display Providers requests test case +### Performance of /display_data/ requests test step + +#### Performance of /display_data/ requests check + +**[astm.f3411.v19.NET0460](../../../../requirements/astm/f3411/v19.md) Checks that the DP response times for the +`/display_data/` endpoint have a p95 and p99 that are respectively below +`NetDpDetailsResponse95thPercentileSeconds` (2 seconds) and `NetDpDetailsResponse99thPercentileSeconds` (6 seconds). + ### Performance of /display_data requests test step In this step, all successful display data queries made during the execution of the previous scenarios are aggregated per observer and per request (identified by their URLs). For each of those, and using the session length diff --git a/monitoring/uss_qualifier/scenarios/astm/netrid/v22a/aggregate_checks.md b/monitoring/uss_qualifier/scenarios/astm/netrid/v22a/aggregate_checks.md index 279a470e3e..ae7d631dce 100644 --- a/monitoring/uss_qualifier/scenarios/astm/netrid/v22a/aggregate_checks.md +++ b/monitoring/uss_qualifier/scenarios/astm/netrid/v22a/aggregate_checks.md @@ -19,6 +19,14 @@ The observers to evaluate in the report. 
## Performance of Display Providers requests test case +### Performance of /display_data/ requests test step + +#### Performance of /display_data/ requests check + +**[astm.f3411.v22a.NET0460](../../../../requirements/astm/f3411/v22a.md)** Checks that the DP response times for the +`/display_data/` endpoint have a p95 and p99 that are respectively below +`NetDpDetailsResponse95thPercentileSeconds` (2 seconds) and `NetDpDetailsResponse99thPercentileSeconds` (6 seconds). + ### Performance of /display_data requests test step In this step, all successful display data queries made during the execution of the previous scenarios are aggregated per observer and per request (identified by their URLs). For each of those, and using the session length