Skip to content

Commit

Permalink
NET0460 query the details endpoint to collect perf stats
Browse files Browse the repository at this point in the history
  • Loading branch information
Shastick committed Aug 31, 2023
1 parent 0da6053 commit 0fc2124
Show file tree
Hide file tree
Showing 6 changed files with 106 additions and 3 deletions.
23 changes: 23 additions & 0 deletions monitoring/monitorlib/rid.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,11 @@ def read_scope(self) -> str:
else:
raise ValueError("Unsupported RID version '{}'".format(self))

@property
def dss_read_isa_scope(self) -> str:
    """Scope needed to read identification service areas from the DSS.

    The scope string is identical across all supported ASTM RID versions,
    so no version dispatch is required here.
    """
    return "dss.read.identification_service_areas"

@property
def realtime_period(self) -> timedelta:
if self == RIDVersion.f3411_19:
Expand Down Expand Up @@ -155,6 +160,24 @@ def dp_data_resp_percentile95_s(self) -> float:
else:
raise ValueError("Unsupported RID version '{}'".format(self))

@property
def dp_details_resp_percentile95_s(self) -> float:
    """95th-percentile response-time threshold (seconds) for Display Provider
    flight-details queries, per the constants of the applicable RID version.

    Raises ValueError for an unsupported RID version.
    """
    if self == RIDVersion.f3411_22a:
        return v22a.constants.NetDpDetailsResponse95thPercentileSeconds
    if self == RIDVersion.f3411_19:
        return v19.constants.NetDpDetailsResponse95thPercentileSeconds
    raise ValueError("Unsupported RID version '{}'".format(self))

@property
def dp_details_resp_percentile99_s(self) -> float:
    """99th-percentile response-time threshold (seconds) for Display Provider
    flight-details queries, per the constants of the applicable RID version.

    Raises ValueError for an unsupported RID version.
    """
    if self == RIDVersion.f3411_22a:
        return v22a.constants.NetDpDetailsResponse99thPercentileSeconds
    if self == RIDVersion.f3411_19:
        return v19.constants.NetDpDetailsResponse99thPercentileSeconds
    raise ValueError("Unsupported RID version '{}'".format(self))

@property
def dp_data_resp_percentile99_s(self) -> float:
if self == RIDVersion.f3411_19:
Expand Down
5 changes: 4 additions & 1 deletion monitoring/uss_qualifier/resources/netrid/observers.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,10 @@ def observe_flight_details(
self, flight_id: str
) -> Tuple[Optional[observation_api.GetDetailsResponse], fetch.Query]:
query = fetch.query_and_describe(
self.session, "GET", f"/display_data/{flight_id}"
self.session,
"GET",
f"/display_data/{flight_id}",
scope=self.rid_version.dss_read_isa_scope,
)
try:
result = (
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,59 @@ def run(self):
self._dp_display_data_times_step()
self.end_test_step()

self.begin_test_step("Performance of /display_data/<flight_id> requests")
self._dp_display_data_details_times_step()
self.end_test_step()

self.end_test_case()
self.end_test_scenario()

def _dp_display_data_details_times_step(self):
    """
    Check performance of /display_data/<flight_id> requests and confirm they conform to NetDpDetailsResponse95thPercentile (2s) and NetDpDetailsResponse99thPercentile (6s)
    """
    # Matches the flight-details endpoint path anywhere in the query URL.
    details_url_re = re.compile(r"/display_data/[-:\w]+")

    for participant, all_queries in self._queries_by_participant.items():
        # Keep only successful queries that hit the details endpoint.
        relevant_queries: List[fetch.Query] = [
            q
            for q in all_queries
            if q.status_code == 200 and details_url_re.search(q.request.url) is not None
        ]

        if not relevant_queries:
            # this may be a service provider
            self.record_note(
                f"{participant}/display_data/<flight_id>",
                "skipped check: no relevant queries",
            )
            continue

        # compute percentiles
        durations = [q.response.elapsed_s for q in relevant_queries]
        [p95, p99] = evaluation.compute_percentiles(durations, [95, 99])

        with self.check(
            "Performance of /display_data/<flight_id> requests", [participant]
        ) as check:
            threshold_95 = self._rid_version.dp_details_resp_percentile95_s
            if p95 > threshold_95:
                check.record_failed(
                    summary=f"95th percentile of durations for DP display_data details queries is higher than threshold",
                    severity=Severity.Medium,
                    participants=[participant],
                    details=f"threshold: {threshold_95}s, 95th percentile: {p95}s",
                )
            threshold_99 = self._rid_version.dp_details_resp_percentile99_s
            if p99 > threshold_99:
                check.record_failed(
                    summary=f"99th percentile of durations for DP display_data details queries is higher than threshold",
                    severity=Severity.Medium,
                    participants=[participant],
                    details=f"threshold: {threshold_99}s, 99th percentile: {p99}s",
                )

        self.record_note(
            f"{participant}/display_data/<flight_id> stats computed on {len(durations)} queries",
            f"95th percentile: {p95}s, 99th percentile: {p99}s",
        )

def _dp_display_data_times_step(self):
"""
:return: the query durations of respectively the initial queries and the subsequent ones
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -261,6 +261,17 @@ def evaluate_system_instantaneously(
query,
verified_sps,
)
# We also issue queries to the flight details endpoint in order to collect
# performance statistics, which are computed and checked at a later stage.
if query.status_code == 200:
# If there are multiple flights, we only issue a single details query for the first returned one,
# as we don't want to slow down the test we are piggy-backing on.
# TODO: revisit if needed
if len(observation.flights) > 0:
(_, detailQuery) = observer.observe_flight_details(
observation.flights[0].id
)
self._test_scenario.record_query(detailQuery)

# TODO: If bounding rect is smaller than cluster threshold, expand slightly above cluster threshold and re-observe
# TODO: If bounding rect is smaller than area-too-large threshold, expand slightly above area-too-large threshold and re-observe
Expand Down Expand Up @@ -617,14 +628,14 @@ def _evaluate_clusters_observation(
check.record_failed(
summary="Error while evaluating clustered area view. Missing flight",
severity=Severity.Medium,
details=f"{expected_count-clustered_flight_count} (~{uncertain_count}) missing flight(s)",
details=f"{expected_count - clustered_flight_count} (~{uncertain_count}) missing flight(s)",
)
elif clustered_flight_count > expected_count + uncertain_count:
# Unexpected flight
check.record_failed(
summary="Error while evaluating clustered area view. Unexpected flight",
severity=Severity.Medium,
details=f"{clustered_flight_count-expected_count} (~{uncertain_count}) unexpected flight(s)",
details=f"{clustered_flight_count - expected_count} (~{uncertain_count}) unexpected flight(s)",
)
elif clustered_flight_count == expected_count:
# evaluate cluster obfuscation distance
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,14 @@ The observers to evaluate in the report.

## Performance of Display Providers requests test case

### Performance of /display_data/<flight_id> requests test step

#### Performance of /display_data/<flight_id> requests check

**[astm.f3411.v19.NET0460](../../../../requirements/astm/f3411/v19.md)** Checks that the DP response times for the
`/display_data/<flight_id>` endpoint have a p95 and p99 that are respectively below
`NetDpDetailsResponse95thPercentileSeconds` (2 seconds) and `NetDpDetailsResponse99thPercentileSeconds` (6 seconds).

### Performance of /display_data requests test step
In this step, all successful display data queries made during the execution of the previous scenarios are aggregated per
observer and per request (identified by their URLs). For each of those, and using the session length
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,14 @@ The observers to evaluate in the report.

## Performance of Display Providers requests test case

### Performance of /display_data/<flight_id> requests test step

#### Performance of /display_data/<flight_id> requests check

**[astm.f3411.v22a.NET0460](../../../../requirements/astm/f3411/v22a.md)** Checks that the DP response times for the
Display Application's flight details requests have a p95 and p99 that are respectively below
`NetDpDetailsResponse95thPercentileSeconds` (2 seconds) and `NetDpDetailsResponse99thPercentileSeconds` (6 seconds).

### Performance of /display_data requests test step
In this step, all successful display data queries made during the execution of the previous scenarios are aggregated per
observer and per request (identified by their URLs). For each of those, and using the session length
Expand Down

0 comments on commit 0fc2124

Please sign in to comment.