Bug 1917876 - Unable to find my test results for Firefox on linux opt, and Chrome on linux shippable platforms (#8216)

* Remove the condition that caused missing test results, and drop the unnecessary is_empty field (see the sketch below)

* Address suggestion

* Address nit change request
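
A minimal sketch of the behavioral change described above (simplified, hypothetical names — not the actual Treeherder code): previously a comparison row was silently dropped whenever either side lacked a performance signature; now the row is kept and its signature-derived fields fall back to whichever side has data.

```python
def build_row_old(base_sig, new_sig):
    # Old behavior: skip rows missing either signature.
    is_empty = not (base_sig and new_sig)
    if is_empty:
        return None  # the "missing test results" from the bug report
    return {"suite": base_sig["suite"], "is_empty": is_empty}


def build_row_new(base_sig, new_sig):
    # New behavior: keep the row; derive fields from whichever side exists.
    sig = base_sig or new_sig
    return {"suite": sig["suite"], "is_complete": bool(base_sig and new_sig)}


# A new-only result now produces a partial row instead of vanishing:
assert build_row_old(None, {"suite": "a11yr"}) is None
assert build_row_new(None, {"suite": "a11yr"}) == {"suite": "a11yr", "is_complete": False}
```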
beatrice-acasandrei authored Oct 16, 2024
1 parent dd2f5b7 commit 34fb534
Showing 3 changed files with 190 additions and 36 deletions.
166 changes: 152 additions & 14 deletions tests/webapp/api/test_perfcompare_api.py
@@ -113,7 +113,6 @@ def test_perfcompare_results_against_no_base(
"framework_id": base_sig.framework.id,
"platform": base_sig.platform.platform,
"suite": base_sig.suite,
- "is_empty": False,
"header_name": response["header_name"],
"base_repository_name": base_sig.repository.name,
"new_repository_name": new_sig.repository.name,
@@ -283,7 +282,6 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
"framework_id": base_sig.framework.id,
"platform": base_sig.platform.platform,
"suite": base_sig.suite,
- "is_empty": False,
"header_name": response["header_name"],
"base_repository_name": base_sig.repository.name,
"new_repository_name": new_sig.repository.name,
@@ -352,6 +350,146 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
assert expected[0] == response.json()[0]


def test_perfcompare_results_without_base_signature(
client,
create_signature,
create_perf_datum,
test_perf_signature,
test_repository,
try_repository,
eleven_jobs_stored,
test_perfcomp_push,
test_perfcomp_push_2,
test_linux_platform,
test_option_collection,
):
perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by("push__time").all()

test_perfcomp_push.time = THREE_DAYS_AGO
test_perfcomp_push.repository = try_repository
test_perfcomp_push.save()

test_perfcomp_push_2.time = datetime.datetime.now()
test_perfcomp_push_2.save()

suite = "a11yr"
test = "dhtml.html"
extra_options = "e10s fission stylo webrender"
measurement_unit = "ms"
new_application = "geckoview"

new_perf_data_values = [40.2]

new_sig = create_signature(
signature_hash=(20 * "t2"),
extra_options=extra_options,
platform=test_linux_platform,
measurement_unit=measurement_unit,
suite=suite,
test=test,
test_perf_signature=test_perf_signature,
repository=test_repository,
application=new_application,
)

job = perf_jobs[1]
job.push = test_perfcomp_push_2
job.save()
perf_datum = PerformanceDatum.objects.create(
value=new_perf_data_values[0],
push_timestamp=job.push.time,
job=job,
push=job.push,
repository=job.repository,
signature=new_sig,
)
perf_datum.push.time = job.push.time
perf_datum.push.save()

response = get_expected(
None,
new_sig,
extra_options,
test_option_collection,
new_perf_data_values,
[],
)

expected = [
{
"base_rev": test_perfcomp_push.revision,
"new_rev": test_perfcomp_push_2.revision,
"framework_id": new_sig.framework.id,
"platform": new_sig.platform.platform,
"suite": new_sig.suite,
"header_name": response["header_name"],
"base_repository_name": try_repository.name,
"new_repository_name": new_sig.repository.name,
"base_app": "",
"new_app": "geckoview",
"is_complete": False,
"base_measurement_unit": "",
"new_measurement_unit": new_sig.measurement_unit,
"base_retriggerable_job_ids": [],
"new_retriggerable_job_ids": [job.id],
"base_runs": [],
"new_runs": new_perf_data_values,
"base_runs_replicates": [],
"new_runs_replicates": [],
"base_avg_value": round(response["base_avg_value"], 2),
"new_avg_value": round(response["new_avg_value"], 2),
"base_median_value": round(response["base_median_value"], 2),
"new_median_value": round(response["new_median_value"], 2),
"test": new_sig.test,
"option_name": response["option_name"],
"extra_options": new_sig.extra_options,
"base_stddev": round(response["base_stddev"], 2),
"new_stddev": round(response["new_stddev"], 2),
"base_stddev_pct": round(response["base_stddev_pct"], 2),
"new_stddev_pct": round(response["new_stddev_pct"], 2),
"confidence": round(response["confidence"], 2),
"confidence_text": response["confidence_text"],
"delta_value": round(response["delta_value"], 2),
"delta_percentage": round(response["delta_pct"], 2),
"magnitude": round(response["magnitude"], 2),
"new_is_better": response["new_is_better"],
"lower_is_better": response["lower_is_better"],
"is_confident": response["is_confident"],
"more_runs_are_needed": False,
"noise_metric": False,
"graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&"
f"highlightedRevisions={test_perfcomp_push_2.revision}&"
f"series={try_repository.name}%2C{new_sig.signature_hash}%2C1%2C{new_sig.framework.id}&"
f"series={test_repository.name}%2C{new_sig.signature_hash}%2C1%2C{new_sig.framework.id}&"
f"timerange=604800",
"is_improvement": response["is_improvement"],
"is_regression": response["is_regression"],
"is_meaningful": response["is_meaningful"],
"base_parent_signature": response["base_parent_signature"],
"new_parent_signature": response["new_parent_signature"],
"base_signature_id": response["base_signature_id"],
"new_signature_id": response["new_signature_id"],
"has_subtests": response["has_subtests"],
},
]

query_params = (
"?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
"}&no_subtests=true".format(
try_repository.name,
test_repository.name,
test_perfcomp_push.revision,
test_perfcomp_push_2.revision,
test_perf_signature.framework_id,
)
)

response = client.get(reverse("perfcompare-results") + query_params)

assert response.status_code == 200
assert expected[0] == response.json()[0]


def test_perfcompare_results_subtests_support(
client,
create_signature,
@@ -456,7 +594,6 @@ def test_perfcompare_results_subtests_support(
"framework_id": base_sig.framework.id,
"platform": base_sig.platform.platform,
"suite": base_sig.suite,
- "is_empty": False,
"header_name": response["header_name"],
"base_repository_name": base_sig.repository.name,
"new_repository_name": new_sig.repository.name,
@@ -625,7 +762,6 @@ def test_perfcompare_results_multiple_runs(
"framework_id": sig1.framework.id,
"platform": sig1.platform.platform,
"suite": sig1.suite,
- "is_empty": False,
"header_name": first_row["header_name"],
"base_repository_name": sig1.repository.name,
"new_repository_name": sig2.repository.name,
@@ -674,7 +810,6 @@ def test_perfcompare_results_multiple_runs(
"framework_id": sig3.framework.id,
"platform": sig3.platform.platform,
"suite": sig3.suite,
- "is_empty": False,
"header_name": second_row["header_name"],
"base_repository_name": sig3.repository.name,
"new_repository_name": sig4.repository.name,
@@ -800,8 +935,9 @@ def get_expected(
new_perf_data_values,
base_perf_data_values,
):
- response = {"option_name": test_option_collection.get(base_sig.option_collection_id, "")}
- test_suite = perfcompare_utils.get_test_suite(base_sig.suite, base_sig.test)
+ sig = base_sig if base_sig else new_sig
+ response = {"option_name": test_option_collection.get(sig.option_collection_id, "")}
+ test_suite = perfcompare_utils.get_test_suite(sig.suite, sig.test)
response["header_name"] = perfcompare_utils.get_header_name(
extra_options, response["option_name"], test_suite
)
@@ -833,9 +969,9 @@ def get_expected(
)
response["magnitude"] = perfcompare_utils.get_magnitude(response["delta_pct"])
response["new_is_better"] = perfcompare_utils.is_new_better(
-     response["delta_value"], base_sig.lower_is_better
+     response["delta_value"], sig.lower_is_better
)
- response["lower_is_better"] = base_sig.lower_is_better
+ response["lower_is_better"] = sig.lower_is_better
response["confidence"] = perfcompare_utils.get_abs_ttest_value(
base_perf_data_values, new_perf_data_values
)
@@ -857,12 +993,14 @@ def get_expected(
response["is_regression"] = class_name == "danger"
response["is_meaningful"] = class_name == ""
response["base_parent_signature"] = (
-     base_sig.parent_signature.id if base_sig.parent_signature else None
+     base_sig.parent_signature.id if base_sig and base_sig.parent_signature else None
)
response["new_parent_signature"] = (
-     new_sig.parent_signature.id if base_sig.parent_signature else None
+     new_sig.parent_signature.id if new_sig and new_sig.parent_signature else None
)
+ response["base_signature_id"] = base_sig.id if base_sig else None
+ response["new_signature_id"] = new_sig.id if new_sig else None
+ response["has_subtests"] = (base_sig.has_subtests if base_sig else False) or (
+     new_sig.has_subtests if new_sig else False
+ )
- response["base_signature_id"] = base_sig.id
- response["new_signature_id"] = new_sig.id
- response["has_subtests"] = base_sig.has_subtests or new_sig.has_subtests
return response
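
With a missing base signature, the updated get_expected helper now derives the shared fields (option name, suite/test, lower_is_better) from whichever signature exists. A self-contained sketch of that contract, using a stand-in class rather than the real fixtures:

```python
class Sig:
    # Minimal stand-in for a performance signature fixture.
    def __init__(self, suite, test, lower_is_better=True):
        self.suite, self.test, self.lower_is_better = suite, test, lower_is_better


base_sig, new_sig = None, Sig("a11yr", "dhtml.html")
sig = base_sig if base_sig else new_sig  # same fallback pattern as get_expected
assert (sig.suite, sig.test, sig.lower_is_better) == ("a11yr", "dhtml.html", True)
```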
58 changes: 38 additions & 20 deletions treeherder/webapp/api/performance_data.py
@@ -952,15 +952,27 @@ def list(self, request):
for platform in platforms:
sig_identifier = perfcompare_utils.get_sig_identifier(header, platform)
base_sig = base_signatures_map.get(sig_identifier, {})
- base_sig_id = base_sig.get("id", "")
+ base_sig_id = base_sig.get("id", None)
new_sig = new_signatures_map.get(sig_identifier, {})
- new_sig_id = new_sig.get("id", "")
- lower_is_better = base_sig.get("lower_is_better", "")
- is_empty = not (
-     base_sig and new_sig
- )  # ensures there are signatures for base and new
- if is_empty:
-     continue
+ new_sig_id = new_sig.get("id", None)
+ if base_sig:
+     (
+         extra_options,
+         lower_is_better,
+         option_name,
+         sig_hash,
+         suite,
+         test,
+     ) = self._get_signature_based_properties(base_sig, option_collection_map)
+ else:
+     (
+         extra_options,
+         lower_is_better,
+         option_name,
+         sig_hash,
+         suite,
+         test,
+     ) = self._get_signature_based_properties(new_sig, option_collection_map)
base_perf_data_values = base_grouped_values.get(base_sig_id, [])
new_perf_data_values = new_grouped_values.get(new_sig_id, [])
base_perf_data_replicates = base_grouped_replicates.get(base_sig_id, [])
@@ -983,11 +995,6 @@ def list(self, request):
base_perf_data_values, new_perf_data_values
)
confidence_text = perfcompare_utils.get_confidence_text(confidence)
- sig_hash = (
-     base_sig.get("signature_hash", "")
-     if base_sig
-     else new_sig.get("signature_hash", "")
- )
delta_value = perfcompare_utils.get_delta_value(new_avg_value, base_avg_value)
delta_percentage = perfcompare_utils.get_delta_percentage(
delta_value, base_avg_value
@@ -1015,15 +1022,12 @@ def list(self, request):
"platform": platform,
"base_app": base_sig.get("application", ""),
"new_app": new_sig.get("application", ""),
- "suite": base_sig.get("suite", ""),  # same suite for base_result and new_result
- "test": base_sig.get("test", ""),  # same test for base_result and new_result
+ "suite": suite,  # same suite for base_result and new_result
+ "test": test,  # same test for base_result and new_result
"is_complete": is_complete,
"framework_id": framework,
- "is_empty": is_empty,
- "option_name": option_collection_map.get(
-     base_sig.get("option_collection_id", ""), ""
- ),
- "extra_options": base_sig.get("extra_options", ""),
+ "option_name": option_name,
+ "extra_options": extra_options,
"base_repository_name": base_repo_name,
"new_repository_name": new_repo_name,
"base_measurement_unit": base_sig.get("measurement_unit", ""),
@@ -1079,6 +1083,20 @@ def list(self, request):

return Response(data=serialized_data)

def _get_signature_based_properties(self, sig, option_collection_map):
return (
sig.get("extra_options", ""),
sig.get("lower_is_better", ""),
self._get_option_name(sig, option_collection_map),
sig.get("signature_hash", ""),
sig.get("suite", ""),
sig.get("test", ""),
)

@staticmethod
def _get_option_name(sig, option_collection_map):
return option_collection_map.get(sig.get("option_collection_id", ""), "")

@staticmethod
def _get_push_timestamp(base_push, new_push):
# This function will determine the right push time stamp to assign a revision.
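The two branches feeding _get_signature_based_properties differ only in which signature mapping they pass. A sketch (standalone function, not the committed code) of an equivalent, more compact form — every rendered row comes from at least one non-empty signature, so the or-fallback is safe:

```python
def signature_properties(base_sig, new_sig, option_collection_map):
    # Derive shared row properties from whichever signature exists,
    # preferring the base side when both are present.
    sig = base_sig or new_sig
    return (
        sig.get("extra_options", ""),
        sig.get("lower_is_better", ""),
        option_collection_map.get(sig.get("option_collection_id", ""), ""),
        sig.get("signature_hash", ""),
        sig.get("suite", ""),
        sig.get("test", ""),
    )


# A new-only row still gets its suite/test labels:
props = signature_properties({}, {"suite": "a11yr", "test": "dhtml.html"}, {})
assert props[4:] == ("a11yr", "dhtml.html")
```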
2 changes: 0 additions & 2 deletions treeherder/webapp/api/performance_serializers.py
@@ -504,7 +504,6 @@ class PerfCompareResultsSerializer(serializers.ModelSerializer):
max_length=10,
default="",
)
- is_empty = serializers.BooleanField()
is_complete = serializers.BooleanField()
platform = serializers.CharField()
header_name = serializers.CharField()
@@ -569,7 +568,6 @@ class Meta:
"framework_id",
"platform",
"suite",
- "is_empty",
"header_name",
"base_repository_name",
"new_repository_name",
