
Commit

assessment-dashboard filter field changes from created_at to publication_date
sudan45 authored and AdityaKhatri committed Apr 30, 2024
1 parent 0c1dc33 commit cb0913a
Showing 2 changed files with 15 additions and 14 deletions.
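For context, the heart of this commit is the default date_field used by the dashboard's global date filter, which moves from created_at to publication_date. The sketch below is not part of the diff itself; the helper matches the first hunk of dashboard_schema.py after the change, while the queryset usage at the end is a hypothetical illustration.

# Sketch of the filter helper as it reads after this commit; the function body
# mirrors the first hunk below, the usage comment is a hypothetical example.
from datetime import date

def get_global_filters(_filter: dict, date_field="publication_date"):
    # Build Django ORM range lookups on the chosen date field
    # (now publication_date by default instead of created_at).
    return {
        f"{date_field}__gte": _filter["date_from"],
        f"{date_field}__lte": _filter["date_to"],
    }

# Hypothetical usage against an AssessmentRegistry queryset:
# AssessmentRegistry.objects.filter(
#     **get_global_filters({"date_from": date(2024, 1, 1), "date_to": date.today()})
# )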
26 changes: 13 additions & 13 deletions apps/assessment_registry/dashboard_schema.py
@@ -50,7 +50,7 @@ def cache_key_gen(root: AssessmentDashboardStat, *_):
)


def get_global_filters(_filter: dict, date_field="created_at"):
def get_global_filters(_filter: dict, date_field="publication_date"):
return {
f"{date_field}__gte": _filter["date_from"],
f"{date_field}__lte": _filter["date_to"],
@@ -532,7 +532,7 @@ def resolve_assessment_per_framework_pillar(root: AssessmentDashboardStat, info)
).values('focus').order_by('focus').annotate(
count=Count('id')
).values('focus', 'count').annotate(
-date=TruncDay('created_at')
+date=TruncDay('publication_date')
).values('focus', 'count', 'date')

@staticmethod
@@ -543,7 +543,7 @@ def resolve_assessment_per_affected_group(root: AssessmentDashboardStat, info):
).values('affected_group').order_by('affected_group').annotate(
count=Count('id')
).values('affected_group', 'count').annotate(
-date=TruncDay('created_at')
+date=TruncDay('publication_date')
).values('affected_group', 'count', 'date')

@staticmethod
@@ -554,7 +554,7 @@ def resolve_assessment_per_humanitarian_sector(root: AssessmentDashboardStat, in
).values('sector').order_by('sector').annotate(
count=Count('id')
).values('sector', 'count').annotate(
-date=TruncDay('created_at')
+date=TruncDay('publication_date')
).values('sector', 'count', 'date')

@staticmethod
@@ -565,7 +565,7 @@ def resolve_assessment_per_protection_management(root: AssessmentDashboardStat,
).values('protection_management').order_by('protection_management').annotate(
count=Count('id')
).values('protection_management', 'count').annotate(
-date=TruncDay('created_at')
+date=TruncDay('publication_date')
).values('protection_management', 'count', 'date')

@staticmethod
@@ -604,7 +604,7 @@ def resolve_assessment_per_affected_group_and_sector(root: AssessmentDashboardSt
@node_cache(CacheKey.AssessmentDashboard.ASSESSMENT_AFFECTED_GROUP_AND_GEOAREA)
def resolve_assessment_per_affected_group_and_geoarea(root: AssessmentDashboardStat, info):
return (
root.assessment_registry_qs.values("locations", date=TruncDay("created_at"))
root.assessment_registry_qs.values("locations", date=TruncDay("publication_date"))
.annotate(
geo_area=models.F("locations"),
count=Count("id"),
@@ -637,7 +637,7 @@ def resolve_assessment_by_lead_organization(root: AssessmentDashboardStat, info)
AssessmentRegistryOrganization.objects.filter(
organization_type=AssessmentRegistryOrganization.Type.LEAD_ORGANIZATION
)
.values(date=TruncDay("assessment_registry__created_at"))
.values(date=TruncDay("assessment_registry__publication_date"))
.filter(assessment_registry__in=root.assessment_registry_qs)
.annotate(count=Count("organization"))
.values("organization", "count", "date")
@@ -648,7 +648,7 @@ def resolve_assessment_by_lead_organization(root: AssessmentDashboardStat, info)
@node_cache(CacheKey.AssessmentDashboard.ASSESSMENT_PER_DATA_COLLECTION_TECHNIQUE)
def resolve_assessment_per_datatechnique(root: AssessmentDashboardStat, info):
return (
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__created_at"))
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__publication_date"))
.annotate(count=Count("data_collection_technique"))
.values("data_collection_technique", "count", "date")
.order_by("data_collection_technique")
@@ -658,7 +658,7 @@ def resolve_assessment_per_datatechnique(root: AssessmentDashboardStat, info):
@node_cache(CacheKey.AssessmentDashboard.ASSESSMENT_PER_UNIT_ANALYSIS)
def resolve_assessment_per_unit_of_analysis(root: AssessmentDashboardStat, info):
return (
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__created_at"))
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__publication_date"))
.annotate(count=Count("unit_of_analysis"))
.values("unit_of_analysis", "count", "date")
.order_by("unit_of_analysis")
@@ -668,7 +668,7 @@ def resolve_assessment_per_unit_of_analysis(root: AssessmentDashboardStat, info)
@node_cache(CacheKey.AssessmentDashboard.ASSESSMENT_PER_UNIT_REPORTING)
def resolve_assessment_per_unit_of_reporting(root: AssessmentDashboardStat, info):
return (
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__created_at"))
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__publication_date"))
.annotate(count=Count("unit_of_reporting"))
.values("unit_of_reporting", "count", "date")
.order_by("unit_of_reporting")
@@ -678,7 +678,7 @@ def resolve_assessment_per_unit_of_reporting(root: AssessmentDashboardStat, info
@node_cache(CacheKey.AssessmentDashboard.ASSESSMENT_PER_SAMPLE_APPROACH)
def resolve_assessment_per_sampling_approach(root: AssessmentDashboardStat, info):
return (
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__created_at"))
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__publication_date"))
.annotate(count=Count("sampling_approach"))
.values("sampling_approach", "count", "date")
.order_by("sampling_approach")
@@ -688,7 +688,7 @@ def resolve_assessment_per_sampling_approach(root: AssessmentDashboardStat, info
@node_cache(CacheKey.AssessmentDashboard.ASSESSMENT_PER_PROXIMITY)
def resolve_assessment_per_proximity(root: AssessmentDashboardStat, info):
return (
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__created_at"))
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__publication_date"))
.annotate(count=Count("proximity"))
.values("proximity", "count", "date")
.order_by("proximity")
@@ -698,7 +698,7 @@ def resolve_assessment_per_proximity(root: AssessmentDashboardStat, info):
@node_cache(CacheKey.AssessmentDashboard.SAMPLE_SIZE_PER_DATA_COLLECTION_TECHNIQUE)
def resolve_sample_size_per_data_collection_technique(root: AssessmentDashboardStat, info):
return (
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__created_at"))
root.methodology_attribute_qs.values(date=TruncDay("assessment_registry__publication_date"))
.annotate(sampling_size=Sum("sampling_size"))
.values("sampling_size", "data_collection_technique", "date")
.order_by("data_collection_technique")
3 changes: 2 additions & 1 deletion apps/assessment_registry/tests/test_dashboard_schema.py
@@ -185,7 +185,7 @@ def _query_check(minput, **kwargs):
limitations="test",
objectives="test",
noOfPages=10,
publicationDate="2023-01-01",
publicationDate=str(date.today()),
sampling="test",
language=[self.genum(AssessmentRegistry.Language.ENGLISH), self.genum(AssessmentRegistry.Language.SPANISH)],
bgCountries=[self.region.id],
@@ -337,6 +337,7 @@ def _query_check(filter=None, **kwargs):

self.force_login(self.member_user)
content = _query_check(filter)["data"]["project"]["assessmentDashboardStatistics"]
+print(content)
# assessment dashboard tab 1
self.assertEqual(content["totalAssessment"], 1)
self.assertEqual(content["totalCollectionTechnique"], 2)