diff --git a/frontend/src/scenes/settings/organization/VerifiedDomains/SSOSelect.tsx b/frontend/src/scenes/settings/organization/VerifiedDomains/SSOSelect.tsx
index 8b00924441155..d106adc4f8289 100644
--- a/frontend/src/scenes/settings/organization/VerifiedDomains/SSOSelect.tsx
+++ b/frontend/src/scenes/settings/organization/VerifiedDomains/SSOSelect.tsx
@@ -11,9 +11,16 @@ export interface SSOSelectInterface {
loading: boolean
onChange: (value: SSOProvider | '') => void
samlAvailable: boolean
+ disabledReason?: string | null
}
-export function SSOSelect({ value, loading, onChange, samlAvailable }: SSOSelectInterface): JSX.Element | null {
+export function SSOSelect({
+ value,
+ loading,
+ onChange,
+ samlAvailable,
+ disabledReason,
+}: SSOSelectInterface): JSX.Element | null {
const { preflight } = useValues(preflightLogic)
if (!preflight) {
@@ -46,7 +53,7 @@ export function SSOSelect({ value, loading, onChange, samlAvailable }: SSOSelect
value={value}
options={options}
loading={loading}
- disabledReason={loading ? 'Cannot change while loading' : undefined}
+ disabledReason={loading ? 'Cannot change while loading' : disabledReason}
fullWidth
onChange={onChange}
/>
diff --git a/frontend/src/scenes/settings/organization/VerifiedDomains/VerifiedDomains.tsx b/frontend/src/scenes/settings/organization/VerifiedDomains/VerifiedDomains.tsx
index aeedc82526592..f4c516a924ace 100644
--- a/frontend/src/scenes/settings/organization/VerifiedDomains/VerifiedDomains.tsx
+++ b/frontend/src/scenes/settings/organization/VerifiedDomains/VerifiedDomains.tsx
@@ -1,6 +1,9 @@
import { IconCheckCircle, IconInfo, IconLock, IconTrash, IconWarning } from '@posthog/icons'
import { useActions, useValues } from 'kea'
import { PayGateMini } from 'lib/components/PayGateMini/PayGateMini'
+import { RestrictionScope } from 'lib/components/RestrictedArea'
+import { useRestrictedArea } from 'lib/components/RestrictedArea'
+import { OrganizationMembershipLevel } from 'lib/constants'
import { IconExclamation, IconOffline } from 'lib/lemon-ui/icons'
import { LemonButton } from 'lib/lemon-ui/LemonButton'
import { More } from 'lib/lemon-ui/LemonButton/More'
@@ -27,6 +30,11 @@ export function VerifiedDomains(): JSX.Element {
const { verifiedDomainsLoading, updatingDomainLoading } = useValues(verifiedDomainsLogic)
const { setAddModalShown } = useActions(verifiedDomainsLogic)
+ const restrictionReason = useRestrictedArea({
+ minimumAccessLevel: OrganizationMembershipLevel.Admin,
+ scope: RestrictionScope.Organization,
+ })
+
return (
@@ -39,7 +47,7 @@ export function VerifiedDomains(): JSX.Element {
type="primary"
onClick={() => setAddModalShown(true)}
className="mt-4"
- disabledReason={verifiedDomainsLoading || updatingDomainLoading ? 'loading...' : null}
+ disabledReason={verifiedDomainsLoading || updatingDomainLoading ? 'loading...' : restrictionReason}
>
Add domain
@@ -60,6 +68,11 @@ function VerifiedDomainsTable(): JSX.Element {
useActions(verifiedDomainsLogic)
const { preflight } = useValues(preflightLogic)
+ const restrictionReason = useRestrictedArea({
+ minimumAccessLevel: OrganizationMembershipLevel.Admin,
+ scope: RestrictionScope.Organization,
+ })
+
const columns: LemonTableColumns = [
{
key: 'domain',
@@ -119,6 +132,7 @@ function VerifiedDomainsTable(): JSX.Element {
onChange={(checked) => updateDomain({ id, jit_provisioning_enabled: checked })}
label={
{jit_provisioning_enabled ? 'Enabled' : 'Disabled'}
@@ -157,6 +171,7 @@ function VerifiedDomainsTable(): JSX.Element {
loading={updatingDomainLoading}
onChange={(val) => updateDomain({ id, sso_enforcement: val })}
samlAvailable={has_saml}
+ disabledReason={restrictionReason}
/>
) : (
Verify domain to enable
@@ -205,7 +220,7 @@ function VerifiedDomainsTable(): JSX.Element {
return is_verified ? (
<></>
) : (
- <LemonButton onClick={() => setVerifyModal(id)}>
+ <LemonButton onClick={() => setVerifyModal(id)} disabledReason={restrictionReason}>
Verify
</LemonButton>
)
@@ -224,8 +239,10 @@ function VerifiedDomainsTable(): JSX.Element {
onClick={() => setConfigureSAMLModalId(id)}
fullWidth
- disabled={!isSAMLAvailable}
- title={isSAMLAvailable ? undefined : 'Upgrade to enable SAML'}
+ disabledReason={
+ restrictionReason ||
+ (!isSAMLAvailable ? 'Upgrade to enable SAML' : undefined)
+ }
>
Configure SAML
@@ -249,6 +266,7 @@ function VerifiedDomainsTable(): JSX.Element {
}
fullWidth
icon={<IconTrash />}
+ disabledReason={restrictionReason}
>
Remove domain
diff --git a/frontend/src/scenes/settings/organization/VerifiedDomains/__snapshots__/verifiedDomainsLogic.test.ts.snap b/frontend/src/scenes/settings/organization/VerifiedDomains/__snapshots__/verifiedDomainsLogic.test.ts.snap
index 47c205486a7e8..697dc2f7f1fc0 100644
--- a/frontend/src/scenes/settings/organization/VerifiedDomains/__snapshots__/verifiedDomainsLogic.test.ts.snap
+++ b/frontend/src/scenes/settings/organization/VerifiedDomains/__snapshots__/verifiedDomainsLogic.test.ts.snap
@@ -75,6 +75,7 @@ exports[`verifiedDomainsLogic values has proper defaults 1`] = `
"effective_membership_level": 8,
"has_group_types": true,
"heatmaps_opt_in": true,
+ "human_friendly_comparison_periods": false,
"id": 997,
"ingested_event": true,
"is_demo": false,
diff --git a/frontend/src/scenes/settings/types.ts b/frontend/src/scenes/settings/types.ts
index fc20388e67c40..4b7244ed4bff2 100644
--- a/frontend/src/scenes/settings/types.ts
+++ b/frontend/src/scenes/settings/types.ts
@@ -63,6 +63,7 @@ export type SettingId =
| 'person-display-name'
| 'path-cleaning'
| 'datacapture'
+ | 'human-friendly-comparison-periods'
| 'group-analytics'
| 'persons-on-events'
| 'replay'
diff --git a/frontend/src/scenes/teamActivityDescriber.tsx b/frontend/src/scenes/teamActivityDescriber.tsx
index a85dd03ac3f04..b79cd975af9d4 100644
--- a/frontend/src/scenes/teamActivityDescriber.tsx
+++ b/frontend/src/scenes/teamActivityDescriber.tsx
@@ -305,29 +305,6 @@ const teamActionsMapping: Record<
],
}
},
- // TODO if I had to test and describe every single one of this I'd never release this
- // we can add descriptions here as the need arises
- access_control: () => null,
- anonymize_ips: () => null,
- app_urls: () => null,
- completed_snippet_onboarding: () => null,
- correlation_config: () => null,
- data_attributes: () => null,
- effective_membership_level: () => null,
- has_group_types: () => null,
- ingested_event: () => null,
- is_demo: () => null,
- live_events_columns: () => null,
- organization: () => null,
- project_id: () => null,
- path_cleaning_filters: () => null,
- person_display_name_properties: () => null,
- person_on_events_querying_enabled: () => null,
- primary_dashboard: () => null,
- slack_incoming_webhook: () => null,
- timezone: () => null,
- surveys_opt_in: () => null,
- week_start_day: () => null,
extra_settings: (change: ActivityChange | undefined): ChangeMapping | null => {
const after = change?.after
if (typeof after !== 'object') {
@@ -336,7 +313,9 @@ const teamActionsMapping: Record<
const descriptions = []
for (const key in after) {
if (key === 'poe_v2_enabled') {
- descriptions.push(<>{after[key] ? 'enabled' : 'disabled'} Person on Events (v2)</>)
+ descriptions.push(
+ <>{after[key as keyof typeof after] ? 'enabled' : 'disabled'} Person on Events (v2)</>
+ )
}
}
return { description: descriptions }
@@ -350,24 +329,13 @@ const teamActionsMapping: Record<
for (const key in after) {
descriptions.push(
<>
- set {key} to "{String(after[key])}"
+ set {key} to "{String(after[key as keyof typeof after])}"
</>
)
}
return { description: descriptions }
},
- default_modifiers: () => null,
- has_completed_onboarding_for: () => null,
- // should never come from the backend
- created_at: () => null,
- api_token: () => null,
- id: () => null,
- updated_at: () => null,
- uuid: () => null,
- user_access_level: () => null,
- live_events_token: () => null,
- product_intents: () => null,
- default_data_theme: (change) => {
+ default_data_theme: (change): ChangeMapping | null => {
return {
description: [
<>
@@ -385,6 +353,55 @@ const teamActionsMapping: Record<
],
}
},
+ human_friendly_comparison_periods: (change): ChangeMapping | null => {
+ if (!change) {
+ return null
+ }
+
+ return {
+ description: [
+ <>
+ {change?.after ? 'enabled' : 'disabled'} human friendly comparison periods
+ </>,
+ ],
+ }
+ },
+
+ // TODO if I had to test and describe every single one of this I'd never release this
+ // we can add descriptions here as the need arises
+ access_control: () => null,
+ anonymize_ips: () => null,
+ app_urls: () => null,
+ completed_snippet_onboarding: () => null,
+ correlation_config: () => null,
+ data_attributes: () => null,
+ effective_membership_level: () => null,
+ has_group_types: () => null,
+ ingested_event: () => null,
+ is_demo: () => null,
+ live_events_columns: () => null,
+ organization: () => null,
+ project_id: () => null,
+ path_cleaning_filters: () => null,
+ person_display_name_properties: () => null,
+ person_on_events_querying_enabled: () => null,
+ primary_dashboard: () => null,
+ slack_incoming_webhook: () => null,
+ timezone: () => null,
+ surveys_opt_in: () => null,
+ week_start_day: () => null,
+ default_modifiers: () => null,
+ has_completed_onboarding_for: () => null,
+
+ // should never come from the backend
+ created_at: () => null,
+ api_token: () => null,
+ id: () => null,
+ updated_at: () => null,
+ uuid: () => null,
+ user_access_level: () => null,
+ live_events_token: () => null,
+ product_intents: () => null,
cookieless_server_hash_mode: () => null,
}
@@ -409,11 +426,11 @@ export function teamActivityDescriber(logItem: ActivityLogItem, asNotification?:
let changeSuffix: Description = <>on {nameAndLink(logItem)}</>
for (const change of logItem.detail.changes || []) {
- if (!change?.field || !teamActionsMapping[change.field]) {
+ if (!change?.field || !(change.field in teamActionsMapping)) {
continue // not all notebook fields are describable
}
- const actionHandler = teamActionsMapping[change.field]
+ const actionHandler = teamActionsMapping[change.field as keyof TeamType]
const processedChange = actionHandler(change, logItem)
if (processedChange === null) {
continue // // unexpected log from backend is indescribable
diff --git a/frontend/src/scenes/trends/mathsLogic.tsx b/frontend/src/scenes/trends/mathsLogic.tsx
index 7f907d3f1f246..be5348ef198a3 100644
--- a/frontend/src/scenes/trends/mathsLogic.tsx
+++ b/frontend/src/scenes/trends/mathsLogic.tsx
@@ -241,6 +241,19 @@ export const PROPERTY_MATH_DEFINITIONS: Record
),
category: MathCategory.PropertyValue,
},
+ [PropertyMathType.P75]: {
+ name: '75th percentile',
+ shortName: '75th percentile',
+ description: (
+ <>
+ Event property 75th percentile.
+ <br />
+ <br />
+ For example 100 events captured with property <code>amount</code> equal to 101..200, result in 175.
+ </>
+ ),
+ category: MathCategory.PropertyValue,
+ },
[PropertyMathType.P90]: {
name: '90th percentile',
shortName: '90th percentile',
@@ -315,6 +328,12 @@ export const COUNT_PER_ACTOR_MATH_DEFINITIONS: Record
description: <>Event count per actor 50th percentile.</>,
category: MathCategory.EventCountPerActor,
},
+ [CountPerActorMathType.P75]: {
+ name: '75th percentile',
+ shortName: '75th percentile',
+ description: <>Event count per actor 75th percentile.</>,
+ category: MathCategory.EventCountPerActor,
+ },
[CountPerActorMathType.P90]: {
name: '90th percentile',
shortName: '90th percentile',
diff --git a/frontend/src/scenes/urls.ts b/frontend/src/scenes/urls.ts
index 1b3495bb528c3..cace92f08b5fb 100644
--- a/frontend/src/scenes/urls.ts
+++ b/frontend/src/scenes/urls.ts
@@ -159,8 +159,8 @@ export const urls = {
cohorts: (): string => '/cohorts',
experiment: (id: string | number): string => `/experiments/${id}`,
experiments: (): string => '/experiments',
- experimentsSavedMetrics: (): string => '/experiments/saved-metrics',
- experimentsSavedMetric: (id: string | number): string => `/experiments/saved-metrics/${id}`,
+ experimentsSharedMetrics: (): string => '/experiments/shared-metrics',
+ experimentsSharedMetric: (id: string | number): string => `/experiments/shared-metrics/${id}`,
featureFlags: (tab?: string): string => `/feature_flags${tab ? `?tab=${tab}` : ''}`,
featureFlag: (id: string | number): string => `/feature_flags/${id}`,
featureManagement: (id?: string | number): string => `/features${id ? `/${id}` : ''}`,
diff --git a/frontend/src/types.ts b/frontend/src/types.ts
index c2b690515861a..e6fa3bbb63db6 100644
--- a/frontend/src/types.ts
+++ b/frontend/src/types.ts
@@ -550,6 +550,7 @@ export interface TeamType extends TeamBasicType {
live_events_columns: string[] | null // Custom columns shown on the Live Events page
live_events_token: string
cookieless_server_hash_mode?: CookielessServerHashMode
+ human_friendly_comparison_periods: boolean
/** Effective access level of the user in this specific team. Null if user has no access. */
effective_membership_level: OrganizationMembershipLevel | null
@@ -709,7 +710,7 @@ export enum ExperimentsTabs {
Yours = 'yours',
Archived = 'archived',
Holdouts = 'holdouts',
- SavedMetrics = 'saved-metrics',
+ SharedMetrics = 'shared-metrics',
}
export enum ActivityTab {
@@ -3651,6 +3652,7 @@ export enum PropertyMathType {
Minimum = 'min',
Maximum = 'max',
Median = 'median',
+ P75 = 'p75',
P90 = 'p90',
P95 = 'p95',
P99 = 'p99',
@@ -3661,6 +3663,7 @@ export enum CountPerActorMathType {
Minimum = 'min_count_per_actor',
Maximum = 'max_count_per_actor',
Median = 'median_count_per_actor',
+ P75 = 'p75_count_per_actor',
P90 = 'p90_count_per_actor',
P95 = 'p95_count_per_actor',
P99 = 'p99_count_per_actor',
diff --git a/mypy-baseline.txt b/mypy-baseline.txt
index a4444d8b67031..d1068644d3b3e 100644
--- a/mypy-baseline.txt
+++ b/mypy-baseline.txt
@@ -603,6 +603,22 @@ posthog/temporal/data_imports/workflow_activities/sync_new_schemas.py:0: note: d
posthog/temporal/data_imports/workflow_activities/sync_new_schemas.py:0: note: def get(self, Type, Sequence[str], /) -> Sequence[str]
posthog/temporal/data_imports/workflow_activities/sync_new_schemas.py:0: note: def [_T] get(self, Type, _T, /) -> Sequence[str] | _T
posthog/temporal/data_imports/workflow_activities/sync_new_schemas.py:0: error: Argument "source_id" to "sync_old_schemas_with_new_schemas" has incompatible type "str"; expected "UUID" [arg-type]
+posthog/taxonomy/property_definition_api.py:0: error: Item "AnonymousUser" of "User | AnonymousUser" has no attribute "organization" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "None" of "Organization | Any | None" has no attribute "is_feature_available" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "ForeignObjectRel" of "Field[Any, Any] | ForeignObjectRel | GenericForeignKey" has no attribute "cached_col" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "GenericForeignKey" of "Field[Any, Any] | ForeignObjectRel | GenericForeignKey" has no attribute "cached_col" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Incompatible types in assignment (expression has type "Manager[EnterprisePropertyDefinition]", variable has type "QuerySet[PropertyDefinition, PropertyDefinition]") [assignment]
+posthog/taxonomy/property_definition_api.py:0: error: Item "BasePagination" of "BasePagination | None" has no attribute "get_limit" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "None" of "BasePagination | None" has no attribute "get_limit" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "BasePagination" of "BasePagination | None" has no attribute "get_offset" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "None" of "BasePagination | None" has no attribute "get_offset" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "BasePagination" of "BasePagination | None" has no attribute "set_count" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "None" of "BasePagination | None" has no attribute "set_count" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "AnonymousUser" of "User | AnonymousUser" has no attribute "organization" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "None" of "Organization | Any | None" has no attribute "is_feature_available" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Incompatible types in assignment (expression has type "type[EnterprisePropertyDefinitionSerializer]", variable has type "type[PropertyDefinitionSerializer]") [assignment]
+posthog/taxonomy/property_definition_api.py:0: error: Item "AnonymousUser" of "User | AnonymousUser" has no attribute "organization" [union-attr]
+posthog/taxonomy/property_definition_api.py:0: error: Item "None" of "Organization | Any | None" has no attribute "is_feature_available" [union-attr]
posthog/tasks/exports/test/test_csv_exporter.py:0: error: Function is missing a return type annotation [no-untyped-def]
posthog/tasks/exports/test/test_csv_exporter.py:0: error: Function is missing a type annotation [no-untyped-def]
posthog/tasks/exports/test/test_csv_exporter.py:0: error: Function is missing a type annotation for one or more arguments [no-untyped-def]
@@ -717,22 +733,6 @@ posthog/api/test/dashboards/test_dashboard.py:0: error: Module "django.utils.tim
posthog/api/test/dashboards/test_dashboard.py:0: error: Module "django.utils.timezone" does not explicitly export attribute "timedelta" [attr-defined]
posthog/api/test/dashboards/test_dashboard.py:0: error: Module "django.utils.timezone" does not explicitly export attribute "timedelta" [attr-defined]
posthog/api/query.py:0: error: Statement is unreachable [unreachable]
-posthog/api/property_definition.py:0: error: Item "AnonymousUser" of "User | AnonymousUser" has no attribute "organization" [union-attr]
-posthog/api/property_definition.py:0: error: Item "None" of "Organization | Any | None" has no attribute "is_feature_available" [union-attr]
-posthog/api/property_definition.py:0: error: Item "ForeignObjectRel" of "Field[Any, Any] | ForeignObjectRel | GenericForeignKey" has no attribute "cached_col" [union-attr]
-posthog/api/property_definition.py:0: error: Item "GenericForeignKey" of "Field[Any, Any] | ForeignObjectRel | GenericForeignKey" has no attribute "cached_col" [union-attr]
-posthog/api/property_definition.py:0: error: Incompatible types in assignment (expression has type "Manager[EnterprisePropertyDefinition]", variable has type "QuerySet[PropertyDefinition, PropertyDefinition]") [assignment]
-posthog/api/property_definition.py:0: error: Item "BasePagination" of "BasePagination | None" has no attribute "get_limit" [union-attr]
-posthog/api/property_definition.py:0: error: Item "None" of "BasePagination | None" has no attribute "get_limit" [union-attr]
-posthog/api/property_definition.py:0: error: Item "BasePagination" of "BasePagination | None" has no attribute "get_offset" [union-attr]
-posthog/api/property_definition.py:0: error: Item "None" of "BasePagination | None" has no attribute "get_offset" [union-attr]
-posthog/api/property_definition.py:0: error: Item "BasePagination" of "BasePagination | None" has no attribute "set_count" [union-attr]
-posthog/api/property_definition.py:0: error: Item "None" of "BasePagination | None" has no attribute "set_count" [union-attr]
-posthog/api/property_definition.py:0: error: Item "AnonymousUser" of "User | AnonymousUser" has no attribute "organization" [union-attr]
-posthog/api/property_definition.py:0: error: Item "None" of "Organization | Any | None" has no attribute "is_feature_available" [union-attr]
-posthog/api/property_definition.py:0: error: Incompatible types in assignment (expression has type "type[EnterprisePropertyDefinitionSerializer]", variable has type "type[PropertyDefinitionSerializer]") [assignment]
-posthog/api/property_definition.py:0: error: Item "AnonymousUser" of "User | AnonymousUser" has no attribute "organization" [union-attr]
-posthog/api/property_definition.py:0: error: Item "None" of "Organization | Any | None" has no attribute "is_feature_available" [union-attr]
posthog/api/event.py:0: error: Argument 1 to has incompatible type "*tuple[str, ...]"; expected "type[BaseRenderer]" [arg-type]
posthog/admin/inlines/plugin_attachment_inline.py:0: error: Signature of "has_add_permission" incompatible with supertype "BaseModelAdmin" [override]
posthog/admin/inlines/plugin_attachment_inline.py:0: note: Superclass:
diff --git a/posthog/api/__init__.py b/posthog/api/__init__.py
index 5e0cd8d81c1e5..5ab189e943aeb 100644
--- a/posthog/api/__init__.py
+++ b/posthog/api/__init__.py
@@ -49,7 +49,6 @@
personal_api_key,
plugin,
plugin_log_entry,
- property_definition,
proxy_record,
query,
scheduled_change,
@@ -61,6 +60,7 @@
uploaded_media,
user,
)
+from ..taxonomy import property_definition_api
from .dashboards import dashboard, dashboard_templates
from .data_management import DataManagementViewSet
from .session import SessionViewSet
@@ -264,7 +264,7 @@ def register_grandfathered_environment_nested_viewset(
)
projects_router.register(
r"property_definitions",
- property_definition.PropertyDefinitionViewSet,
+ property_definition_api.PropertyDefinitionViewSet,
"project_property_definitions",
["project_id"],
)
diff --git a/posthog/api/documentation.py b/posthog/api/documentation.py
index d885aa12a3c84..a47632291c15f 100644
--- a/posthog/api/documentation.py
+++ b/posthog/api/documentation.py
@@ -161,6 +161,7 @@ class PersonPropertiesSerializer(serializers.Serializer):
- `min`: min of a numeric property.
- `max`: max of a numeric property.
- `median`: median of a numeric property.
+- `p75`: 75th percentile of a numeric property.
- `p90`: 90th percentile of a numeric property.
- `p95` 95th percentile of a numeric property.
- `p99`: 99th percentile of a numeric property.
diff --git a/posthog/api/feature_flag.py b/posthog/api/feature_flag.py
index f20c1a4a6105a..949fd33241a4b 100644
--- a/posthog/api/feature_flag.py
+++ b/posthog/api/feature_flag.py
@@ -385,6 +385,7 @@ def update(self, instance: FeatureFlag, validated_data: dict, *args: Any, **kwar
request = self.context["request"]
validated_key = validated_data.get("key", None)
if validated_key:
+ # Delete any soft deleted feature flags with the same key to prevent conflicts
FeatureFlag.objects.filter(
key=validated_key, team__project_id=instance.team.project_id, deleted=True
).delete()
@@ -396,6 +397,8 @@ def update(self, instance: FeatureFlag, validated_data: dict, *args: Any, **kwar
for dashboard in analytics_dashboards:
FeatureFlagDashboards.objects.get_or_create(dashboard=dashboard, feature_flag=instance)
+ old_key = instance.key
+
instance = super().update(instance, validated_data)
# Propagate the new variants and aggregation group type index to the linked experiments
@@ -415,6 +418,9 @@ def update(self, instance: FeatureFlag, validated_data: dict, *args: Any, **kwar
experiment.parameters.pop("aggregation_group_type_index", None)
experiment.save()
+ if old_key != instance.key:
+ _update_feature_flag_dashboard(instance, old_key)
+
report_user_action(request.user, "feature flag updated", instance.get_analytics_metadata())
return instance
@@ -446,6 +452,15 @@ def _create_usage_dashboard(feature_flag: FeatureFlag, user):
return usage_dashboard
+def _update_feature_flag_dashboard(feature_flag: FeatureFlag, old_key: str) -> None:
+ from posthog.helpers.dashboard_templates import update_feature_flag_dashboard
+
+ if not old_key:
+ return
+
+ update_feature_flag_dashboard(feature_flag, old_key)
+
+
class MinimalFeatureFlagSerializer(serializers.ModelSerializer):
filters = serializers.DictField(source="get_filters", required=False)
diff --git a/posthog/api/team.py b/posthog/api/team.py
index 3aa1338ce46a6..7186e84aa2436 100644
--- a/posthog/api/team.py
+++ b/posthog/api/team.py
@@ -202,6 +202,7 @@ class Meta:
"live_events_columns",
"recording_domains",
"cookieless_server_hash_mode",
+ "human_friendly_comparison_periods",
"person_on_events_querying_enabled",
"inject_web_apps",
"extra_settings",
diff --git a/posthog/api/test/__snapshots__/test_action.ambr b/posthog/api/test/__snapshots__/test_action.ambr
index be7461a298a6c..f4de6699d716b 100644
--- a/posthog/api/test/__snapshots__/test_action.ambr
+++ b/posthog/api/test/__snapshots__/test_action.ambr
@@ -83,6 +83,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -390,6 +391,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -906,6 +908,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_annotation.ambr b/posthog/api/test/__snapshots__/test_annotation.ambr
index e12cd7a78b6ca..6b84f6718dcb4 100644
--- a/posthog/api/test/__snapshots__/test_annotation.ambr
+++ b/posthog/api/test/__snapshots__/test_annotation.ambr
@@ -83,6 +83,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -385,6 +386,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -833,6 +835,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_api_docs.ambr b/posthog/api/test/__snapshots__/test_api_docs.ambr
index 862d8507dbb75..9df243202c4df 100644
--- a/posthog/api/test/__snapshots__/test_api_docs.ambr
+++ b/posthog/api/test/__snapshots__/test_api_docs.ambr
@@ -58,7 +58,6 @@
'/home/runner/work/posthog/posthog/posthog/api/project.py: Warning [ProjectViewSet > ProjectBackwardCompatSerializer]: could not resolve field on model with path "default_modifiers". This is likely a custom field that does some unknown magic. Maybe consider annotating the field/property? Defaulting to "string". (Exception: Project has no field named \'default_modifiers\')',
'/home/runner/work/posthog/posthog/posthog/api/project.py: Warning [ProjectViewSet > ProjectBackwardCompatSerializer]: could not resolve field on model with path "person_on_events_querying_enabled". This is likely a custom field that does some unknown magic. Maybe consider annotating the field/property? Defaulting to "string". (Exception: Project has no field named \'person_on_events_querying_enabled\')',
'/home/runner/work/posthog/posthog/posthog/api/project.py: Warning [ProjectViewSet > ProjectBackwardCompatSerializer]: unable to resolve type hint for function "get_product_intents". Consider using a type hint or @extend_schema_field. Defaulting to string.',
- "/home/runner/work/posthog/posthog/posthog/api/property_definition.py: Error [PropertyDefinitionViewSet]: exception raised while getting serializer. Hint: Is get_serializer_class() returning None or is get_queryset() not working without a request? Ignoring the view for now. (Exception: 'AnonymousUser' object has no attribute 'organization')",
'/home/runner/work/posthog/posthog/posthog/api/proxy_record.py: Warning [ProxyRecordViewset]: could not derive type of path parameter "id" because it is untyped and obtaining queryset from the viewset failed. Consider adding a type to the path (e.g. ) or annotating the parameter type with @extend_schema. Defaulting to "string".',
'/home/runner/work/posthog/posthog/posthog/api/proxy_record.py: Warning [ProxyRecordViewset]: could not derive type of path parameter "organization_id" because it is untyped and obtaining queryset from the viewset failed. Consider adding a type to the path (e.g. ) or annotating the parameter type with @extend_schema. Defaulting to "string".',
'/home/runner/work/posthog/posthog/posthog/api/query.py: Warning [QueryViewSet]: could not derive type of path parameter "id" because it is untyped and obtaining queryset from the viewset failed. Consider adding a type to the path (e.g. ) or annotating the parameter type with @extend_schema. Defaulting to "string".',
@@ -83,6 +82,7 @@
'/home/runner/work/posthog/posthog/posthog/models/event/util.py: Warning [EventViewSet > ClickhouseEventSerializer]: unable to resolve type hint for function "get_timestamp". Consider using a type hint or @extend_schema_field. Defaulting to string.',
'/home/runner/work/posthog/posthog/posthog/session_recordings/session_recording_api.py: Warning [SessionRecordingViewSet > SessionRecordingSerializer]: unable to resolve type hint for function "storage". Consider using a type hint or @extend_schema_field. Defaulting to string.',
'/home/runner/work/posthog/posthog/posthog/session_recordings/session_recording_api.py: Warning [SessionRecordingViewSet]: could not derive type of path parameter "project_id" because model "posthog.session_recordings.models.session_recording.SessionRecording" contained no such field. Consider annotating parameter with @extend_schema. Defaulting to "string".',
+ "/home/runner/work/posthog/posthog/posthog/taxonomy/property_definition_api.py: Error [PropertyDefinitionViewSet]: exception raised while getting serializer. Hint: Is get_serializer_class() returning None or is get_queryset() not working without a request? Ignoring the view for now. (Exception: 'AnonymousUser' object has no attribute 'organization')",
'/opt/hostedtoolcache/Python/3.11.9/x64/lib/python3.11/site-packages/pydantic/_internal/_model_construction.py: Warning [QueryViewSet > ModelMetaclass]: Encountered 2 components with identical names "Person" and different classes and . This will very likely result in an incorrect schema. Try renaming one.',
'Warning: encountered multiple names for the same choice set (EffectivePrivilegeLevelEnum). This may be unwanted even though the generated schema is technically correct. Add an entry to ENUM_NAME_OVERRIDES to fix the naming.',
'Warning: encountered multiple names for the same choice set (HrefMatchingEnum). This may be unwanted even though the generated schema is technically correct. Add an entry to ENUM_NAME_OVERRIDES to fix the naming.',
diff --git a/posthog/api/test/__snapshots__/test_decide.ambr b/posthog/api/test/__snapshots__/test_decide.ambr
index f6affd2437a9c..bf19b1219b282 100644
--- a/posthog/api/test/__snapshots__/test_decide.ambr
+++ b/posthog/api/test/__snapshots__/test_decide.ambr
@@ -318,6 +318,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -391,6 +392,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -477,6 +479,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -695,6 +698,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -779,6 +783,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -868,6 +873,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1239,6 +1245,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1312,6 +1319,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1400,6 +1408,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1477,6 +1486,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1679,6 +1689,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1890,6 +1901,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2092,6 +2104,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2201,6 +2214,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2294,6 +2308,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2367,6 +2382,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2455,6 +2471,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2532,6 +2549,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2734,6 +2752,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2933,6 +2952,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3135,6 +3155,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3244,6 +3265,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3316,6 +3338,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3441,6 +3464,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3517,6 +3541,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3594,6 +3619,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3812,6 +3838,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3989,6 +4016,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4062,6 +4090,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4139,6 +4168,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4588,6 +4618,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4661,6 +4692,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4747,6 +4779,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4965,6 +4998,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5049,6 +5083,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5138,6 +5173,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5255,6 +5291,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5473,6 +5510,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5618,6 +5656,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5836,6 +5875,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6166,6 +6206,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6239,6 +6280,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6327,6 +6369,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6404,6 +6447,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6606,6 +6650,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6694,6 +6739,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6908,6 +6954,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7037,6 +7084,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7202,6 +7250,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7311,6 +7360,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7660,6 +7710,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7769,6 +7820,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7862,6 +7914,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7935,6 +7988,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8023,6 +8077,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8100,6 +8155,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8302,6 +8358,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8390,6 +8447,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8604,6 +8662,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8729,6 +8788,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8894,6 +8954,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9003,6 +9064,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9344,6 +9406,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9453,6 +9516,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_early_access_feature.ambr b/posthog/api/test/__snapshots__/test_early_access_feature.ambr
index 5b1dcc653d1d8..e9fa372dbfe4d 100644
--- a/posthog/api/test/__snapshots__/test_early_access_feature.ambr
+++ b/posthog/api/test/__snapshots__/test_early_access_feature.ambr
@@ -50,6 +50,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -145,6 +146,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -222,6 +224,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -424,6 +427,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -533,6 +537,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -630,6 +635,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -802,6 +808,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1004,6 +1011,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1103,6 +1111,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1175,6 +1184,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1360,6 +1370,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1437,6 +1448,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1639,6 +1651,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1715,6 +1728,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1874,6 +1888,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1951,6 +1966,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2153,6 +2169,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_element.ambr b/posthog/api/test/__snapshots__/test_element.ambr
index 903b9ab705a8e..1a6a987ba4ac8 100644
--- a/posthog/api/test/__snapshots__/test_element.ambr
+++ b/posthog/api/test/__snapshots__/test_element.ambr
@@ -83,6 +83,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_feature_flag.ambr b/posthog/api/test/__snapshots__/test_feature_flag.ambr
index 07eb0a9ef7ed1..bc2418527e9e7 100644
--- a/posthog/api/test/__snapshots__/test_feature_flag.ambr
+++ b/posthog/api/test/__snapshots__/test_feature_flag.ambr
@@ -1362,6 +1362,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1531,6 +1532,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_insight.ambr b/posthog/api/test/__snapshots__/test_insight.ambr
index 82d0c4fc65096..9a9d2db26f329 100644
--- a/posthog/api/test/__snapshots__/test_insight.ambr
+++ b/posthog/api/test/__snapshots__/test_insight.ambr
@@ -722,6 +722,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -787,6 +788,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -859,6 +861,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -931,6 +934,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1073,6 +1077,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1330,6 +1335,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1616,6 +1622,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1765,6 +1772,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1891,6 +1899,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2066,6 +2075,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2166,6 +2176,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr b/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr
index 1ac0107628d8a..ac5057c62bf77 100644
--- a/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr
+++ b/posthog/api/test/__snapshots__/test_organization_feature_flag.ambr
@@ -132,6 +132,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -209,6 +210,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -411,6 +413,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -487,6 +490,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -616,6 +620,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -716,6 +721,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -788,6 +794,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -930,6 +937,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1026,6 +1034,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1126,6 +1135,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1198,6 +1208,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1340,6 +1351,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1435,6 +1447,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1538,6 +1551,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1740,6 +1754,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1816,6 +1831,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1964,6 +1980,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2767,6 +2784,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_preflight.ambr b/posthog/api/test/__snapshots__/test_preflight.ambr
index 2077015ad7de9..ef69f7efc7040 100644
--- a/posthog/api/test/__snapshots__/test_preflight.ambr
+++ b/posthog/api/test/__snapshots__/test_preflight.ambr
@@ -83,6 +83,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/__snapshots__/test_survey.ambr b/posthog/api/test/__snapshots__/test_survey.ambr
index af29d9df060c0..d1db7ad90bee3 100644
--- a/posthog/api/test/__snapshots__/test_survey.ambr
+++ b/posthog/api/test/__snapshots__/test_survey.ambr
@@ -86,6 +86,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -151,6 +152,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -228,6 +230,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -455,6 +458,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -554,6 +558,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -643,6 +648,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -870,6 +876,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -969,6 +976,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1046,6 +1054,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1118,6 +1127,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1345,6 +1355,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1433,6 +1444,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1510,6 +1522,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1746,6 +1759,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1855,6 +1869,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2175,6 +2190,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/batch_exports/conftest.py b/posthog/api/test/batch_exports/conftest.py
index 94ef6055d20f9..229f548052a51 100644
--- a/posthog/api/test/batch_exports/conftest.py
+++ b/posthog/api/test/batch_exports/conftest.py
@@ -6,15 +6,17 @@
from contextlib import contextmanager
import pytest
+import temporalio.worker
from asgiref.sync import async_to_sync
-from django.conf import settings
from temporalio.client import Client as TemporalClient
from temporalio.service import RPCError
from temporalio.worker import UnsandboxedWorkflowRunner, Worker
-from posthog.constants import BATCH_EXPORTS_TASK_QUEUE
+
+from posthog import constants
from posthog.batch_exports.models import BatchExport
-from posthog.temporal.common.client import sync_connect
+from posthog.constants import BATCH_EXPORTS_TASK_QUEUE
from posthog.temporal.batch_exports import ACTIVITIES, WORKFLOWS
+from posthog.temporal.common.client import sync_connect
class ThreadedWorker(Worker):
@@ -91,10 +93,10 @@ async def describe_workflow(temporal: TemporalClient, workflow_id: str):
def start_test_worker(temporal: TemporalClient):
with ThreadedWorker(
client=temporal,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=WORKFLOWS,
activities=ACTIVITIES,
- workflow_runner=UnsandboxedWorkflowRunner(),
+ workflow_runner=temporalio.worker.UnsandboxedWorkflowRunner(),
graceful_shutdown_timeout=dt.timedelta(seconds=5),
).run_in_thread():
yield
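The conftest hunk above pins the test worker to the batch exports queue instead of `settings.TEMPORAL_TASK_QUEUE`, so workflows scheduled on `constants.BATCH_EXPORTS_TASK_QUEUE` (including backfills, per the `service.py` change further down) are actually picked up in tests. A minimal usage sketch, assuming `start_test_worker` is exposed as a `@contextmanager` (the `contextlib` import and the `yield` suggest it) and that a local Temporal server is available to `sync_connect`:

```python
# Sketch only, not part of the diff: running a workflow test against the
# threaded worker. Assumes start_test_worker is decorated with @contextmanager
# in conftest.py and that a local Temporal server is reachable, as in the
# batch export test suite.
from posthog.api.test.batch_exports.conftest import start_test_worker
from posthog.temporal.common.client import sync_connect


def run_against_test_worker(test_body):
    temporal = sync_connect()  # same helper the conftest imports
    with start_test_worker(temporal):
        # The ThreadedWorker now polls constants.BATCH_EXPORTS_TASK_QUEUE,
        # so anything started on that queue is executed here.
        test_body(temporal)
```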
diff --git a/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr b/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr
index 7da87ca1ef1ed..31f26f695f73a 100644
--- a/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr
+++ b/posthog/api/test/dashboards/__snapshots__/test_dashboard.ambr
@@ -83,6 +83,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -274,6 +275,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -597,6 +599,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -897,6 +900,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1374,6 +1378,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1446,6 +1451,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1577,6 +1583,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1642,6 +1649,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1714,6 +1722,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1786,6 +1795,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1928,6 +1938,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2052,6 +2063,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2269,6 +2281,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2724,6 +2737,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2824,6 +2838,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2957,6 +2972,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3022,6 +3038,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3118,6 +3135,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3242,6 +3260,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3447,6 +3466,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3649,6 +3669,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3749,6 +3770,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3888,6 +3910,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4034,6 +4057,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4106,6 +4130,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4178,6 +4203,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4320,6 +4346,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4444,6 +4471,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4643,6 +4671,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4965,6 +4994,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5265,6 +5295,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5475,6 +5506,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5892,6 +5924,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6017,6 +6050,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6204,6 +6238,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6477,6 +6512,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6577,6 +6613,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6677,6 +6714,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6781,6 +6819,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6853,6 +6892,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6995,6 +7035,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7067,6 +7108,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7139,6 +7181,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7292,6 +7335,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7416,6 +7460,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7633,6 +7678,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7835,6 +7881,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7935,6 +7982,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8047,6 +8095,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8119,6 +8168,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8191,6 +8241,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8333,6 +8384,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8457,6 +8509,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8662,6 +8715,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8871,6 +8925,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8971,6 +9026,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9071,6 +9127,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9143,6 +9200,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9215,6 +9273,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9411,6 +9470,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9700,6 +9760,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -9890,6 +9951,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -10075,6 +10137,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -10140,6 +10203,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -10265,6 +10329,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -10452,6 +10517,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -10733,6 +10799,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -10858,6 +10925,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -11045,6 +11113,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr b/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr
index c728a480610ed..de80d2ac69770 100644
--- a/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr
+++ b/posthog/api/test/notebooks/__snapshots__/test_notebook.ambr
@@ -83,6 +83,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -238,6 +239,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -475,6 +477,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -877,6 +880,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/api/test/test_feature_flag.py b/posthog/api/test/test_feature_flag.py
index 4ae2d364d1f03..35bfa12fe3c34 100644
--- a/posthog/api/test/test_feature_flag.py
+++ b/posthog/api/test/test_feature_flag.py
@@ -792,6 +792,447 @@ def test_updating_feature_flag(self, mock_capture):
],
)
+ @patch("posthog.api.feature_flag.report_user_action")
+ def test_updating_feature_flag_key(self, mock_capture):
+ with freeze_time("2021-08-25T22:09:14.252Z") as frozen_datetime:
+ response = self.client.post(
+ f"/api/projects/{self.team.id}/feature_flags/",
+ {"name": "original name", "key": "a-feature-flag-that-is-updated"},
+ format="json",
+ )
+ self.assertEqual(response.status_code, status.HTTP_201_CREATED)
+ flag_id = response.json()["id"]
+
+ frozen_datetime.tick(delta=datetime.timedelta(minutes=10))
+
+ # Assert that the insights were created properly.
+ feature_flag = FeatureFlag.objects.get(id=flag_id)
+ assert feature_flag.usage_dashboard is not None, "Usage dashboard was not created"
+ insights = feature_flag.usage_dashboard.insights
+ total_volume_insight = insights.get(name="Feature Flag Called Total Volume")
+ self.assertEqual(
+ total_volume_insight.description,
+ "Shows the number of total calls made on feature flag with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ total_volume_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+ unique_users_insight = insights.get(name="Feature Flag calls made by unique users per variant")
+ self.assertEqual(
+ unique_users_insight.description,
+ "Shows the number of unique user calls made on feature flag per variant with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ unique_users_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+
+ # Update the feature flag key
+ response = self.client.patch(
+ f"/api/projects/{self.team.id}/feature_flags/{flag_id}",
+ {
+ "key": "a-new-feature-flag-key",
+ "filters": {
+ "groups": [
+ {
+ "rollout_percentage": 65,
+ "properties": [
+ {
+ "key": "email",
+ "type": "person",
+ "value": "@posthog.com",
+ "operator": "icontains",
+ }
+ ],
+ }
+ ]
+ },
+ },
+ format="json",
+ )
+
+ self.assertEqual(response.status_code, status.HTTP_200_OK)
+
+ self.assertEqual(response.json()["key"], "a-new-feature-flag-key")
+ self.assertEqual(response.json()["filters"]["groups"][0]["rollout_percentage"], 65)
+
+ # Assert analytics are sent
+ mock_capture.assert_called_with(
+ self.user,
+ "feature flag updated",
+ {
+ "groups_count": 1,
+ "has_variants": False,
+ "variants_count": 0,
+ "has_rollout_percentage": True,
+ "has_filters": True,
+ "filter_count": 1,
+ "created_at": datetime.datetime.fromisoformat("2021-08-25T22:09:14.252000+00:00"),
+ "aggregating_by_groups": False,
+ "payload_count": 0,
+ },
+ )
+
+ self.assert_feature_flag_activity(
+ flag_id,
+ [
+ {
+ "user": {
+ "first_name": self.user.first_name,
+ "email": self.user.email,
+ },
+ "activity": "updated",
+ "created_at": "2021-08-25T22:19:14.252000Z",
+ "scope": "FeatureFlag",
+ "item_id": str(flag_id),
+ "detail": {
+ "changes": [
+ {
+ "type": "FeatureFlag",
+ "action": "changed",
+ "field": "key",
+ "before": "a-feature-flag-that-is-updated",
+ "after": "a-new-feature-flag-key",
+ },
+ {
+ "type": "FeatureFlag",
+ "action": "created",
+ "field": "filters",
+ "before": None,
+ "after": {
+ "groups": [
+ {
+ "properties": [
+ {
+ "key": "email",
+ "type": "person",
+ "value": "@posthog.com",
+ "operator": "icontains",
+ }
+ ],
+ "rollout_percentage": 65,
+ }
+ ]
+ },
+ },
+ ],
+ "trigger": None,
+ "type": None,
+ "name": "a-new-feature-flag-key",
+ "short_id": None,
+ },
+ },
+ {
+ "user": {
+ "first_name": self.user.first_name,
+ "email": self.user.email,
+ },
+ "activity": "created",
+ "created_at": "2021-08-25T22:09:14.252000Z",
+ "scope": "FeatureFlag",
+ "item_id": str(flag_id),
+ "detail": {
+ "changes": None,
+ "trigger": None,
+ "type": None,
+ "name": "a-feature-flag-that-is-updated",
+ "short_id": None,
+ },
+ },
+ ],
+ )
+
+ feature_flag = FeatureFlag.objects.get(id=flag_id)
+ assert feature_flag.usage_dashboard is not None, "Usage dashboard was not created"
+ insights = feature_flag.usage_dashboard.insights
+ total_volume_insight = insights.get(name="Feature Flag Called Total Volume")
+ self.assertEqual(
+ total_volume_insight.description,
+ "Shows the number of total calls made on feature flag with key: a-new-feature-flag-key",
+ )
+ self.assertEqual(
+ total_volume_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-new-feature-flag-key",
+ )
+ unique_users_insight = insights.get(name="Feature Flag calls made by unique users per variant")
+ self.assertEqual(
+ unique_users_insight.description,
+ "Shows the number of unique user calls made on feature flag per variant with key: a-new-feature-flag-key",
+ )
+ self.assertEqual(
+ unique_users_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-new-feature-flag-key",
+ )
+
+ @patch("posthog.api.feature_flag.report_user_action")
+ def test_updating_feature_flag_key_does_not_update_insight_with_changed_description(self, mock_capture):
+ with freeze_time("2021-08-25T22:09:14.252Z") as frozen_datetime:
+ response = self.client.post(
+ f"/api/projects/{self.team.id}/feature_flags/",
+ {"name": "original name", "key": "a-feature-flag-that-is-updated"},
+ format="json",
+ )
+ self.assertEqual(response.status_code, status.HTTP_201_CREATED)
+ flag_id = response.json()["id"]
+
+ frozen_datetime.tick(delta=datetime.timedelta(minutes=10))
+
+ # Assert that the insights were created properly.
+ feature_flag = FeatureFlag.objects.get(id=flag_id)
+ assert feature_flag.usage_dashboard is not None, "Usage dashboard was not created"
+ insights = feature_flag.usage_dashboard.insights
+ total_volume_insight = insights.get(name="Feature Flag Called Total Volume")
+ self.assertEqual(
+ total_volume_insight.description,
+ "Shows the number of total calls made on feature flag with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ total_volume_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+ unique_users_insight = insights.get(name="Feature Flag calls made by unique users per variant")
+ self.assertEqual(
+ unique_users_insight.description,
+ "Shows the number of unique user calls made on feature flag per variant with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ unique_users_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+ total_volume_insight.name = "This is a changed description"
+ total_volume_insight.save()
+
+ # Update the feature flag key
+ response = self.client.patch(
+ f"/api/projects/{self.team.id}/feature_flags/{flag_id}",
+ {
+ "key": "a-new-feature-flag-key",
+ "filters": {
+ "groups": [
+ {
+ "rollout_percentage": 65,
+ "properties": [
+ {
+ "key": "email",
+ "type": "person",
+ "value": "@posthog.com",
+ "operator": "icontains",
+ }
+ ],
+ }
+ ]
+ },
+ },
+ format="json",
+ )
+
+ self.assertEqual(response.status_code, status.HTTP_200_OK)
+
+ # Total volume insight should not be updated because we manually renamed it;
+ # the unique users insight should still be updated
+ feature_flag = FeatureFlag.objects.get(id=flag_id)
+ assert feature_flag.usage_dashboard is not None, "Usage dashboard was not created"
+ insights = feature_flag.usage_dashboard.insights
+ self.assertIsNone(insights.filter(name="Feature Flag Called Total Volume").first())
+ total_volume_insight = insights.get(name="This is a changed description")
+ self.assertEqual(
+ total_volume_insight.description,
+ "Shows the number of total calls made on feature flag with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ total_volume_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+ unique_users_insight = insights.get(name="Feature Flag calls made by unique users per variant")
+ self.assertEqual(
+ unique_users_insight.description,
+ "Shows the number of unique user calls made on feature flag per variant with key: a-new-feature-flag-key",
+ )
+ self.assertEqual(
+ unique_users_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-new-feature-flag-key",
+ )
+
+ @patch("posthog.api.feature_flag.report_user_action")
+ def test_updating_feature_flag_key_does_not_update_insight_with_changed_filter(self, mock_capture):
+ with freeze_time("2021-08-25T22:09:14.252Z") as frozen_datetime:
+ response = self.client.post(
+ f"/api/projects/{self.team.id}/feature_flags/",
+ {"name": "original name", "key": "a-feature-flag-that-is-updated"},
+ format="json",
+ )
+ self.assertEqual(response.status_code, status.HTTP_201_CREATED)
+ flag_id = response.json()["id"]
+
+ frozen_datetime.tick(delta=datetime.timedelta(minutes=10))
+
+ # Assert that the insights were created properly.
+ feature_flag = FeatureFlag.objects.get(id=flag_id)
+ assert feature_flag.usage_dashboard is not None, "Usage dashboard was not created"
+ insights = feature_flag.usage_dashboard.insights
+ total_volume_insight = insights.get(name="Feature Flag Called Total Volume")
+ self.assertEqual(
+ total_volume_insight.description,
+ "Shows the number of total calls made on feature flag with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ total_volume_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+ unique_users_insight = insights.get(name="Feature Flag calls made by unique users per variant")
+ self.assertEqual(
+ unique_users_insight.description,
+ "Shows the number of unique user calls made on feature flag per variant with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ unique_users_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+ total_volume_insight.query["source"]["properties"]["values"][0]["values"][0]["value"] = (
+ "something_unexpected"
+ )
+ total_volume_insight.save()
+
+ # Update the feature flag key
+ response = self.client.patch(
+ f"/api/projects/{self.team.id}/feature_flags/{flag_id}",
+ {
+ "key": "a-new-feature-flag-key",
+ "filters": {
+ "groups": [
+ {
+ "rollout_percentage": 65,
+ "properties": [
+ {
+ "key": "email",
+ "type": "person",
+ "value": "@posthog.com",
+ "operator": "icontains",
+ }
+ ],
+ }
+ ]
+ },
+ },
+ format="json",
+ )
+
+ self.assertEqual(response.status_code, status.HTTP_200_OK)
+
+ # Total volume insight should not be updated because we manually changed its filter value;
+ # the unique users insight should still be updated
+ feature_flag = FeatureFlag.objects.get(id=flag_id)
+ assert feature_flag.usage_dashboard is not None, "Usage dashboard was not created"
+ insights = feature_flag.usage_dashboard.insights
+ total_volume_insight = insights.get(name="Feature Flag Called Total Volume")
+ self.assertEqual(
+ total_volume_insight.description,
+ "Shows the number of total calls made on feature flag with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ total_volume_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "something_unexpected",
+ )
+ unique_users_insight = insights.get(name="Feature Flag calls made by unique users per variant")
+ self.assertEqual(
+ unique_users_insight.description,
+ "Shows the number of unique user calls made on feature flag per variant with key: a-new-feature-flag-key",
+ )
+ self.assertEqual(
+ unique_users_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-new-feature-flag-key",
+ )
+
+ @patch("posthog.api.feature_flag.report_user_action")
+ def test_updating_feature_flag_key_does_not_update_insight_with_removed_filter(self, mock_capture):
+ with freeze_time("2021-08-25T22:09:14.252Z") as frozen_datetime:
+ response = self.client.post(
+ f"/api/projects/{self.team.id}/feature_flags/",
+ {"name": "original name", "key": "a-feature-flag-that-is-updated"},
+ format="json",
+ )
+ self.assertEqual(response.status_code, status.HTTP_201_CREATED)
+ flag_id = response.json()["id"]
+
+ frozen_datetime.tick(delta=datetime.timedelta(minutes=10))
+
+ # Assert that the insights were created properly.
+ feature_flag = FeatureFlag.objects.get(id=flag_id)
+ assert feature_flag.usage_dashboard is not None, "Usage dashboard was not created"
+ insights = feature_flag.usage_dashboard.insights
+ total_volume_insight = insights.get(name="Feature Flag Called Total Volume")
+ self.assertEqual(
+ total_volume_insight.description,
+ "Shows the number of total calls made on feature flag with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ total_volume_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+ unique_users_insight = insights.get(name="Feature Flag calls made by unique users per variant")
+ self.assertEqual(
+ unique_users_insight.description,
+ "Shows the number of unique user calls made on feature flag per variant with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ unique_users_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-feature-flag-that-is-updated",
+ )
+ # clear the values from total_volume_insight.query["source"]["properties"]["values"]
+ total_volume_insight.query["source"]["properties"]["values"] = []
+ total_volume_insight.save()
+
+ # Update the feature flag key
+ response = self.client.patch(
+ f"/api/projects/{self.team.id}/feature_flags/{flag_id}",
+ {
+ "key": "a-new-feature-flag-key",
+ "filters": {
+ "groups": [
+ {
+ "rollout_percentage": 65,
+ "properties": [
+ {
+ "key": "email",
+ "type": "person",
+ "value": "@posthog.com",
+ "operator": "icontains",
+ }
+ ],
+ }
+ ]
+ },
+ },
+ format="json",
+ )
+
+ self.assertEqual(response.status_code, status.HTTP_200_OK)
+
+ # Total volume insight should not be updated because we removed its filter
+ # The unique users insight should still be updated
+ feature_flag = FeatureFlag.objects.get(id=flag_id)
+ assert feature_flag.usage_dashboard is not None, "Usage dashboard was not created"
+ insights = feature_flag.usage_dashboard.insights
+ total_volume_insight = insights.get(name="Feature Flag Called Total Volume")
+ self.assertEqual(
+ total_volume_insight.description,
+ "Shows the number of total calls made on feature flag with key: a-feature-flag-that-is-updated",
+ )
+ self.assertEqual(
+ total_volume_insight.query["source"]["properties"]["values"],
+ [],
+ )
+ unique_users_insight = insights.get(name="Feature Flag calls made by unique users per variant")
+ self.assertEqual(
+ unique_users_insight.description,
+ "Shows the number of unique user calls made on feature flag per variant with key: a-new-feature-flag-key",
+ )
+ self.assertEqual(
+ unique_users_insight.query["source"]["properties"]["values"][0]["values"][0]["value"],
+ "a-new-feature-flag-key",
+ )
+
def test_hard_deleting_feature_flag_is_forbidden(self):
new_user = User.objects.create_and_join(self.organization, "new_annotations@posthog.com", None)
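Note: the long subscript chains in the assertions above walk the auto-generated trends query down to its `$feature_flag` property filter. A trimmed, illustrative sketch of the shape being inspected (field names taken from the dashboard template below, everything else omitted):

# Illustrative only: trimmed to the fields the tests actually touch.
query = {
    "kind": "InsightVizNode",
    "source": {
        "properties": {
            "values": [  # the rename logic expects exactly one property group here
                {"values": [{"key": "$feature_flag", "value": "a-feature-flag-that-is-updated"}]},
            ],
        },
    },
}
assert query["source"]["properties"]["values"][0]["values"][0]["value"] == "a-feature-flag-that-is-updated"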
diff --git a/posthog/api/test/test_property_definition.py b/posthog/api/test/test_property_definition.py
index 378f66d7884a5..081de3b3df1f0 100644
--- a/posthog/api/test/test_property_definition.py
+++ b/posthog/api/test/test_property_definition.py
@@ -4,7 +4,6 @@
from rest_framework import status
-from posthog.api.property_definition import PropertyDefinitionQuerySerializer
from posthog.models import (
EventDefinition,
EventProperty,
@@ -13,6 +12,7 @@
Team,
ActivityLog,
)
+from posthog.taxonomy.property_definition_api import PropertyDefinitionQuerySerializer
from posthog.test.base import APIBaseTest, BaseTest
diff --git a/posthog/batch_exports/service.py b/posthog/batch_exports/service.py
index 7d5cf8342a9af..44a5ffa3aa5c0 100644
--- a/posthog/batch_exports/service.py
+++ b/posthog/batch_exports/service.py
@@ -494,10 +494,7 @@ async def start_backfill_batch_export_workflow(
"backfill-batch-export",
inputs,
id=workflow_id,
- # TODO: Backfills could also run in async queue.
- # But tests expect them not to, so we keep them in sync
- # queue after everything is migrated.
- task_queue=SYNC_BATCH_EXPORTS_TASK_QUEUE,
+ task_queue=BATCH_EXPORTS_TASK_QUEUE,
)
return workflow_id
@@ -648,11 +645,7 @@ def sync_batch_export(batch_export: BatchExport, created: bool):
destination_config_fields = {field.name for field in fields(workflow_inputs)}
destination_config = {k: v for k, v in batch_export.destination.config.items() if k in destination_config_fields}
- task_queue = (
- BATCH_EXPORTS_TASK_QUEUE
- if batch_export.destination.type in ("BigQuery", "Redshift")
- else SYNC_BATCH_EXPORTS_TASK_QUEUE
- )
+ task_queue = SYNC_BATCH_EXPORTS_TASK_QUEUE if batch_export.destination.type == "HTTP" else BATCH_EXPORTS_TASK_QUEUE
temporal = sync_connect()
schedule = Schedule(
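With this change only HTTP destinations stay on the synchronous Temporal queue; backfills and every other destination type go to the async batch exports queue. A minimal sketch of the routing (the queue name strings are placeholders, not the real settings values):

SYNC_BATCH_EXPORTS_TASK_QUEUE = "sync-batch-exports"  # placeholder value
BATCH_EXPORTS_TASK_QUEUE = "batch-exports"            # placeholder value

def pick_task_queue(destination_type: str) -> str:
    # Mirrors the new selection in sync_batch_export: only HTTP remains synchronous.
    return SYNC_BATCH_EXPORTS_TASK_QUEUE if destination_type == "HTTP" else BATCH_EXPORTS_TASK_QUEUE

assert pick_task_queue("HTTP") == "sync-batch-exports"
assert pick_task_queue("BigQuery") == "batch-exports"
assert pick_task_queue("Snowflake") == "batch-exports"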
diff --git a/posthog/caching/warming.py b/posthog/caching/warming.py
index faf20218d0930..16ee52fc9f1d9 100644
--- a/posthog/caching/warming.py
+++ b/posthog/caching/warming.py
@@ -165,7 +165,11 @@ def schedule_warming_for_teams_task():
capture_ph_event(
str(team.uuid),
"cache warming - insights to cache",
- properties={"count": len(insight_tuples)},
+ properties={
+ "count": len(insight_tuples),
+ "team_id": team.id,
+ "organization_id": team.organization_id,
+ },
)
# We chain the task execution to prevent queries *for a single team* running at the same time
@@ -233,6 +237,8 @@ def warm_insight_cache_task(insight_id: int, dashboard_id: Optional[int]):
"insight_id": insight.pk,
"dashboard_id": dashboard_id,
"is_cached": is_cached,
+ "team_id": insight.team_id,
+ "organization_id": insight.team.organization_id,
},
)
diff --git a/posthog/helpers/dashboard_templates.py b/posthog/helpers/dashboard_templates.py
index 313f8c6722a2a..7cfb390128335 100644
--- a/posthog/helpers/dashboard_templates.py
+++ b/posthog/helpers/dashboard_templates.py
@@ -557,6 +557,10 @@ def create_dashboard_from_template(template_key: str, dashboard: Dashboard) -> N
create_from_template(dashboard, template)
+FEATURE_FLAG_TOTAL_VOLUME_INSIGHT_NAME = "Feature Flag Called Total Volume"
+FEATURE_FLAG_UNIQUE_USERS_INSIGHT_NAME = "Feature Flag calls made by unique users per variant"
+
+
def create_feature_flag_dashboard(feature_flag, dashboard: Dashboard) -> None:
dashboard.filters = {"date_from": "-30d"}
if dashboard.team.organization.is_feature_available(AvailableFeature.TAGGING):
@@ -571,8 +575,8 @@ def create_feature_flag_dashboard(feature_flag, dashboard: Dashboard) -> None:
# 1 row
_create_tile_for_insight(
dashboard,
- name="Feature Flag Called Total Volume",
- description="Shows the number of total calls made on feature flag with key: " + feature_flag.key,
+ name=FEATURE_FLAG_TOTAL_VOLUME_INSIGHT_NAME,
+ description=_get_feature_flag_total_volume_insight_description(feature_flag.key),
query={
"kind": "InsightVizNode",
"source": {
@@ -627,9 +631,8 @@ def create_feature_flag_dashboard(feature_flag, dashboard: Dashboard) -> None:
_create_tile_for_insight(
dashboard,
- name="Feature Flag calls made by unique users per variant",
- description="Shows the number of unique user calls made on feature flag per variant with key: "
- + feature_flag.key,
+ name=FEATURE_FLAG_UNIQUE_USERS_INSIGHT_NAME,
+ description=_get_feature_flag_unique_users_insight_description(feature_flag.key),
query={
"kind": "InsightVizNode",
"source": {
@@ -690,6 +693,64 @@ def create_feature_flag_dashboard(feature_flag, dashboard: Dashboard) -> None:
)
+def _get_feature_flag_total_volume_insight_description(feature_flag_key: str) -> str:
+ return f"Shows the number of total calls made on feature flag with key: {feature_flag_key}"
+
+
+def _get_feature_flag_unique_users_insight_description(feature_flag_key: str) -> str:
+ return f"Shows the number of unique user calls made on feature flag per variant with key: {feature_flag_key}"
+
+
+def update_feature_flag_dashboard(feature_flag, old_key: str) -> None:
+ # We need to update the *system* created insights with the new key, so we search for them by name
+ dashboard = feature_flag.usage_dashboard
+
+ if not dashboard:
+ return
+
+ total_volume_insight = dashboard.insights.filter(name=FEATURE_FLAG_TOTAL_VOLUME_INSIGHT_NAME).first()
+ if total_volume_insight:
+ _update_tile_with_new_key(
+ total_volume_insight,
+ feature_flag.key,
+ old_key,
+ _get_feature_flag_total_volume_insight_description,
+ )
+
+ unique_users_insight = dashboard.insights.filter(name=FEATURE_FLAG_UNIQUE_USERS_INSIGHT_NAME).first()
+ if unique_users_insight:
+ _update_tile_with_new_key(
+ unique_users_insight,
+ feature_flag.key,
+ old_key,
+ _get_feature_flag_unique_users_insight_description,
+ )
+
+
+def _update_tile_with_new_key(insight, new_key: str, old_key: str, description_function: Callable[[str], str]) -> None:
+ old_description = description_function(old_key)
+ new_description = description_function(new_key)
+
+ if insight.description != old_description: # We don't touch insights that have been manually edited
+ return
+
+ if insight.query:
+ property_values = insight.query.get("source", {}).get("properties", {}).get("values", [])
+ if len(property_values) != 1: # Exit if not exactly one property group
+ return
+
+ property_group = property_values[0]
+ values = property_group.get("values", [])
+ # Only proceed if there's exactly one value and it's a feature flag
+ if len(values) == 1 and values[0].get("key") == "$feature_flag" and values[0].get("value") == old_key:
+ values[0]["value"] = new_key
+ insight.query = insight.query # Trigger field update
+ # Only update the insight if it matches what we expect for the system created insights
+ insight.description = new_description
+ insight.save()
+ return
+
+
def add_enriched_insights_to_feature_flag_dashboard(feature_flag, dashboard: Dashboard) -> None:
# 1 row
_create_tile_for_insight(
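The rename hook above needs a caller that still knows the old key; that wiring is not part of this hunk, so the following is a hypothetical call-site sketch (the helper name and import path are taken from this file, `rename_feature_flag` is made up for illustration):

# Hypothetical call site: capture the old key before overwriting it, then let the
# dashboard helper rewrite only the auto-generated insights that still match it.
from posthog.helpers.dashboard_templates import update_feature_flag_dashboard

def rename_feature_flag(feature_flag, new_key: str) -> None:
    old_key = feature_flag.key
    feature_flag.key = new_key
    feature_flag.save()
    update_feature_flag_dashboard(feature_flag, old_key)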
diff --git a/posthog/hogql_queries/ai/team_taxonomy_query_runner.py b/posthog/hogql_queries/ai/team_taxonomy_query_runner.py
index b93ecb69ff340..e197e4c338566 100644
--- a/posthog/hogql_queries/ai/team_taxonomy_query_runner.py
+++ b/posthog/hogql_queries/ai/team_taxonomy_query_runner.py
@@ -12,7 +12,7 @@
)
try:
- from ee.hogai.taxonomy import CORE_FILTER_DEFINITIONS_BY_GROUP
+ from posthog.taxonomy.taxonomy import CORE_FILTER_DEFINITIONS_BY_GROUP
except ImportError:
CORE_FILTER_DEFINITIONS_BY_GROUP = {}
diff --git a/posthog/hogql_queries/insights/trends/aggregation_operations.py b/posthog/hogql_queries/insights/trends/aggregation_operations.py
index 9e88bfca8b4aa..036c84d8e9786 100644
--- a/posthog/hogql_queries/insights/trends/aggregation_operations.py
+++ b/posthog/hogql_queries/insights/trends/aggregation_operations.py
@@ -73,6 +73,8 @@ def select_aggregation(self) -> ast.Expr:
return self._math_func("max", None)
elif self.series.math == "median":
return self._math_quantile(0.5, None)
+ elif self.series.math == "p75":
+ return self._math_quantile(0.75, None)
elif self.series.math == "p90":
return self._math_quantile(0.9, None)
elif self.series.math == "p95":
@@ -106,6 +108,7 @@ def is_count_per_actor_variant(self):
"min_count_per_actor",
"max_count_per_actor",
"median_count_per_actor",
+ "p75_count_per_actor",
"p90_count_per_actor",
"p95_count_per_actor",
"p99_count_per_actor",
@@ -247,6 +250,8 @@ def _actors_inner_select_query(
math_func = self._math_func("max", ["total"])
elif self.series.math == "median_count_per_actor":
math_func = self._math_quantile(0.5, ["total"])
+ elif self.series.math == "p75_count_per_actor":
+ math_func = self._math_quantile(0.75, ["total"])
elif self.series.math == "p90_count_per_actor":
math_func = self._math_quantile(0.9, ["total"])
elif self.series.math == "p95_count_per_actor":
diff --git a/posthog/hogql_queries/insights/trends/test/test_aggregation_operations.py b/posthog/hogql_queries/insights/trends/test/test_aggregation_operations.py
index ddc4379b37320..fcaf17321a026 100644
--- a/posthog/hogql_queries/insights/trends/test/test_aggregation_operations.py
+++ b/posthog/hogql_queries/insights/trends/test/test_aggregation_operations.py
@@ -32,6 +32,7 @@
[PropertyMathType.MIN, "$browser"],
[PropertyMathType.MAX, "$browser"],
[PropertyMathType.MEDIAN, "$browser"],
+ [PropertyMathType.P75, "$browser"],
[PropertyMathType.P90, "$browser"],
[PropertyMathType.P95, "$browser"],
[PropertyMathType.P99, "$browser"],
@@ -39,6 +40,7 @@
[CountPerActorMathType.MIN_COUNT_PER_ACTOR, None],
[CountPerActorMathType.MAX_COUNT_PER_ACTOR, None],
[CountPerActorMathType.MEDIAN_COUNT_PER_ACTOR, None],
+ [CountPerActorMathType.P75_COUNT_PER_ACTOR, None],
[CountPerActorMathType.P90_COUNT_PER_ACTOR, None],
[CountPerActorMathType.P95_COUNT_PER_ACTOR, None],
[CountPerActorMathType.P99_COUNT_PER_ACTOR, None],
@@ -79,6 +81,7 @@ def test_all_cases_return(
[PropertyMathType.MIN, False],
[PropertyMathType.MAX, False],
[PropertyMathType.MEDIAN, False],
+ [PropertyMathType.P75, False],
[PropertyMathType.P90, False],
[PropertyMathType.P95, False],
[PropertyMathType.P99, False],
@@ -86,6 +89,7 @@ def test_all_cases_return(
[CountPerActorMathType.MIN_COUNT_PER_ACTOR, True],
[CountPerActorMathType.MAX_COUNT_PER_ACTOR, True],
[CountPerActorMathType.MEDIAN_COUNT_PER_ACTOR, True],
+ [CountPerActorMathType.P75_COUNT_PER_ACTOR, True],
[CountPerActorMathType.P90_COUNT_PER_ACTOR, True],
[CountPerActorMathType.P95_COUNT_PER_ACTOR, True],
[CountPerActorMathType.P99_COUNT_PER_ACTOR, True],
diff --git a/posthog/hogql_queries/insights/trends/test/test_trends.py b/posthog/hogql_queries/insights/trends/test/test_trends.py
index 04d4fc97fd312..216376a7d1817 100644
--- a/posthog/hogql_queries/insights/trends/test/test_trends.py
+++ b/posthog/hogql_queries/insights/trends/test/test_trends.py
@@ -4094,6 +4094,10 @@ def test_max_filtering(self):
def test_median_filtering(self):
self._test_math_property_aggregation("median", values=range(101, 201), expected_value=150)
+ @also_test_with_materialized_columns(["some_number"])
+ def test_p75_filtering(self):
+ self._test_math_property_aggregation("p75", values=range(101, 201), expected_value=175)
+
@also_test_with_materialized_columns(["some_number"])
def test_p90_filtering(self):
self._test_math_property_aggregation("p90", values=range(101, 201), expected_value=190)
diff --git a/posthog/hogql_queries/utils/query_compare_to_date_range.py b/posthog/hogql_queries/utils/query_compare_to_date_range.py
index b72503e39d03a..4cabf097b1894 100644
--- a/posthog/hogql_queries/utils/query_compare_to_date_range.py
+++ b/posthog/hogql_queries/utils/query_compare_to_date_range.py
@@ -33,7 +33,12 @@ def dates(self) -> tuple[datetime, datetime]:
current_period_date_from = super().date_from()
current_period_date_to = super().date_to()
- start_date = relative_date_parse(self.compare_to, self._team.timezone_info, now=current_period_date_from)
+ start_date = relative_date_parse(
+ self.compare_to,
+ self._team.timezone_info,
+ now=current_period_date_from,
+ human_friendly_comparison_periods=bool(self._team.human_friendly_comparison_periods),
+ )
return (
start_date,
diff --git a/posthog/hogql_queries/utils/test/test_query_compare_to_date_range.py b/posthog/hogql_queries/utils/test/test_query_compare_to_date_range.py
index 30181887fde8c..2e0c901bbdc0c 100644
--- a/posthog/hogql_queries/utils/test/test_query_compare_to_date_range.py
+++ b/posthog/hogql_queries/utils/test/test_query_compare_to_date_range.py
@@ -41,3 +41,40 @@ def test_feb(self):
)
self.assertEqual(query_date_range.date_from(), parser.isoparse("2021-02-28T00:00:00Z"))
self.assertEqual(query_date_range.date_to(), parser.isoparse("2021-03-02T23:59:59.999999Z"))
+
+ # Same as above, but with human-friendly comparison periods: the offset is rounded to whole weeks instead of exact month/year deltas
+ def test_minus_one_month_human_friendly(self):
+ self.team.human_friendly_comparison_periods = True
+
+ now = parser.isoparse("2021-08-25T00:00:00.000Z")
+ date_range = DateRange(date_from="-48h")
+ query_date_range = QueryCompareToDateRange(
+ team=self.team,
+ date_range=date_range,
+ interval=IntervalType.DAY,
+ now=now,
+ compare_to="-1m",
+ )
+ self.assertEqual(query_date_range.date_from(), parser.isoparse("2021-07-26T00:00:00Z"))
+ self.assertEqual(query_date_range.date_to(), parser.isoparse("2021-07-28T23:59:59.999999Z"))
+
+ # Human-friendly comparison periods guarantee that the comparison period ends on the same weekday as the current period
+ self.assertEqual(query_date_range.date_to().isoweekday(), now.isoweekday())
+
+ def test_minus_one_year_human_friendly(self):
+ self.team.human_friendly_comparison_periods = True
+
+ now = parser.isoparse("2021-08-25T00:00:00.000Z")
+ date_range = DateRange(date_from="-48h")
+ query_date_range = QueryCompareToDateRange(
+ team=self.team,
+ date_range=date_range,
+ interval=IntervalType.DAY,
+ now=now,
+ compare_to="-1y",
+ )
+ self.assertEqual(query_date_range.date_from(), parser.isoparse("2020-08-24T00:00:00Z"))
+ self.assertEqual(query_date_range.date_to(), parser.isoparse("2020-08-26T23:59:59.999999Z"))
+
+ # Human-friendly comparison periods guarantee that the comparison period ends on the same weekday as the current period
+ self.assertEqual(query_date_range.date_to().isoweekday(), now.isoweekday())
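Both expectations above reduce to whole-week offsets: with the new team setting enabled, "-1m" lands 28 days back and "-1y" 364 days back, so the comparison window keeps the same weekdays as the current window. A quick sanity check of that arithmetic (illustrative, not the relative_date_parse implementation):

from datetime import date, timedelta

current_start = date(2021, 8, 23)  # start of the "-48h" window in the tests above

one_month_friendly = current_start - timedelta(weeks=4)   # 28 days
one_year_friendly = current_start - timedelta(weeks=52)   # 364 days

assert one_month_friendly == date(2021, 7, 26)
assert one_year_friendly == date(2020, 8, 24)
assert one_month_friendly.isoweekday() == current_start.isoweekday()
assert one_year_friendly.isoweekday() == current_start.isoweekday()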
diff --git a/posthog/management/commands/compare_hogql_insights.py b/posthog/management/commands/compare_hogql_insights.py
index 9a49af107e063..622b93d0794c2 100644
--- a/posthog/management/commands/compare_hogql_insights.py
+++ b/posthog/management/commands/compare_hogql_insights.py
@@ -32,10 +32,10 @@ def handle(self, *args, **options):
# len(insights)
for insight in insights[0:500]:
for event in insight.filters.get("events", []):
- if event.get("math") in ("median", "p90", "p95", "p99"):
+ if event.get("math") in ("median", "p75", "p90", "p95", "p99"):
event["math"] = "sum"
for event in insight.filters.get("actions", []):
- if event.get("math") in ("median", "p90", "p95", "p99"):
+ if event.get("math") in ("median", "p75", "p90", "p95", "p99"):
event["math"] = "sum"
try:
print( # noqa: T201
diff --git a/posthog/migrations/0540_team_human_friendly_comparison_periods.py b/posthog/migrations/0540_team_human_friendly_comparison_periods.py
new file mode 100644
index 0000000000000..f5cd9ded5632a
--- /dev/null
+++ b/posthog/migrations/0540_team_human_friendly_comparison_periods.py
@@ -0,0 +1,17 @@
+# Generated by Django 4.2.15 on 2024-12-27 19:22
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+ dependencies = [
+ ("posthog", "0539_user_role_at_organization"),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name="team",
+ name="human_friendly_comparison_periods",
+ field=models.BooleanField(default=False, null=True, blank=True),
+ ),
+ ]
diff --git a/posthog/migrations/max_migration.txt b/posthog/migrations/max_migration.txt
index b182af0cabf6e..602ce56966064 100644
--- a/posthog/migrations/max_migration.txt
+++ b/posthog/migrations/max_migration.txt
@@ -1 +1 @@
-0539_user_role_at_organization
+0540_team_human_friendly_comparison_periods
diff --git a/posthog/models/entity/entity.py b/posthog/models/entity/entity.py
index 29cb5d4212d63..0c2cc7e284bdf 100644
--- a/posthog/models/entity/entity.py
+++ b/posthog/models/entity/entity.py
@@ -20,24 +20,26 @@
"monthly_active",
"unique_group",
"unique_session",
- # TODO: When we are finally on Python 3.11+, inline the below as *PROPERTY_MATH_FUNCTIONS.keys()
+ "hogql",
+ # Equivalent to *PROPERTY_MATH_FUNCTIONS.keys()
"sum",
"min",
"max",
"avg",
"median",
+ "p75",
"p90",
"p95",
"p99",
- # TODO: When we are finally on Python 3.11+, inline the below as *COUNT_PER_ACTOR_MATH_FUNCTIONS.keys()
+ # Equivalent to *COUNT_PER_ACTOR_MATH_FUNCTIONS.keys()
"min_count_per_actor",
"max_count_per_actor",
"avg_count_per_actor",
"median_count_per_actor",
+ "p75_count_per_actor",
"p90_count_per_actor",
"p95_count_per_actor",
"p99_count_per_actor",
- "hogql",
]
diff --git a/posthog/models/filters/test/__snapshots__/test_filter.ambr b/posthog/models/filters/test/__snapshots__/test_filter.ambr
index e9601ed37a646..9d9ed00a04c0b 100644
--- a/posthog/models/filters/test/__snapshots__/test_filter.ambr
+++ b/posthog/models/filters/test/__snapshots__/test_filter.ambr
@@ -50,6 +50,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -122,6 +123,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -194,6 +196,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -266,6 +269,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -338,6 +342,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/models/team/team.py b/posthog/models/team/team.py
index 5cf0bd55005d7..87a23c725cbbd 100644
--- a/posthog/models/team/team.py
+++ b/posthog/models/team/team.py
@@ -282,6 +282,7 @@ class Meta:
person_display_name_properties: ArrayField = ArrayField(models.CharField(max_length=400), null=True, blank=True)
live_events_columns: ArrayField = ArrayField(models.TextField(), null=True, blank=True)
recording_domains: ArrayField = ArrayField(models.CharField(max_length=200, null=True), blank=True, null=True)
+ human_friendly_comparison_periods = models.BooleanField(default=False, null=True, blank=True)
cookieless_server_hash_mode = models.SmallIntegerField(
default=CookielessServerHashMode.DISABLED, choices=CookielessServerHashMode.choices, null=True
)
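The new column defaults to off for every team; a minimal sketch of enabling it on an existing team (ORM access and a concrete team id are assumed, and how the toggle is exposed in the settings UI/API is outside this hunk):

from posthog.models import Team

team = Team.objects.get(pk=1)  # hypothetical team id
team.human_friendly_comparison_periods = True
team.save(update_fields=["human_friendly_comparison_periods"])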
diff --git a/posthog/queries/test/test_trends.py b/posthog/queries/test/test_trends.py
index 2a03636afbd23..bf2f2f0f035d9 100644
--- a/posthog/queries/test/test_trends.py
+++ b/posthog/queries/test/test_trends.py
@@ -3611,6 +3611,10 @@ def test_max_filtering(self):
def test_median_filtering(self):
self._test_math_property_aggregation("median", values=range(101, 201), expected_value=150)
+ @also_test_with_materialized_columns(["some_number"])
+ def test_p75_filtering(self):
+ self._test_math_property_aggregation("p75", values=range(101, 201), expected_value=175)
+
@also_test_with_materialized_columns(["some_number"])
def test_p90_filtering(self):
self._test_math_property_aggregation("p90", values=range(101, 201), expected_value=190)
diff --git a/posthog/queries/trends/util.py b/posthog/queries/trends/util.py
index 09f34ff135633..026e14c5e5748 100644
--- a/posthog/queries/trends/util.py
+++ b/posthog/queries/trends/util.py
@@ -33,6 +33,7 @@
"min": "min",
"max": "max",
"median": "quantile(0.50)",
+ "p75": "quantile(0.75)",
"p90": "quantile(0.90)",
"p95": "quantile(0.95)",
"p99": "quantile(0.99)",
@@ -43,6 +44,7 @@
"min_count_per_actor": "min",
"max_count_per_actor": "max",
"median_count_per_actor": "quantile(0.50)",
+ "p75_count_per_actor": "quantile(0.75)",
"p90_count_per_actor": "quantile(0.90)",
"p95_count_per_actor": "quantile(0.95)",
"p99_count_per_actor": "quantile(0.99)",
diff --git a/posthog/schema.py b/posthog/schema.py
index 9df93c460b9b5..c76a742d444a3 100644
--- a/posthog/schema.py
+++ b/posthog/schema.py
@@ -543,6 +543,7 @@ class CountPerActorMathType(StrEnum):
MIN_COUNT_PER_ACTOR = "min_count_per_actor"
MAX_COUNT_PER_ACTOR = "max_count_per_actor"
MEDIAN_COUNT_PER_ACTOR = "median_count_per_actor"
+ P75_COUNT_PER_ACTOR = "p75_count_per_actor"
P90_COUNT_PER_ACTOR = "p90_count_per_actor"
P95_COUNT_PER_ACTOR = "p95_count_per_actor"
P99_COUNT_PER_ACTOR = "p99_count_per_actor"
@@ -1290,6 +1291,7 @@ class PropertyMathType(StrEnum):
MIN = "min"
MAX = "max"
MEDIAN = "median"
+ P75 = "p75"
P90 = "p90"
P95 = "p95"
P99 = "p99"
diff --git a/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr b/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr
index 43351b8340128..e62c4b66a763c 100644
--- a/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr
+++ b/posthog/session_recordings/test/__snapshots__/test_session_recordings.ambr
@@ -50,6 +50,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -122,6 +123,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -194,6 +196,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -266,6 +269,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -338,6 +342,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -519,6 +524,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -618,6 +624,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1030,6 +1037,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1182,6 +1190,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1254,6 +1263,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1326,6 +1336,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1398,6 +1409,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1470,6 +1482,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1542,6 +1555,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1647,6 +1661,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2206,6 +2221,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2349,6 +2365,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2970,6 +2987,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3113,6 +3131,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3486,6 +3505,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3722,6 +3742,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -3865,6 +3886,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4434,6 +4456,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4577,6 +4600,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -4975,6 +4999,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5208,6 +5233,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -5351,6 +5377,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6418,6 +6445,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -6561,6 +6589,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7138,6 +7167,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7281,6 +7311,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7836,6 +7867,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -7979,6 +8011,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8596,6 +8629,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -8739,6 +8773,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/settings/temporal.py b/posthog/settings/temporal.py
index 28fc449404ad8..2010a15e8ecab 100644
--- a/posthog/settings/temporal.py
+++ b/posthog/settings/temporal.py
@@ -19,19 +19,27 @@
BATCH_EXPORT_S3_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES: int = get_from_env(
"BATCH_EXPORT_S3_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES", 0, type_cast=int
)
+
BATCH_EXPORT_SNOWFLAKE_UPLOAD_CHUNK_SIZE_BYTES: int = 1024 * 1024 * 100 # 100MB
BATCH_EXPORT_SNOWFLAKE_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES: int = get_from_env(
"BATCH_EXPORT_SNOWFLAKE_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES", 1024 * 1024 * 300, type_cast=int
)
+
BATCH_EXPORT_POSTGRES_UPLOAD_CHUNK_SIZE_BYTES: int = 1024 * 1024 * 50 # 50MB
+BATCH_EXPORT_POSTGRES_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES: int = get_from_env(
+ "BATCH_EXPORT_POSTGRES_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES", 1024 * 1024 * 300, type_cast=int
+)
+
BATCH_EXPORT_BIGQUERY_UPLOAD_CHUNK_SIZE_BYTES: int = 1024 * 1024 * 100 # 100MB
BATCH_EXPORT_BIGQUERY_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES: int = get_from_env(
"BATCH_EXPORT_BIGQUERY_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES", 0, type_cast=int
)
+
BATCH_EXPORT_REDSHIFT_UPLOAD_CHUNK_SIZE_BYTES: int = 1024 * 1024 * 8 # 8MB
BATCH_EXPORT_REDSHIFT_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES: int = get_from_env(
"BATCH_EXPORT_REDSHIFT_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES", 1024 * 1024 * 300, type_cast=int
)
+
BATCH_EXPORT_HTTP_UPLOAD_CHUNK_SIZE_BYTES: int = 1024 * 1024 * 50 # 50MB
BATCH_EXPORT_HTTP_BATCH_SIZE: int = 5000
BATCH_EXPORT_BUFFER_QUEUE_MAX_SIZE_BYTES: int = 1024 * 1024 * 300 # 300MB
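The new Postgres queue-size limit follows the same pattern as the others in this file: an environment override with a 300MB default. A rough stand-in for what the `get_from_env(..., type_cast=int)` call above does (illustrative, not the real helper):

import os

def get_int_from_env(key: str, default: int) -> int:
    # Stand-in for the get_from_env helper used above, with type_cast=int (illustrative only).
    raw = os.environ.get(key)
    return int(raw) if raw is not None else default

queue_max = get_int_from_env("BATCH_EXPORT_POSTGRES_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES", 1024 * 1024 * 300)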
diff --git a/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr b/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr
index 0a4f22903ccf0..747270b5478c2 100644
--- a/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr
+++ b/posthog/tasks/test/__snapshots__/test_process_scheduled_changes.ambr
@@ -73,6 +73,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -195,6 +196,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -272,6 +274,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -474,6 +477,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -615,6 +619,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -766,6 +771,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -843,6 +849,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1008,6 +1015,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1117,6 +1125,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1246,6 +1255,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1600,6 +1610,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/taxonomy/__init__.py b/posthog/taxonomy/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/posthog/api/property_definition.py b/posthog/taxonomy/property_definition_api.py
similarity index 92%
rename from posthog/api/property_definition.py
rename to posthog/taxonomy/property_definition_api.py
index 84b7a03f030bd..ed957af441245 100644
--- a/posthog/api/property_definition.py
+++ b/posthog/taxonomy/property_definition_api.py
@@ -1,6 +1,6 @@
import dataclasses
import json
-from typing import Any, Optional, cast
+from typing import Any, Optional, cast, Self
from django.db import connection
from loginas.utils import is_impersonated_session
@@ -19,6 +19,7 @@
from posthog.models import EventProperty, PropertyDefinition, User
from posthog.models.activity_logging.activity_log import Detail, log_activity
from posthog.models.utils import UUIDT
+from posthog.taxonomy.taxonomy import PROPERTY_NAME_ALIASES
class SeenTogetherQuerySerializer(serializers.Serializer):
@@ -126,7 +127,7 @@ class QueryContext:
params: dict = dataclasses.field(default_factory=dict)
- def with_properties_to_filter(self, properties_to_filter: Optional[str]) -> "QueryContext":
+ def with_properties_to_filter(self, properties_to_filter: Optional[str]) -> Self:
if properties_to_filter:
return dataclasses.replace(
self,
@@ -136,7 +137,7 @@ def with_properties_to_filter(self, properties_to_filter: Optional[str]) -> "Que
else:
return self
- def with_is_numerical_flag(self, is_numerical: Optional[str]) -> "QueryContext":
+ def with_is_numerical_flag(self, is_numerical: Optional[str]) -> Self:
if is_numerical:
return dataclasses.replace(
self,
@@ -145,7 +146,7 @@ def with_is_numerical_flag(self, is_numerical: Optional[str]) -> "QueryContext":
else:
return self
- def with_feature_flags(self, is_feature_flag: Optional[bool]) -> "QueryContext":
+ def with_feature_flags(self, is_feature_flag: Optional[bool]) -> Self:
if is_feature_flag is None:
return self
elif is_feature_flag:
@@ -202,9 +203,7 @@ def with_type_filter(self, type: str, group_type_index: Optional[int]):
},
)
- def with_event_property_filter(
- self, event_names: Optional[str], filter_by_event_names: Optional[bool]
- ) -> "QueryContext":
+ def with_event_property_filter(self, event_names: Optional[str], filter_by_event_names: Optional[bool]) -> Self:
event_property_filter = ""
event_name_filter = ""
event_property_field = "NULL"
@@ -228,14 +227,14 @@ def with_event_property_filter(
params={**self.params, "event_names": list(map(str, event_names or []))},
)
- def with_search(self, search_query: str, search_kwargs: dict) -> "QueryContext":
+ def with_search(self, search_query: str, search_kwargs: dict) -> Self:
return dataclasses.replace(
self,
search_query=search_query,
params={**self.params, "team_id": self.team_id, **search_kwargs},
)
- def with_excluded_properties(self, excluded_properties: Optional[str], type: str) -> "QueryContext":
+ def with_excluded_properties(self, excluded_properties: Optional[str], type: str) -> Self:
if excluded_properties:
excluded_properties = json.loads(excluded_properties)
@@ -305,45 +304,6 @@ def _join_on_event_property(self):
)
-# See frontend/src/lib/taxonomy.tsx for where this came from and see
-# frontend/scripts/print_property_name_aliases.ts for how to regenerate
-PROPERTY_NAME_ALIASES = {
- "$autocapture_disabled_server_side": "Autocapture Disabled Server-Side",
- "$client_session_initial_referring_host": "Referrer Host",
- "$client_session_initial_utm_content": "Initial UTM Source",
- "$client_session_initial_utm_term": "Initial UTM Source",
- "$console_log_recording_enabled_server_side": "Console Log Recording Enabled Server-Side",
- "$el_text": "Element Text",
- "$exception_colno": "Exception source column number",
- "$exception_handled": "Exception was handled",
- "$exception_lineno": "Exception source line number",
- "$geoip_disable": "GeoIP Disabled",
- "$geoip_time_zone": "Timezone",
- "$group_0": "Group 1",
- "$group_1": "Group 2",
- "$group_2": "Group 3",
- "$group_3": "Group 4",
- "$group_4": "Group 5",
- "$ip": "IP Address",
- "$lib": "Library",
- "$lib_custom_api_host": "Library Custom API Host",
- "$lib_version": "Library Version",
- "$lib_version__major": "Library Version (Major)",
- "$lib_version__minor": "Library Version (Minor)",
- "$lib_version__patch": "Library Version (Patch)",
- "$performance_raw": "Browser Performance",
- "$referrer": "Referrer URL",
- "$selected_content": "Copied content",
- "$session_recording_recorder_version_server_side": "Session Recording Recorder Version Server-Side",
- "$user_agent": "Raw User Agent",
- "build": "App Build",
- "previous_build": "App Previous Build",
- "previous_version": "App Previous Version",
- "referring_application": "Referrer Application",
- "version": "App Version",
-}
-
-
def add_name_alias_to_search_query(search_term: str):
if not search_term:
return ""
@@ -504,6 +464,7 @@ def dangerously_get_queryset(self):
order_by_verified = False
if use_enterprise_taxonomy:
try:
+ # noinspection PyUnresolvedReferences
from ee.models.property_definition import EnterprisePropertyDefinition
# Prevent fetching deprecated `tags` field. Tags are separately fetched in TaggedItemSerializerMixin
@@ -591,6 +552,7 @@ def safely_get_object(self, queryset):
id = self.kwargs["id"]
if self.request.user.organization.is_feature_available(AvailableFeature.INGESTION_TAXONOMY):
try:
+ # noinspection PyUnresolvedReferences
from ee.models.property_definition import EnterprisePropertyDefinition
except ImportError:
pass
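The move from the string annotation "QueryContext" to `Self` (PEP 673, Python 3.11+) keeps the builder-style `with_*` methods typed against the concrete class, including subclasses. A minimal self-contained sketch of the pattern (names are illustrative):

import dataclasses
from typing import Self  # Python 3.11+


@dataclasses.dataclass(frozen=True)
class Ctx:
    params: dict = dataclasses.field(default_factory=dict)

    def with_param(self, key: str, value: str) -> Self:
        # dataclasses.replace returns an instance of the calling class, so Self is accurate.
        return dataclasses.replace(self, params={**self.params, key: value})


ctx = Ctx().with_param("team_id", "1").with_param("search", "email")
assert ctx.params == {"team_id": "1", "search": "email"}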
diff --git a/ee/hogai/taxonomy.py b/posthog/taxonomy/taxonomy.py
similarity index 77%
rename from ee/hogai/taxonomy.py
rename to posthog/taxonomy/taxonomy.py
index 5b78eb38d5f12..9b7213e030130 100644
--- a/ee/hogai/taxonomy.py
+++ b/posthog/taxonomy/taxonomy.py
@@ -74,8 +74,10 @@ class CoreFilterDefinition(TypedDict):
"rdt_cid",
}
+# synced with frontend/src/lib/taxonomy.tsx
CORE_FILTER_DEFINITIONS_BY_GROUP: dict[str, dict[str, CoreFilterDefinition]] = {
"events": {
+ # in the frontend this key is the empty string
"All Events": {
"label": "All events",
"description": "This is a wildcard that matches all events.",
@@ -95,6 +97,11 @@ class CoreFilterDefinition(TypedDict):
"examples": ["clicked button"],
"ignored_in_assistant": True, # Autocapture is only useful with autocapture-specific filters, which the LLM isn't adept at yet
},
+ "$$heatmap": {
+ "label": "Heatmap",
+ "description": "Heatmap events carry heatmap data to the backend, they do not contribute to event counts.",
+ "ignored_in_assistant": True, # Heatmap events are not useful for LLM
+ },
"$copy_autocapture": {
"label": "Clipboard autocapture",
"description": "Selected text automatically captured when a user copies or cuts.",
@@ -225,53 +232,90 @@ class CoreFilterDefinition(TypedDict):
"description": "The current distinct ID of the user",
"examples": ["16ff262c4301e5-0aa346c03894bc-39667c0e-1aeaa0-16ff262c431767"],
},
+ "timestamp": {
+ "label": "Timestamp",
+ "description": "Time the event happened.",
+ "examples": ["2023-05-20T15:30:00Z"],
+ "system": True,
+ "ignored_in_assistant": True, # Timestamp is not a filterable property
+ },
+ "event": {
+ "label": "Event",
+ "description": "The name of the event.",
+ "examples": ["$pageview"],
+ "system": True,
+ "ignored_in_assistant": True,
+ },
},
"event_properties": {
+ # do we need distinct_id and $session_duration here in the backend?
"$copy_type": {
"label": "Copy Type",
"description": "Type of copy event.",
"examples": ["copy", "cut"],
+ "ignored_in_assistant": True,
},
"$selected_content": {
"label": "Copied content",
"description": "The content that was selected when the user copied or cut.",
+ "ignored_in_assistant": True,
},
"$set": {
"label": "Set",
"description": "Person properties to be set",
+ "ignored_in_assistant": True,
},
"$set_once": {
"label": "Set Once",
"description": "Person properties to be set if not set already (i.e. first-touch)",
+ "ignored_in_assistant": True,
},
"$pageview_id": {
"label": "Pageview ID",
"description": "PostHog's internal ID for matching events to a pageview.",
"system": True,
+ "ignored_in_assistant": True,
},
"$autocapture_disabled_server_side": {
"label": "Autocapture Disabled Server-Side",
"description": "If autocapture has been disabled server-side.",
"system": True,
+ "ignored_in_assistant": True,
},
"$console_log_recording_enabled_server_side": {
"label": "Console Log Recording Enabled Server-Side",
"description": "If console log recording has been enabled server-side.",
"system": True,
+ "ignored_in_assistant": True,
},
"$session_recording_recorder_version_server_side": {
"label": "Session Recording Recorder Version Server-Side",
"description": "The version of the session recording recorder that is enabled server-side.",
"examples": ["v2"],
"system": True,
+ "ignored_in_assistant": True,
},
"$feature_flag_payloads": {
"label": "Feature Flag Payloads",
"description": "Feature flag payloads active in the environment.",
+ "ignored_in_assistant": True,
},
"$capture_failed_request": {
"label": "Capture Failed Request",
"description": "",
+ "ignored_in_assistant": True,
+ },
+ "$lib_rate_limit_remaining_tokens": {
+ "label": "Clientside rate limit remaining tokens",
+ "description": "Remaining rate limit tokens for the posthog-js library client-side rate limiting implementation.",
+ "examples": ["100"],
+ "ignored_in_assistant": True,
+ },
+ "token": {
+ "label": "Token",
+ "description": "Token used for authentication.",
+ "examples": ["ph_abcdefg"],
+ "ignored_in_assistant": True,
},
"$sentry_exception": {
"label": "Sentry exception",
@@ -301,6 +345,11 @@ class CoreFilterDefinition(TypedDict):
"label": "Exception source",
"description": "The source of the exception. E.g. JS file.",
},
+ "$exception_list": {
+ "label": "Exception list",
+ "description": "List of one or more associated exceptions",
+ "system": True,
+ },
"$exception_lineno": {
"label": "Exception source line number",
"description": "Which line in the exception source that caused the exception.",
@@ -356,12 +405,79 @@ class CoreFilterDefinition(TypedDict):
"system": True,
"examples": ["1681211521.345"],
},
+ "$browser_type": {
+ "label": "Browser Type",
+ "description": "This is only added when posthog-js config.opt_out_useragent_filter is true.",
+ "examples": ["browser", "bot"],
+ },
"$device_id": {
"label": "Device ID",
"description": "Unique ID for that device, consistent even if users are logging in/out.",
"examples": ["16ff262c4301e5-0aa346c03894bc-39667c0e-1aeaa0-16ff262c431767"],
"system": True,
},
+ "$replay_minimum_duration": {
+ "label": "Replay config - minimum duration",
+ "description": "Config for minimum duration before emitting a session recording.",
+ "examples": ["1000"],
+ "system": True,
+ },
+ "$replay_sample_rate": {
+ "label": "Replay config - sample rate",
+ "description": "Config for sampling rate of session recordings.",
+ "examples": ["0.1"],
+ "system": True,
+ },
+ "$session_recording_start_reason": {
+ "label": "Session recording start reason",
+ "description": "Reason for starting the session recording. Useful for e.g. if you have sampling enabled and want to see on batch exported events which sessions have recordings available.",
+ "examples": ["sampling_override", "recording_initialized", "linked_flag_match"],
+ "system": True,
+ },
+ "$session_recording_canvas_recording": {
+ "label": "Session recording canvas recording",
+ "description": "Session recording canvas capture config.",
+ "examples": ['{"enabled": false}'],
+ "system": True,
+ },
+ "$session_recording_network_payload_capture": {
+ "label": "Session recording network payload capture",
+ "description": "Session recording network payload capture config.",
+ "examples": ['{"recordHeaders": false}'],
+ "system": True,
+ },
+ "$configured_session_timeout_ms": {
+ "label": "Configured session timeout",
+ "description": "Configured session timeout in milliseconds.",
+ "examples": ["1800000"],
+ "system": True,
+ },
+ "$replay_script_config": {
+ "label": "Replay script config",
+ "description": "Sets an alternative recorder script for the web sdk.",
+ "examples": ['{"script": "recorder-next""}'],
+ "system": True,
+ },
+ "$session_recording_url_trigger_activated_session": {
+ "label": "Session recording URL trigger activated session",
+ "description": "Session recording URL trigger activated session config. Used by posthog-js to track URL activation of session replay.",
+ "system": True,
+ },
+ "$session_recording_url_trigger_status": {
+ "label": "Session recording URL trigger status",
+ "description": "Session recording URL trigger status. Used by posthog-js to track URL activation of session replay.",
+ "system": True,
+ },
+ "$recording_status": {
+ "label": "Session recording status",
+ "description": "The status of session recording at the time the event was captured",
+ "system": True,
+ },
+ "$cymbal_errors": {
+ "label": "Exception processing errors",
+ "description": "Errors encountered while trying to process exceptions",
+ "system": True,
+ },
"$geoip_city_name": {
"label": "City Name",
"description": "Name of the city matched to this event's IP address.",
@@ -619,9 +735,10 @@ class CoreFilterDefinition(TypedDict):
"system": True,
},
"$timestamp": {
- "label": "Timestamp",
- "description": "Time the event happened.",
+ "label": "Timestamp (deprecated)",
+ "description": "Use the HogQL field `timestamp` instead. This field was previously set on some client side events.",
"examples": ["2023-05-20T15:30:00Z"],
+ "system": True,
},
"$sent_at": {
"label": "Sent At",
@@ -977,6 +1094,11 @@ class CoreFilterDefinition(TypedDict):
"label": "Is Identified",
"description": "When the person was identified",
},
+ "$initial_person_info": {
+ "label": "Initial Person Info",
+ "description": "posthog-js initial person information. used in the $set_once flow",
+ "system": True,
+ },
"$web_vitals_enabled_server_side": {
"label": "Web vitals enabled server side",
"description": "Whether web vitals was enabled in remote config",
@@ -1005,6 +1127,125 @@ class CoreFilterDefinition(TypedDict):
"$web_vitals_CLS_value": {
"label": "Web vitals CLS value",
},
+ "$web_vitals_allowed_metrics": {
+ "label": "Web vitals allowed metrics",
+ "description": "Allowed web vitals metrics config.",
+ "examples": ['["LCP", "CLS"]'],
+ "system": True,
+ },
+ "$prev_pageview_last_scroll": {
+ "label": "Previous pageview last scroll",
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ "examples": [0],
+ },
+ "$prev_pageview_last_scroll_percentage": {
+ "label": "Previous pageview last scroll percentage",
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ "examples": [0],
+ },
+ "$prev_pageview_max_scroll": {
+ "examples": [0],
+ "label": "Previous pageview max scroll",
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ },
+ "$prev_pageview_max_scroll_percentage": {
+ "examples": [0],
+ "label": "Previous pageview max scroll percentage",
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ },
+ "$prev_pageview_last_content": {
+ "examples": [0],
+ "label": "Previous pageview last content",
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ },
+ "$prev_pageview_last_content_percentage": {
+ "examples": [0],
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ "label": "Previous pageview last content percentage",
+ },
+ "$prev_pageview_max_content": {
+ "examples": [0],
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ "label": "Previous pageview max content",
+ },
+ "$prev_pageview_max_content_percentage": {
+ "examples": [0],
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ "label": "Previous pageview max content percentage",
+ },
+ "$prev_pageview_pathname": {
+ "examples": ["/pricing", "/about-us/team"],
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ "label": "Previous pageview pathname",
+ },
+ "$prev_pageview_duration": {
+ "examples": [0],
+ "description": "posthog-js adds these to the page leave event, they are used in web analytics calculations",
+ "label": "Previous pageview duration",
+ },
+ "$surveys_activated": {
+ "label": "Surveys Activated",
+ "description": "The surveys that were activated for this event.",
+ },
+ "$process_person_profile": {
+ "label": "Person Profile processing flag",
+ "description": "The setting from an SDK to control whether an event has person processing enabled",
+ "system": True,
+ },
+ "$dead_clicks_enabled_server_side": {
+ "label": "Dead clicks enabled server side",
+ "description": "Whether dead clicks were enabled in remote config",
+ "system": True,
+ },
+ "$dead_click_scroll_delay_ms": {
+ "label": "Dead click scroll delay in milliseconds",
+ "description": "The delay between a click and the next scroll event",
+ "system": True,
+ },
+ "$dead_click_mutation_delay_ms": {
+ "label": "Dead click mutation delay in milliseconds",
+ "description": "The delay between a click and the next mutation event",
+ "system": True,
+ },
+ "$dead_click_absolute_delay_ms": {
+ "label": "Dead click absolute delay in milliseconds",
+ "description": "The delay between a click and having seen no activity at all",
+ "system": True,
+ },
+ "$dead_click_selection_changed_delay_ms": {
+ "label": "Dead click selection changed delay in milliseconds",
+ "description": "The delay between a click and the next text selection change event",
+ "system": True,
+ },
+ "$dead_click_last_mutation_timestamp": {
+ "label": "Dead click last mutation timestamp",
+ "description": "debug signal time of the last mutation seen by dead click autocapture",
+ "system": True,
+ },
+ "$dead_click_event_timestamp": {
+ "label": "Dead click event timestamp",
+ "description": "debug signal time of the event that triggered dead click autocapture",
+ "system": True,
+ },
+ "$dead_click_scroll_timeout": {
+ "label": "Dead click scroll timeout",
+ "description": "whether the dead click autocapture passed the threshold for waiting for a scroll event",
+ },
+ "$dead_click_mutation_timeout": {
+ "label": "Dead click mutation timeout",
+ "description": "whether the dead click autocapture passed the threshold for waiting for a mutation event",
+ "system": True,
+ },
+ "$dead_click_absolute_timeout": {
+ "label": "Dead click absolute timeout",
+ "description": "whether the dead click autocapture passed the threshold for waiting for any activity",
+ "system": True,
+ },
+ "$dead_click_selection_changed_timeout": {
+ "label": "Dead click selection changed timeout",
+ "description": "whether the dead click autocapture passed the threshold for waiting for a text selection change event",
+ "system": True,
+ },
},
"numerical_event_properties": {},
"person_properties": {},
@@ -1093,6 +1334,16 @@ class CoreFilterDefinition(TypedDict):
"examples": ["true", "false"],
"type": "Boolean",
},
+ "$last_external_click_url": {
+ "label": "Last external click URL",
+ "description": "The last external URL clicked in this session.",
+ "examples": ["https://example.com/interesting-article?parameter=true"],
+ },
+ "$vitals_lcp": {
+ "label": "Web vitals LCP",
+ "description": "The time it took for the Largest Contentful Paint on the page. This captures the perceived load time of the page, and measure how long it took for the main content of the page to be visible to users.",
+ "examples": ["2.2"],
+ },
},
"groups": {
"$group_key": {
@@ -1119,6 +1370,29 @@ class CoreFilterDefinition(TypedDict):
"label": "Visited page",
"description": "URL a user visited during their session",
},
+ "click_count": {
+ "label": "Clicks",
+ "description": "Number of clicks during the session",
+ },
+ "keypress_count": {
+ "label": "Key presses",
+ "description": "Number of key presses during the session",
+ },
+ "console_error_count": {
+ "label": "Errors",
+ "description": "Number of console errors during the session",
+ },
+ },
+ "log_entries": {
+ "level": {
+ "label": "Console log level",
+ "description": "Level of the ",
+ "examples": ["info", "warn", "error"],
+ },
+ "message": {
+ "label": "Console log message",
+ "description": "The contents of the log message",
+ },
},
}
@@ -1156,3 +1430,9 @@ class CoreFilterDefinition(TypedDict):
else "Data from the first event in this session."
),
}
+
+PROPERTY_NAME_ALIASES = {
+ key: value["label"]
+ for key, value in CORE_FILTER_DEFINITIONS_BY_GROUP["event_properties"].items()
+ if "label" in value and "deprecated" not in value["label"]
+}
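The new `PROPERTY_NAME_ALIASES` map exposes a human-readable label for every non-deprecated event property. A minimal sketch of what the comprehension yields, using a trimmed-down stand-in for the taxonomy dictionary (the entries below are illustrative, not the full definitions):

```python
# Trimmed stand-in for CORE_FILTER_DEFINITIONS_BY_GROUP, just to show the filtering.
CORE_FILTER_DEFINITIONS_BY_GROUP = {
    "event_properties": {
        "$browser": {"label": "Browser"},
        "$timestamp": {"label": "Timestamp (deprecated)"},
        "$geoip_city_name": {"label": "City Name", "system": True},
    }
}

PROPERTY_NAME_ALIASES = {
    key: value["label"]
    for key, value in CORE_FILTER_DEFINITIONS_BY_GROUP["event_properties"].items()
    if "label" in value and "deprecated" not in value["label"]
}

# Entries whose label is marked "(deprecated)" are dropped; everything else keeps its label.
assert PROPERTY_NAME_ALIASES == {"$browser": "Browser", "$geoip_city_name": "City Name"}
```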
diff --git a/posthog/taxonomy/test/test_event_properties_taxonomy.py b/posthog/taxonomy/test/test_event_properties_taxonomy.py
new file mode 100644
index 0000000000000..37c732a070fc8
--- /dev/null
+++ b/posthog/taxonomy/test/test_event_properties_taxonomy.py
@@ -0,0 +1,29 @@
+from posthog.taxonomy.taxonomy import (
+ CORE_FILTER_DEFINITIONS_BY_GROUP,
+ CAMPAIGN_PROPERTIES,
+ SESSION_INITIAL_PROPERTIES_ADAPTED_FROM_EVENTS,
+)
+
+
+def test_event_properties_includes_campaign_properties() -> None:
+ keys = CORE_FILTER_DEFINITIONS_BY_GROUP["event_properties"].keys()
+ for campaign_param in CAMPAIGN_PROPERTIES:
+ assert campaign_param in keys
+
+
+def test_initial_person_properties_set_up_correctly() -> None:
+ assert (
+ CORE_FILTER_DEFINITIONS_BY_GROUP["person_properties"]["$initial_referring_domain"]["label"]
+ == "Initial Referring Domain"
+ )
+
+
+def test_should_have_a_session_referring_domain_property() -> None:
+ prop = CORE_FILTER_DEFINITIONS_BY_GROUP["session_properties"]["$entry_referring_domain"]
+ assert prop["label"] == "Entry Referring Domain"
+
+
+def test_should_have_every_property_in_session_adopted_from_person() -> None:
+ session_props = CORE_FILTER_DEFINITIONS_BY_GROUP["session_properties"].keys()
+ for prop in SESSION_INITIAL_PROPERTIES_ADAPTED_FROM_EVENTS:
+ assert f"$entry_{prop.replace('$', '')}" in session_props
diff --git a/posthog/temporal/batch_exports/bigquery_batch_export.py b/posthog/temporal/batch_exports/bigquery_batch_export.py
index 5aa3965b5a8bd..91b0885c932da 100644
--- a/posthog/temporal/batch_exports/bigquery_batch_export.py
+++ b/posthog/temporal/batch_exports/bigquery_batch_export.py
@@ -563,6 +563,7 @@ async def flush(
self.rows_exported_counter.add(records_since_last_flush)
self.bytes_exported_counter.add(bytes_since_last_flush)
+ self.heartbeat_details.records_completed += records_since_last_flush
self.heartbeat_details.track_done_range(last_date_range, self.data_interval_start)
@@ -639,7 +640,7 @@ async def insert_into_bigquery_activity(inputs: BigQueryInsertInputs) -> Records
record_batch_schema = await wait_for_schema_or_producer(queue, producer_task)
if record_batch_schema is None:
- return 0
+ return details.records_completed
record_batch_schema = pa.schema(
# NOTE: For some reason, some batches set non-nullable fields as non-nullable, whereas other
@@ -716,7 +717,7 @@ async def insert_into_bigquery_activity(inputs: BigQueryInsertInputs) -> Records
bigquery_table=bigquery_stage_table if can_perform_merge else bigquery_table,
table_schema=stage_schema if can_perform_merge else schema,
)
- records_completed = await run_consumer(
+ await run_consumer(
consumer=consumer,
queue=queue,
producer_task=producer_task,
@@ -740,7 +741,7 @@ async def insert_into_bigquery_activity(inputs: BigQueryInsertInputs) -> Records
stage_fields_cast_to_json=json_columns,
)
- return records_completed
+ return details.records_completed
@workflow.defn(name="bigquery-export", failure_exception_types=[workflow.NondeterminismError])
diff --git a/posthog/temporal/batch_exports/heartbeat.py b/posthog/temporal/batch_exports/heartbeat.py
index fdd21d0613eee..a873507be2be9 100644
--- a/posthog/temporal/batch_exports/heartbeat.py
+++ b/posthog/temporal/batch_exports/heartbeat.py
@@ -1,14 +1,14 @@
-import typing
-import datetime as dt
import collections.abc
import dataclasses
+import datetime as dt
+import typing
import structlog
from posthog.temporal.common.heartbeat import (
+ EmptyHeartbeatError,
HeartbeatDetails,
HeartbeatParseError,
- EmptyHeartbeatError,
NotEnoughHeartbeatValuesError,
)
@@ -27,6 +27,7 @@ class BatchExportRangeHeartbeatDetails(HeartbeatDetails):
"""
done_ranges: list[DateRange] = dataclasses.field(default_factory=list)
+ records_completed: int = 0
_remaining: collections.abc.Sequence[typing.Any] = dataclasses.field(default_factory=tuple)
@classmethod
@@ -37,10 +38,11 @@ def deserialize_details(cls, details: collections.abc.Sequence[typing.Any]) -> d
values. Moreover, we expect datetime values to be ISO-formatted strings.
"""
done_ranges: list[DateRange] = []
+ records_completed = 0
remaining = super().deserialize_details(details)
if len(remaining["_remaining"]) == 0:
- return {"done_ranges": done_ranges, **remaining}
+ return {"done_ranges": done_ranges, "records_completed": records_completed, **remaining}
first_detail = remaining["_remaining"][0]
remaining["_remaining"] = remaining["_remaining"][1:]
@@ -57,7 +59,18 @@ def deserialize_details(cls, details: collections.abc.Sequence[typing.Any]) -> d
done_ranges.append(datetime_bounds)
- return {"done_ranges": done_ranges, **remaining}
+ if len(remaining["_remaining"]) == 0:
+ return {"done_ranges": done_ranges, "records_completed": records_completed, **remaining}
+
+ next_detail = remaining["_remaining"][0]
+ remaining["_remaining"] = remaining["_remaining"][1:]
+
+ try:
+ records_completed = int(next_detail)
+ except (TypeError, ValueError) as e:
+ raise HeartbeatParseError("records_completed") from e
+
+ return {"done_ranges": done_ranges, "records_completed": records_completed, **remaining}
def serialize_details(self) -> tuple[typing.Any, ...]:
"""Serialize this into a tuple.
@@ -69,7 +82,7 @@ def serialize_details(self) -> tuple[typing.Any, ...]:
(start.isoformat() if start is not None else start, end.isoformat()) for (start, end) in self.done_ranges
]
serialized_parent_details = super().serialize_details()
- return (*serialized_parent_details[:-1], serialized_done_ranges, self._remaining)
+ return (*serialized_parent_details[:-1], serialized_done_ranges, self.records_completed, self._remaining)
@property
def empty(self) -> bool:
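The serialized heartbeat tuple now carries the running record count right after the done ranges. A simplified round-trip sketch of that layout (the real classes also prepend the parent details and append `_remaining`, and allow a `None` range start; both are omitted here for brevity):

```python
import datetime as dt


def serialize(done_ranges, records_completed):
    # Mirrors the ordering used by serialize_details: done ranges first, then the count.
    serialized_ranges = [(start.isoformat(), end.isoformat()) for start, end in done_ranges]
    return (serialized_ranges, records_completed)


def deserialize(details):
    serialized_ranges, records_completed, *rest = details
    done_ranges = [
        (dt.datetime.fromisoformat(start), dt.datetime.fromisoformat(end))
        for start, end in serialized_ranges
    ]
    # Like deserialize_details, a non-integer count would raise here (HeartbeatParseError upstream).
    return done_ranges, int(records_completed), tuple(rest)


ranges = [(dt.datetime(2025, 1, 1, 0, 0), dt.datetime(2025, 1, 1, 1, 0))]
assert deserialize(serialize(ranges, records_completed=1200)) == (ranges, 1200, ())
```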
diff --git a/posthog/temporal/batch_exports/postgres_batch_export.py b/posthog/temporal/batch_exports/postgres_batch_export.py
index d4df285067b94..3c70e5b747dea 100644
--- a/posthog/temporal/batch_exports/postgres_batch_export.py
+++ b/posthog/temporal/batch_exports/postgres_batch_export.py
@@ -29,18 +29,26 @@
default_fields,
execute_batch_export_insert_activity,
get_data_interval,
- iter_model_records,
start_batch_export_run,
)
-from posthog.temporal.batch_exports.metrics import (
- get_bytes_exported_metric,
- get_rows_exported_metric,
+from posthog.temporal.batch_exports.heartbeat import (
+ BatchExportRangeHeartbeatDetails,
+ DateRange,
+ should_resume_from_activity_heartbeat,
+)
+from posthog.temporal.batch_exports.spmc import (
+ Consumer,
+ Producer,
+ RecordBatchQueue,
+ run_consumer,
+ wait_for_schema_or_producer,
+)
+from posthog.temporal.batch_exports.temporary_file import (
+ BatchExportTemporaryFile,
+ WriterFormat,
)
-from posthog.temporal.batch_exports.temporary_file import CSVBatchExportWriter
from posthog.temporal.batch_exports.utils import (
JsonType,
- apeek_first_and_rewind,
- cast_record_batch_json_columns,
make_retryable_with_exponential_backoff,
set_status_to_running_task,
)
@@ -466,6 +474,70 @@ def get_postgres_fields_from_record_schema(
return pg_schema
+@dataclasses.dataclass
+class PostgreSQLHeartbeatDetails(BatchExportRangeHeartbeatDetails):
+ """The PostgreSQL batch export details included in every heartbeat."""
+
+ pass
+
+
+class PostgreSQLConsumer(Consumer):
+ def __init__(
+ self,
+ heartbeater: Heartbeater,
+ heartbeat_details: PostgreSQLHeartbeatDetails,
+ data_interval_start: dt.datetime | str | None,
+ data_interval_end: dt.datetime | str,
+ writer_format: WriterFormat,
+ postgresql_client: PostgreSQLClient,
+ postgresql_table: str,
+ postgresql_table_schema: str,
+ postgresql_table_fields: list[str],
+ ):
+ super().__init__(
+ heartbeater=heartbeater,
+ heartbeat_details=heartbeat_details,
+ data_interval_start=data_interval_start,
+ data_interval_end=data_interval_end,
+ writer_format=writer_format,
+ )
+ self.heartbeat_details: PostgreSQLHeartbeatDetails = heartbeat_details
+ self.postgresql_table = postgresql_table
+ self.postgresql_table_schema = postgresql_table_schema
+ self.postgresql_table_fields = postgresql_table_fields
+ self.postgresql_client = postgresql_client
+
+ async def flush(
+ self,
+ batch_export_file: BatchExportTemporaryFile,
+ records_since_last_flush: int,
+ bytes_since_last_flush: int,
+ flush_counter: int,
+ last_date_range: DateRange,
+ is_last: bool,
+ error: Exception | None,
+ ):
+ await self.logger.adebug(
+ "Copying %s records of size %s bytes",
+ records_since_last_flush,
+ bytes_since_last_flush,
+ )
+
+ await self.postgresql_client.copy_tsv_to_postgres(
+ batch_export_file,
+ self.postgresql_table_schema,
+ self.postgresql_table,
+ self.postgresql_table_fields,
+ )
+
+ await self.logger.ainfo("Copied %s to PostgreSQL table '%s'", records_since_last_flush, self.postgresql_table)
+ self.rows_exported_counter.add(records_since_last_flush)
+ self.bytes_exported_counter.add(bytes_since_last_flush)
+
+ self.heartbeat_details.records_completed += records_since_last_flush
+ self.heartbeat_details.track_done_range(last_date_range, self.data_interval_start)
+
+
@activity.defn
async def insert_into_postgres_activity(inputs: PostgresInsertInputs) -> RecordsCompleted:
"""Activity streams data from ClickHouse to Postgres."""
@@ -480,35 +552,67 @@ async def insert_into_postgres_activity(inputs: PostgresInsertInputs) -> Records
)
async with (
- Heartbeater(),
+ Heartbeater() as heartbeater,
set_status_to_running_task(run_id=inputs.run_id, logger=logger),
get_client(team_id=inputs.team_id) as client,
):
if not await client.is_alive():
raise ConnectionError("Cannot establish connection to ClickHouse")
+ _, details = await should_resume_from_activity_heartbeat(activity, PostgreSQLHeartbeatDetails)
+ if details is None:
+ details = PostgreSQLHeartbeatDetails()
+
+ done_ranges: list[DateRange] = details.done_ranges
+
model: BatchExportModel | BatchExportSchema | None = None
if inputs.batch_export_schema is None and "batch_export_model" in {
field.name for field in dataclasses.fields(inputs)
}:
model = inputs.batch_export_model
+ if model is not None:
+ model_name = model.name
+ extra_query_parameters = model.schema["values"] if model.schema is not None else None
+ fields = model.schema["fields"] if model.schema is not None else None
+ else:
+ model_name = "events"
+ extra_query_parameters = None
+ fields = None
else:
model = inputs.batch_export_schema
+ model_name = "custom"
+ extra_query_parameters = model["values"] if model is not None else {}
+ fields = model["fields"] if model is not None else None
- record_batch_iterator = iter_model_records(
- client=client,
- model=model,
+ data_interval_start = (
+ dt.datetime.fromisoformat(inputs.data_interval_start) if inputs.data_interval_start else None
+ )
+ data_interval_end = dt.datetime.fromisoformat(inputs.data_interval_end)
+ full_range = (data_interval_start, data_interval_end)
+
+ queue = RecordBatchQueue(max_size_bytes=settings.BATCH_EXPORT_POSTGRES_RECORD_BATCH_QUEUE_MAX_SIZE_BYTES)
+ producer = Producer(clickhouse_client=client)
+ producer_task = producer.start(
+ queue=queue,
+ model_name=model_name,
+ is_backfill=inputs.is_backfill,
team_id=inputs.team_id,
- interval_start=inputs.data_interval_start,
- interval_end=inputs.data_interval_end,
+ full_range=full_range,
+ done_ranges=done_ranges,
+ fields=fields,
+ destination_default_fields=postgres_default_fields(),
exclude_events=inputs.exclude_events,
include_events=inputs.include_events,
- destination_default_fields=postgres_default_fields(),
- is_backfill=inputs.is_backfill,
+ extra_query_parameters=extra_query_parameters,
+ )
+
+ record_batch_schema = await wait_for_schema_or_producer(queue, producer_task)
+ if record_batch_schema is None:
+ return details.records_completed
+
+ record_batch_schema = pa.schema(
+ [field.with_nullable(True) for field in record_batch_schema if field.name != "_inserted_at"]
)
- first_record_batch, record_batch_iterator = await apeek_first_and_rewind(record_batch_iterator)
- if first_record_batch is None:
- return 0
if model is None or (isinstance(model, BatchExportModel) and model.name == "events"):
table_fields: Fields = [
@@ -526,17 +630,13 @@ async def insert_into_postgres_activity(inputs: PostgresInsertInputs) -> Records
]
else:
- column_names = [column for column in first_record_batch.schema.names if column != "_inserted_at"]
- record_schema = first_record_batch.select(column_names).schema
table_fields = get_postgres_fields_from_record_schema(
- record_schema, known_json_columns=["properties", "set", "set_once", "person_properties"]
+ record_batch_schema,
+ known_json_columns=["properties", "set", "set_once", "person_properties"],
)
schema_columns = [field[0] for field in table_fields]
- rows_exported = get_rows_exported_metric()
- bytes_exported = get_bytes_exported_metric()
-
requires_merge = (
isinstance(inputs.batch_export_model, BatchExportModel) and inputs.batch_export_model.name == "persons"
)
@@ -564,47 +664,33 @@ async def insert_into_postgres_activity(inputs: PostgresInsertInputs) -> Records
primary_key=primary_key,
) as pg_stage_table,
):
-
- async def flush_to_postgres(
- local_results_file,
- records_since_last_flush,
- bytes_since_last_flush,
- flush_counter: int,
- last_inserted_at,
- last: bool,
- error: Exception | None,
- ):
- await logger.adebug(
- "Copying %s records of size %s bytes",
- records_since_last_flush,
- bytes_since_last_flush,
- )
-
- table = pg_stage_table if requires_merge else pg_table
- await pg_client.copy_tsv_to_postgres(
- local_results_file,
- inputs.schema,
- table,
- schema_columns,
- )
- rows_exported.add(records_since_last_flush)
- bytes_exported.add(bytes_since_last_flush)
-
- writer = CSVBatchExportWriter(
+ consumer = PostgreSQLConsumer(
+ heartbeater=heartbeater,
+ heartbeat_details=details,
+ data_interval_end=data_interval_end,
+ data_interval_start=data_interval_start,
+ writer_format=WriterFormat.CSV,
+ postgresql_client=pg_client,
+ postgresql_table=pg_stage_table if requires_merge else pg_table,
+ postgresql_table_schema=inputs.schema,
+ postgresql_table_fields=schema_columns,
+ )
+ await run_consumer(
+ consumer=consumer,
+ queue=queue,
+ producer_task=producer_task,
+ schema=record_batch_schema,
max_bytes=settings.BATCH_EXPORT_POSTGRES_UPLOAD_CHUNK_SIZE_BYTES,
- flush_callable=flush_to_postgres,
- field_names=schema_columns,
- delimiter="\t",
- quoting=csv.QUOTE_MINIMAL,
- escape_char=None,
+ json_columns=(),
+ writer_file_kwargs={
+ "delimiter": "\t",
+ "quoting": csv.QUOTE_MINIMAL,
+ "escape_char": None,
+ "field_names": schema_columns,
+ },
+ multiple_files=True,
)
- async with writer.open_temporary_file():
- async for record_batch in record_batch_iterator:
- record_batch = cast_record_batch_json_columns(record_batch, json_columns=())
-
- await writer.write_record_batch(record_batch)
-
if requires_merge:
merge_key: Fields = (
("team_id", "INT"),
@@ -619,7 +705,7 @@ async def flush_to_postgres(
merge_key=merge_key,
)
- return writer.records_total
+ return details.records_completed
@workflow.defn(name="postgres-export", failure_exception_types=[workflow.NondeterminismError])
@@ -726,6 +812,8 @@ async def run(self, inputs: PostgresBatchExportInputs):
# We do not create any ourselves, so this generally is a user-managed check, so we
# should not retry.
"CheckViolation",
+ # We do not create foreign keys, so this is a user managed check we have failed.
+ "ForeignKeyViolation",
],
finish_inputs=finish_inputs,
)
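Passing `done_ranges` from the recovered heartbeat into the producer is what makes resuming cheap: only date ranges that were never flushed get re-queried from ClickHouse. A hypothetical helper (not the real `Producer` internals) illustrating the idea:

```python
import datetime as dt


def remaining_ranges(full_range, done_ranges):
    """Subtract sorted, non-overlapping done_ranges from full_range, yielding what is left."""
    start, end = full_range
    for done_start, done_end in sorted(done_ranges):
        if start < done_start:
            yield (start, done_start)
        start = max(start, done_end)
    if start < end:
        yield (start, end)


full_range = (dt.datetime(2025, 1, 1, 0, 0), dt.datetime(2025, 1, 1, 1, 0))
# Suppose the first 45 minutes were already exported before the activity was retried.
done_ranges = [(dt.datetime(2025, 1, 1, 0, 0), dt.datetime(2025, 1, 1, 0, 45))]
assert list(remaining_ranges(full_range, done_ranges)) == [
    (dt.datetime(2025, 1, 1, 0, 45), dt.datetime(2025, 1, 1, 1, 0))
]
```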
diff --git a/posthog/temporal/batch_exports/redshift_batch_export.py b/posthog/temporal/batch_exports/redshift_batch_export.py
index 9c0fc8b9119b5..7abbbc885e09f 100644
--- a/posthog/temporal/batch_exports/redshift_batch_export.py
+++ b/posthog/temporal/batch_exports/redshift_batch_export.py
@@ -306,6 +306,7 @@ async def flush(
self.rows_exported_counter.add(records_since_last_flush)
self.bytes_exported_counter.add(bytes_since_last_flush)
+ self.heartbeat_details.records_completed += records_since_last_flush
self.heartbeat_details.track_done_range(last_date_range, self.data_interval_start)
@@ -404,9 +405,10 @@ async def insert_into_redshift_activity(inputs: RedshiftInsertInputs) -> Records
extra_query_parameters=extra_query_parameters,
max_record_batch_size_bytes=1024 * 1024 * 2, # 2MB
)
+
record_batch_schema = await wait_for_schema_or_producer(queue, producer_task)
if record_batch_schema is None:
- return 0
+ return details.records_completed
record_batch_schema = pa.schema(
[field.with_nullable(True) for field in record_batch_schema if field.name != "_inserted_at"]
@@ -474,7 +476,7 @@ async def insert_into_redshift_activity(inputs: RedshiftInsertInputs) -> Records
redshift_client=redshift_client,
redshift_table=redshift_stage_table if requires_merge else redshift_table,
)
- records_completed = await run_consumer(
+ await run_consumer(
consumer=consumer,
queue=queue,
producer_task=producer_task,
@@ -504,7 +506,7 @@ async def insert_into_redshift_activity(inputs: RedshiftInsertInputs) -> Records
merge_key=merge_key,
)
- return records_completed
+ return details.records_completed
@workflow.defn(name="redshift-export", failure_exception_types=[workflow.NondeterminismError])
diff --git a/posthog/temporal/batch_exports/s3_batch_export.py b/posthog/temporal/batch_exports/s3_batch_export.py
index e12822fc22cfa..812ac9adc68a1 100644
--- a/posthog/temporal/batch_exports/s3_batch_export.py
+++ b/posthog/temporal/batch_exports/s3_batch_export.py
@@ -55,7 +55,10 @@
from posthog.temporal.batch_exports.utils import set_status_to_running_task
from posthog.temporal.common.clickhouse import get_client
from posthog.temporal.common.heartbeat import Heartbeater
-from posthog.temporal.common.logger import bind_temporal_worker_logger
+from posthog.temporal.common.logger import (
+ bind_temporal_worker_logger,
+ get_internal_logger,
+)
NON_RETRYABLE_ERROR_TYPES = [
# S3 parameter validation failed.
@@ -257,6 +260,8 @@ def __init__(
if self.endpoint_url == "":
raise InvalidS3EndpointError("Endpoint URL is empty.")
+ self.logger = get_internal_logger()
+
def to_state(self) -> S3MultiPartUploadState:
"""Produce state tuple that can be used to resume this S3MultiPartUpload."""
# The second predicate is trivial but required by type-checking.
@@ -314,17 +319,17 @@ async def start(self) -> str:
upload_id: str = multipart_response["UploadId"]
self.upload_id = upload_id
-
+ await self.logger.adebug("Started multipart upload for key %s with upload id %s", self.key, upload_id)
return upload_id
- def continue_from_state(self, state: S3MultiPartUploadState):
+ async def continue_from_state(self, state: S3MultiPartUploadState):
"""Continue this S3MultiPartUpload from a previous state.
This method is intended to be used with the state found in an Activity heartbeat.
"""
self.upload_id = state.upload_id
self.parts = state.parts
-
+ await self.logger.adebug("Resuming multipart upload for key %s with upload id %s", self.key, self.upload_id)
return self.upload_id
async def complete(self) -> str | None:
@@ -429,6 +434,10 @@ async def upload_part_retryable(
error_code = err.response.get("Error", {}).get("Code", None)
attempt += 1
+ await self.logger.ainfo(
+ "Caught ClientError while uploading part %s: %s", next_part_number, error_code
+ )
+
if error_code is not None and error_code == "RequestTimeout":
if attempt >= max_attempts:
raise IntermittentUploadPartTimeoutError(part_number=next_part_number) from err
@@ -569,9 +578,10 @@ async def flush(
async with self.s3_upload as s3_upload:
await self.logger.adebug(
- "Uploading file number %s part %s containing %s records with size %s bytes",
+ "Uploading file number %s part %s with upload id %s containing %s records with size %s bytes",
self.file_number,
s3_upload.part_number + 1,
+ s3_upload.upload_id,
records_since_last_flush,
bytes_since_last_flush,
)
@@ -581,6 +591,9 @@ async def flush(
self.bytes_exported_counter.add(bytes_since_last_flush)
if is_last:
+ await self.logger.adebug(
+ "Completing multipart upload %s for file number %s", s3_upload.upload_id, self.file_number
+ )
await s3_upload.complete()
if is_last:
@@ -590,10 +603,14 @@ async def flush(
else:
self.heartbeat_details.append_upload_state(self.s3_upload.to_state())
+ self.heartbeat_details.records_completed += records_since_last_flush
self.heartbeat_details.track_done_range(last_date_range, self.data_interval_start)
async def close(self):
if self.s3_upload is not None:
+ await self.logger.adebug(
+ "Completing multipart upload %s for file number %s", self.s3_upload.upload_id, self.file_number
+ )
await self.s3_upload.complete()
self.heartbeat_details.mark_file_upload_as_complete()
@@ -614,7 +631,7 @@ async def initialize_and_resume_multipart_upload(
s3_upload = initialize_upload(inputs, file_number)
if details.upload_state:
- s3_upload.continue_from_state(details.upload_state)
+ await s3_upload.continue_from_state(details.upload_state)
if inputs.compression == "brotli":
# Even if we receive details we cannot resume a brotli compressed upload as
@@ -756,11 +773,10 @@ async def insert_into_s3_activity(inputs: S3InsertInputs) -> RecordsCompleted:
max_record_batch_size_bytes=1024 * 1024 * 10, # 10MB
use_latest_schema=True,
)
- records_completed = 0
record_batch_schema = await wait_for_schema_or_producer(queue, producer_task)
if record_batch_schema is None:
- return records_completed
+ return details.records_completed
record_batch_schema = pa.schema(
# NOTE: For some reason, some batches set non-nullable fields as non-nullable, whereas other
@@ -780,7 +796,7 @@ async def insert_into_s3_activity(inputs: S3InsertInputs) -> RecordsCompleted:
s3_upload=s3_upload,
s3_inputs=inputs,
)
- records_completed = await run_consumer(
+ await run_consumer(
consumer=consumer,
queue=queue,
producer_task=producer_task,
@@ -791,7 +807,7 @@ async def insert_into_s3_activity(inputs: S3InsertInputs) -> RecordsCompleted:
max_file_size_bytes=inputs.max_file_size_mb * 1024 * 1024 if inputs.max_file_size_mb else 0,
)
- return records_completed
+ return details.records_completed
@workflow.defn(name="s3-export", failure_exception_types=[workflow.NondeterminismError])
diff --git a/posthog/temporal/batch_exports/snowflake_batch_export.py b/posthog/temporal/batch_exports/snowflake_batch_export.py
index 0e3046a371048..ebdea7749e014 100644
--- a/posthog/temporal/batch_exports/snowflake_batch_export.py
+++ b/posthog/temporal/batch_exports/snowflake_batch_export.py
@@ -546,6 +546,7 @@ async def flush(
self.rows_exported_counter.add(records_since_last_flush)
self.bytes_exported_counter.add(bytes_since_last_flush)
+ self.heartbeat_details.records_completed += records_since_last_flush
self.heartbeat_details.track_done_range(last_date_range, self.data_interval_start)
diff --git a/posthog/temporal/batch_exports/spmc.py b/posthog/temporal/batch_exports/spmc.py
index 022aa7b60ef09..d4f8156406bb3 100644
--- a/posthog/temporal/batch_exports/spmc.py
+++ b/posthog/temporal/batch_exports/spmc.py
@@ -7,7 +7,6 @@
import uuid
import pyarrow as pa
-import structlog
import temporalio.common
from django.conf import settings
@@ -42,8 +41,7 @@
)
from posthog.temporal.common.clickhouse import ClickHouseClient
from posthog.temporal.common.heartbeat import Heartbeater
-
-logger = structlog.get_logger()
+from posthog.temporal.common.logger import get_internal_logger
class RecordBatchQueue(asyncio.Queue):
@@ -123,6 +121,7 @@ async def raise_on_task_failure(task: asyncio.Task) -> None:
return
exc = task.exception()
+ logger = get_internal_logger()
await logger.aexception("%s task failed", task.get_name(), exc_info=exc)
raise RecordBatchTaskError() from exc
@@ -187,7 +186,7 @@ def __init__(
self.data_interval_start = data_interval_start
self.data_interval_end = data_interval_end
self.writer_format = writer_format
- self.logger = logger
+ self.logger = get_internal_logger()
@property
def rows_exported_counter(self) -> temporalio.common.MetricCounter:
@@ -435,7 +434,7 @@ def consumer_done_callback(task: asyncio.Task):
consumer_tasks_pending.remove(task)
consumer_tasks_done.add(task)
- await logger.adebug("Starting record batch consumer")
+ await consumer.logger.adebug("Starting record batch consumer")
consumer_task = consumer.create_consumer_task(
queue=queue,
@@ -460,7 +459,7 @@ def consumer_done_callback(task: asyncio.Task):
raise consumer_task_exception
await raise_on_task_failure(producer_task)
- await logger.adebug("Successfully finished record batch consumer")
+ await consumer.logger.adebug("Successfully finished record batch consumer")
consumer.complete_heartbeat()
@@ -509,6 +508,7 @@ class Producer:
def __init__(self, clickhouse_client: ClickHouseClient):
self.clickhouse_client = clickhouse_client
self._task: asyncio.Task | None = None
+ self.logger = get_internal_logger()
@property
def task(self) -> asyncio.Task:
@@ -650,7 +650,7 @@ async def produce_batch_export_record_batches_from_range(
await queue.put(record_batch_slice)
except Exception as e:
- await logger.aexception("Unexpected error occurred while producing record batches", exc_info=e)
+ await self.logger.aexception("Unexpected error occurred while producing record batches", exc_info=e)
raise
diff --git a/posthog/temporal/common/logger.py b/posthog/temporal/common/logger.py
index db3767848770a..0b25c072fe31c 100644
--- a/posthog/temporal/common/logger.py
+++ b/posthog/temporal/common/logger.py
@@ -1,18 +1,18 @@
import asyncio
-from contextvars import copy_context
import json
import logging
+import queue as sync_queue
import ssl
import threading
import uuid
-from kafka import KafkaProducer
-import queue as sync_queue
+from contextvars import copy_context
import aiokafka
import structlog
import temporalio.activity
import temporalio.workflow
from django.conf import settings
+from kafka import KafkaProducer
from structlog.processors import EventRenamer
from structlog.typing import FilteringBoundLogger
@@ -21,6 +21,18 @@
BACKGROUND_LOGGER_TASKS = set()
+def get_internal_logger():
+ """Return a logger for internal use, where logs do not get sent to Kafka.
+
+ We attach the temporal context to the logger for easier debugging (for
+ example, we can track things like the workflow id across log entries).
+ """
+ logger = structlog.get_logger()
+ temporal_context = get_temporal_context()
+
+ return logger.new(**temporal_context)
+
+
async def bind_temporal_worker_logger(team_id: int, destination: str | None = None) -> FilteringBoundLogger:
"""Return a bound logger for Temporal Workers."""
if not structlog.is_configured():
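`get_internal_logger` keeps structured logs out of the Kafka pipeline while still stamping every entry with the Temporal context. A minimal structlog sketch of the same pattern, with made-up context values standing in for `get_temporal_context()`:

```python
import structlog

# Bind shared context once; structlog's .new() resets the context and attaches these keys,
# so every subsequent call from this logger carries them (workflow/activity ids in practice).
logger = structlog.get_logger().new(
    workflow_id="example-workflow-id",  # hypothetical values
    activity_type="insert_into_s3_activity",
)

logger.debug("Started multipart upload", key="exports/part-0.jsonl", upload_id="upload-123")
logger.info("Caught ClientError while uploading part", part_number=2, error_code="RequestTimeout")
```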
diff --git a/posthog/temporal/data_imports/external_data_job.py b/posthog/temporal/data_imports/external_data_job.py
index 26332fb0ab2d7..68c023ece2f46 100644
--- a/posthog/temporal/data_imports/external_data_job.py
+++ b/posthog/temporal/data_imports/external_data_job.py
@@ -67,6 +67,7 @@
"password authentication failed for user",
"No primary key defined for table",
"failed: timeout expired",
+ "SSL connection has been closed unexpectedly",
],
ExternalDataSource.Type.ZENDESK: ["404 Client Error: Not Found for url", "403 Client Error: Forbidden for url"],
ExternalDataSource.Type.MYSQL: ["Can't connect to MySQL server on", "No primary key defined for table"],
diff --git a/posthog/temporal/data_imports/pipelines/pipeline/delta_table_helper.py b/posthog/temporal/data_imports/pipelines/pipeline/delta_table_helper.py
index 542ab40bc3744..0e98f948f9519 100644
--- a/posthog/temporal/data_imports/pipelines/pipeline/delta_table_helper.py
+++ b/posthog/temporal/data_imports/pipelines/pipeline/delta_table_helper.py
@@ -84,6 +84,20 @@ def get_delta_table(self) -> deltalake.DeltaTable | None:
return None
+ def reset_table(self):
+ table = self.get_delta_table()
+ if table is None:
+ return
+
+ delta_uri = self._get_delta_table_uri()
+
+ table.delete()
+
+ s3 = get_s3_client()
+ s3.delete(delta_uri, recursive=True)
+
+ self.get_delta_table.cache_clear()
+
def write_to_deltalake(
self, data: pa.Table, is_incremental: bool, chunk_index: int, primary_keys: Sequence[Any] | None
) -> deltalake.DeltaTable:
diff --git a/posthog/temporal/data_imports/pipelines/pipeline/pipeline.py b/posthog/temporal/data_imports/pipelines/pipeline/pipeline.py
index 49106164d7ad2..c1a8b95bb0abe 100644
--- a/posthog/temporal/data_imports/pipelines/pipeline/pipeline.py
+++ b/posthog/temporal/data_imports/pipelines/pipeline/pipeline.py
@@ -19,7 +19,7 @@
from posthog.temporal.data_imports.pipelines.pipeline.hogql_schema import HogQLSchema
from posthog.temporal.data_imports.pipelines.pipeline_sync import validate_schema_and_update_table_sync
from posthog.temporal.data_imports.util import prepare_s3_files_for_querying
-from posthog.warehouse.models import DataWarehouseTable, ExternalDataJob, ExternalDataSchema
+from posthog.warehouse.models import DataWarehouseTable, ExternalDataJob, ExternalDataSchema, ExternalDataSource
class PipelineNonDLT:
@@ -29,11 +29,14 @@ class PipelineNonDLT:
_schema: ExternalDataSchema
_logger: FilteringBoundLogger
_is_incremental: bool
+ _reset_pipeline: bool
_delta_table_helper: DeltaTableHelper
_internal_schema = HogQLSchema()
_load_id: int
- def __init__(self, source: DltSource, logger: FilteringBoundLogger, job_id: str, is_incremental: bool) -> None:
+ def __init__(
+ self, source: DltSource, logger: FilteringBoundLogger, job_id: str, is_incremental: bool, reset_pipeline: bool
+ ) -> None:
resources = list(source.resources.items())
assert len(resources) == 1
resource_name, resource = resources[0]
@@ -42,6 +45,7 @@ def __init__(self, source: DltSource, logger: FilteringBoundLogger, job_id: str,
self._resource_name = resource_name
self._job = ExternalDataJob.objects.prefetch_related("schema").get(id=job_id)
self._is_incremental = is_incremental
+ self._reset_pipeline = reset_pipeline
self._logger = logger
self._load_id = time.time_ns()
@@ -60,6 +64,14 @@ def run(self):
row_count = 0
chunk_index = 0
+ if self._reset_pipeline:
+ self._logger.debug("Deleting existing table due to reset_pipeline being set")
+ self._delta_table_helper.reset_table()
+
+ source: ExternalDataSource = self._job.pipeline
+ source.job_inputs.pop("reset_pipeline", None)
+ source.save()
+
for item in self._resource:
py_table = None
@@ -140,20 +152,25 @@ def _post_run_operations(self, row_count: int):
return
self._logger.debug("Spawning new process for deltatable compact and vacuuming")
- process = subprocess.Popen(
- [
- "python",
- f"{os.getcwd()}/posthog/temporal/data_imports/pipelines/pipeline/delta_table_subprocess.py",
- "--table_uri",
- self._delta_table_helper._get_delta_table_uri(),
- ],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- )
- stdout, stderr = process.communicate()
-
- if process.returncode != 0:
- raise Exception(f"Delta subprocess failed: {stderr.decode()}")
+ try:
+ process = subprocess.Popen(
+ [
+ "python",
+ f"{os.getcwd()}/posthog/temporal/data_imports/pipelines/pipeline/delta_table_subprocess.py",
+ "--table_uri",
+ self._delta_table_helper._get_delta_table_uri(),
+ ],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=True,
+ )
+ stdout, stderr = process.communicate()
+
+ if process.returncode != 0:
+ raise Exception(f"Delta subprocess failed: {stderr.decode()}")
+ finally:
+ if process.poll() is None:
+ process.kill()
file_uris = delta_table.file_uris()
self._logger.info(f"Preparing S3 files - total parquet files: {len(file_uris)}")
diff --git a/posthog/temporal/data_imports/pipelines/sql_database_v2/arrow_helpers.py b/posthog/temporal/data_imports/pipelines/sql_database_v2/arrow_helpers.py
index 03c966491f650..5fef253e74501 100644
--- a/posthog/temporal/data_imports/pipelines/sql_database_v2/arrow_helpers.py
+++ b/posthog/temporal/data_imports/pipelines/sql_database_v2/arrow_helpers.py
@@ -1,4 +1,6 @@
+import decimal
import json
+import math
from typing import Any, Optional
from collections.abc import Sequence
@@ -94,6 +96,11 @@ def row_tuples_to_arrow(rows: Sequence[RowAny], columns: TTableSchemaColumns, tz
)
json_str_array = pa.array([None if s is None else json_dumps(s) for s in columnar_known_types[field.name]])
columnar_known_types[field.name] = json_str_array
+ if issubclass(py_type, decimal.Decimal):
+ # Remove any NaN values from decimal columns
+ columnar_known_types[field.name] = np.array(
+ [None if x is not None and math.isnan(x) else x for x in columnar_known_types[field.name]]
+ )
# If there are unknown type columns, first create a table to infer their types
if columnar_unknown_types:
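Decimal columns coming out of SQL sources can contain `Decimal("NaN")`, which Arrow's decimal128 type cannot store, so the new branch nulls those values out before the table is built. A small sketch of the same clean-up (assuming pyarrow and numpy are available):

```python
import decimal
import math

import numpy as np
import pyarrow as pa

# Hypothetical column of Decimals as it might arrive from a SQL source.
values = [decimal.Decimal("1.50"), decimal.Decimal("NaN"), None, decimal.Decimal("2.25")]

# Replace NaN decimals with None; math.isnan() coerces Decimal to float for the check.
cleaned = np.array([None if x is not None and math.isnan(x) else x for x in values])

arr = pa.array(cleaned.tolist(), type=pa.decimal128(10, 2))
assert arr.null_count == 2  # the NaN and the original None both become nulls
```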
diff --git a/posthog/temporal/data_imports/workflow_activities/import_data_sync.py b/posthog/temporal/data_imports/workflow_activities/import_data_sync.py
index 3f166ec0049a9..a9a058bb52261 100644
--- a/posthog/temporal/data_imports/workflow_activities/import_data_sync.py
+++ b/posthog/temporal/data_imports/workflow_activities/import_data_sync.py
@@ -528,7 +528,7 @@ def _run(
reset_pipeline: bool,
):
if settings.TEMPORAL_TASK_QUEUE == DATA_WAREHOUSE_TASK_QUEUE_V2:
- pipeline = PipelineNonDLT(source, logger, job_inputs.run_id, schema.is_incremental)
+ pipeline = PipelineNonDLT(source, logger, job_inputs.run_id, schema.is_incremental, reset_pipeline)
pipeline.run()
del pipeline
else:
diff --git a/posthog/temporal/tests/batch_exports/conftest.py b/posthog/temporal/tests/batch_exports/conftest.py
index 027c9c40dbdf5..2c1d6f7793d15 100644
--- a/posthog/temporal/tests/batch_exports/conftest.py
+++ b/posthog/temporal/tests/batch_exports/conftest.py
@@ -5,8 +5,10 @@
import psycopg
import pytest
import pytest_asyncio
+import temporalio.worker
from psycopg import sql
+from posthog import constants
from posthog.temporal.tests.utils.events import generate_test_events_in_clickhouse
from posthog.temporal.tests.utils.persons import (
generate_test_person_distinct_id2_in_clickhouse,
@@ -147,6 +149,24 @@ async def setup_postgres_test_db(postgres_config):
await connection.close()
+@pytest_asyncio.fixture
+async def temporal_worker(temporal_client, workflows, activities):
+ worker = temporalio.worker.Worker(
+ temporal_client,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
+ workflows=workflows,
+ activities=activities,
+ workflow_runner=temporalio.worker.UnsandboxedWorkflowRunner(),
+ )
+
+ worker_run = asyncio.create_task(worker.run())
+
+ yield worker
+
+ worker_run.cancel()
+ await asyncio.wait([worker_run])
+
+
@pytest_asyncio.fixture(scope="module", autouse=True)
async def create_clickhouse_tables_and_views(clickhouse_client, django_db_setup):
from posthog.batch_exports.sql import (
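The new `temporal_worker` fixture runs a worker on the shared `BATCH_EXPORTS_TASK_QUEUE` constant for the duration of a test, which is why the workflow-execution calls below switch away from `settings.TEMPORAL_TASK_QUEUE`. A hypothetical test sketch showing how the two fit together (the workflow name and inputs are made up):

```python
import datetime as dt

import temporalio.common

from posthog import constants


async def test_some_export_workflow(temporal_worker, temporal_client):
    # The fixture's worker polls constants.BATCH_EXPORTS_TASK_QUEUE, so the workflow
    # must be started on the same queue for it to be picked up.
    await temporal_client.execute_workflow(
        "some-export-workflow",  # hypothetical workflow name
        {"team_id": 1},  # hypothetical inputs
        id="some-workflow-id",
        task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
        execution_timeout=dt.timedelta(minutes=1),
        retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
    )
```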
diff --git a/posthog/temporal/tests/batch_exports/test_backfill_batch_export.py b/posthog/temporal/tests/batch_exports/test_backfill_batch_export.py
index e422dc5088e4c..9c9dcc618759f 100644
--- a/posthog/temporal/tests/batch_exports/test_backfill_batch_export.py
+++ b/posthog/temporal/tests/batch_exports/test_backfill_batch_export.py
@@ -16,6 +16,7 @@
from asgiref.sync import sync_to_async
from django.conf import settings
+from posthog import constants
from posthog.models import Team
from posthog.temporal.batch_exports.backfill_batch_export import (
BackfillBatchExportInputs,
@@ -298,7 +299,7 @@ async def test_backfill_batch_export_workflow(temporal_worker, temporal_schedule
BackfillBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
execution_timeout=dt.timedelta(minutes=1),
retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
)
@@ -379,7 +380,7 @@ async def test_backfill_batch_export_workflow_no_end_at(
BackfillBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
execution_timeout=dt.timedelta(minutes=1),
retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
)
@@ -455,7 +456,7 @@ async def test_backfill_batch_export_workflow_fails_when_schedule_deleted(
BackfillBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
execution_timeout=dt.timedelta(seconds=20),
retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
)
@@ -497,7 +498,7 @@ async def test_backfill_batch_export_workflow_fails_when_schedule_deleted_after_
BackfillBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
execution_timeout=dt.timedelta(seconds=20),
retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
)
@@ -583,7 +584,7 @@ async def test_backfill_batch_export_workflow_is_cancelled_on_repeated_failures(
BackfillBatchExportWorkflow.run,
inputs,
id=backfill_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
execution_timeout=dt.timedelta(minutes=2),
retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
)
@@ -653,7 +654,7 @@ async def test_backfill_utc_batch_export_workflow_with_timezone_aware_bounds(
BackfillBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
execution_timeout=dt.timedelta(minutes=1),
retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
)
@@ -748,7 +749,7 @@ async def test_backfill_aware_batch_export_workflow_with_timezone_aware_bounds(
BackfillBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
execution_timeout=dt.timedelta(minutes=1),
retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
)
@@ -819,7 +820,7 @@ async def test_backfill_batch_export_workflow_no_start_at(temporal_worker, tempo
BackfillBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
execution_timeout=dt.timedelta(minutes=1),
retry_policy=temporalio.common.RetryPolicy(maximum_attempts=1),
)
diff --git a/posthog/temporal/tests/batch_exports/test_postgres_batch_export_workflow.py b/posthog/temporal/tests/batch_exports/test_postgres_batch_export_workflow.py
index 65c95d8cd0bd0..db2022b464e0b 100644
--- a/posthog/temporal/tests/batch_exports/test_postgres_batch_export_workflow.py
+++ b/posthog/temporal/tests/batch_exports/test_postgres_batch_export_workflow.py
@@ -16,6 +16,7 @@
from temporalio.testing import WorkflowEnvironment
from temporalio.worker import UnsandboxedWorkflowRunner, Worker
+from posthog import constants
from posthog.batch_exports.service import BatchExportModel, BatchExportSchema
from posthog.temporal.batch_exports.batch_exports import (
finish_batch_export_run,
@@ -513,7 +514,7 @@ async def test_postgres_export_workflow(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[PostgresBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -527,7 +528,7 @@ async def test_postgres_export_workflow(
PostgresBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(seconds=10),
)
@@ -601,7 +602,7 @@ async def test_postgres_export_workflow_without_events(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[PostgresBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -615,7 +616,7 @@ async def test_postgres_export_workflow_without_events(
PostgresBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(seconds=10),
)
@@ -677,7 +678,7 @@ async def test_postgres_export_workflow_backfill_earliest_persons(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[PostgresBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -690,7 +691,7 @@ async def test_postgres_export_workflow_backfill_earliest_persons(
PostgresBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(minutes=10),
)
@@ -736,7 +737,7 @@ async def insert_into_postgres_activity_mocked(_: PostgresInsertInputs) -> str:
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[PostgresBatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -750,7 +751,7 @@ async def insert_into_postgres_activity_mocked(_: PostgresInsertInputs) -> str:
PostgresBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -788,7 +789,7 @@ class InsufficientPrivilege(Exception):
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[PostgresBatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -802,7 +803,7 @@ class InsufficientPrivilege(Exception):
PostgresBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -837,7 +838,7 @@ async def never_finish_activity(_: PostgresInsertInputs) -> str:
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[PostgresBatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -850,7 +851,7 @@ async def never_finish_activity(_: PostgresInsertInputs) -> str:
PostgresBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
await asyncio.sleep(5)
diff --git a/posthog/temporal/tests/batch_exports/test_redshift_batch_export_workflow.py b/posthog/temporal/tests/batch_exports/test_redshift_batch_export_workflow.py
index 20c38545490b9..cbb75d8948301 100644
--- a/posthog/temporal/tests/batch_exports/test_redshift_batch_export_workflow.py
+++ b/posthog/temporal/tests/batch_exports/test_redshift_batch_export_workflow.py
@@ -17,6 +17,7 @@
from temporalio.testing import WorkflowEnvironment
from temporalio.worker import UnsandboxedWorkflowRunner, Worker
+from posthog import constants
from posthog.batch_exports.service import BatchExportModel, BatchExportSchema
from posthog.temporal.batch_exports.batch_exports import (
finish_batch_export_run,
@@ -675,7 +676,7 @@ async def test_redshift_export_workflow(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[RedshiftBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -689,7 +690,7 @@ async def test_redshift_export_workflow(
RedshiftBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(seconds=10),
)
@@ -756,7 +757,7 @@ async def insert_into_redshift_activity_mocked(_: RedshiftInsertInputs) -> str:
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[RedshiftBatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -770,7 +771,7 @@ async def insert_into_redshift_activity_mocked(_: RedshiftInsertInputs) -> str:
RedshiftBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(seconds=20),
)
@@ -809,7 +810,7 @@ class InsufficientPrivilege(Exception):
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[RedshiftBatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -823,7 +824,7 @@ class InsufficientPrivilege(Exception):
RedshiftBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
diff --git a/posthog/temporal/tests/batch_exports/test_s3_batch_export_workflow.py b/posthog/temporal/tests/batch_exports/test_s3_batch_export_workflow.py
index f9df20582783b..2195725a5d8ed 100644
--- a/posthog/temporal/tests/batch_exports/test_s3_batch_export_workflow.py
+++ b/posthog/temporal/tests/batch_exports/test_s3_batch_export_workflow.py
@@ -21,6 +21,7 @@
from temporalio.testing import WorkflowEnvironment
from temporalio.worker import UnsandboxedWorkflowRunner, Worker
+from posthog import constants
from posthog.batch_exports.service import BatchExportModel, BatchExportSchema
from posthog.temporal.batch_exports.batch_exports import (
finish_batch_export_run,
@@ -889,7 +890,7 @@ async def test_s3_export_workflow_with_minio_bucket(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -902,7 +903,7 @@ async def test_s3_export_workflow_with_minio_bucket(
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(minutes=10),
)
@@ -979,7 +980,7 @@ async def test_s3_export_workflow_backfill_earliest_persons_with_minio_bucket(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -992,7 +993,7 @@ async def test_s3_export_workflow_backfill_earliest_persons_with_minio_bucket(
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(minutes=10),
)
@@ -1062,7 +1063,7 @@ async def test_s3_export_workflow_with_minio_bucket_without_events(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -1075,7 +1076,7 @@ async def test_s3_export_workflow_with_minio_bucket_without_events(
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(minutes=10),
)
@@ -1177,7 +1178,7 @@ async def test_s3_export_workflow_with_s3_bucket(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -1190,7 +1191,7 @@ async def test_s3_export_workflow_with_s3_bucket(
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(seconds=10),
)
@@ -1270,7 +1271,7 @@ async def test_s3_export_workflow_with_minio_bucket_and_custom_key_prefix(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -1283,7 +1284,7 @@ async def test_s3_export_workflow_with_minio_bucket_and_custom_key_prefix(
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(seconds=10),
)
@@ -1356,7 +1357,7 @@ async def insert_into_s3_activity_mocked(_: S3InsertInputs) -> str:
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -1370,7 +1371,7 @@ async def insert_into_s3_activity_mocked(_: S3InsertInputs) -> str:
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -1409,7 +1410,7 @@ class ParamValidationError(Exception):
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -1423,7 +1424,7 @@ class ParamValidationError(Exception):
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -1461,7 +1462,7 @@ async def never_finish_activity(_: S3InsertInputs) -> str:
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -1474,7 +1475,7 @@ async def never_finish_activity(_: S3InsertInputs) -> str:
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
await asyncio.sleep(5)
@@ -2035,7 +2036,7 @@ def __init__(self, *args, **kwargs):
await WorkflowEnvironment.start_time_skipping() as activity_environment,
Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[S3BatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -2053,7 +2054,7 @@ def __init__(self, *args, **kwargs):
S3BatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=2),
execution_timeout=dt.timedelta(minutes=2),
)
diff --git a/posthog/temporal/tests/batch_exports/test_snowflake_batch_export_workflow.py b/posthog/temporal/tests/batch_exports/test_snowflake_batch_export_workflow.py
index a08e49357e9ee..cb6f352cb1d6f 100644
--- a/posthog/temporal/tests/batch_exports/test_snowflake_batch_export_workflow.py
+++ b/posthog/temporal/tests/batch_exports/test_snowflake_batch_export_workflow.py
@@ -15,7 +15,6 @@
import pytest_asyncio
import responses
import snowflake.connector
-from django.conf import settings
from django.test import override_settings
from requests.models import PreparedRequest
from temporalio import activity
@@ -25,6 +24,7 @@
from temporalio.testing import WorkflowEnvironment
from temporalio.worker import UnsandboxedWorkflowRunner, Worker
+from posthog import constants
from posthog.batch_exports.service import BatchExportModel, BatchExportSchema
from posthog.temporal.batch_exports.batch_exports import (
finish_batch_export_run,
@@ -423,7 +423,7 @@ async def test_snowflake_export_workflow_exports_events(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -446,7 +446,7 @@ async def test_snowflake_export_workflow_exports_events(
inputs,
id=workflow_id,
execution_timeout=dt.timedelta(seconds=10),
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -494,7 +494,7 @@ async def test_snowflake_export_workflow_without_events(ateam, snowflake_batch_e
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -515,7 +515,7 @@ async def test_snowflake_export_workflow_without_events(ateam, snowflake_batch_e
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -580,7 +580,7 @@ async def test_snowflake_export_workflow_raises_error_on_put_fail(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -603,7 +603,7 @@ def __init__(self, *args, **kwargs):
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -646,7 +646,7 @@ async def test_snowflake_export_workflow_raises_error_on_copy_fail(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -669,7 +669,7 @@ def __init__(self, *args, **kwargs):
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -697,7 +697,7 @@ async def insert_into_snowflake_activity_mocked(_: SnowflakeInsertInputs) -> str
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -711,7 +711,7 @@ async def insert_into_snowflake_activity_mocked(_: SnowflakeInsertInputs) -> str
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -744,7 +744,7 @@ class ForbiddenError(Exception):
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -758,7 +758,7 @@ class ForbiddenError(Exception):
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -793,7 +793,7 @@ async def never_finish_activity(_: SnowflakeInsertInputs) -> str:
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
mocked_start_batch_export_run,
@@ -806,7 +806,7 @@ async def never_finish_activity(_: SnowflakeInsertInputs) -> str:
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
await asyncio.sleep(5)
@@ -1286,7 +1286,7 @@ async def test_snowflake_export_workflow(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -1299,7 +1299,7 @@ async def test_snowflake_export_workflow(
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(minutes=2),
)
@@ -1370,7 +1370,7 @@ async def test_snowflake_export_workflow_with_many_files(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -1384,7 +1384,7 @@ async def test_snowflake_export_workflow_with_many_files(
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(minutes=2),
)
@@ -1455,7 +1455,7 @@ async def test_snowflake_export_workflow_backfill_earliest_persons(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -1469,7 +1469,7 @@ async def test_snowflake_export_workflow_backfill_earliest_persons(
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
execution_timeout=dt.timedelta(minutes=10),
)
@@ -1526,7 +1526,7 @@ async def test_snowflake_export_workflow_handles_cancellation(
async with await WorkflowEnvironment.start_time_skipping() as activity_environment:
async with Worker(
activity_environment.client,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
workflows=[SnowflakeBatchExportWorkflow],
activities=[
start_batch_export_run,
@@ -1541,7 +1541,7 @@ async def test_snowflake_export_workflow_handles_cancellation(
SnowflakeBatchExportWorkflow.run,
inputs,
id=workflow_id,
- task_queue=settings.TEMPORAL_TASK_QUEUE,
+ task_queue=constants.BATCH_EXPORTS_TASK_QUEUE,
retry_policy=RetryPolicy(maximum_attempts=1),
)
@@ -1633,8 +1633,11 @@ def capture_heartbeat_details(*details):
@pytest.mark.parametrize(
"details",
[
- ([(dt.datetime.now().isoformat(), dt.datetime.now().isoformat())], 1),
- ([(dt.datetime.now().isoformat(), dt.datetime.now().isoformat())],),
+ ([(dt.datetime.now().isoformat(), dt.datetime.now().isoformat())], 10, 1),
+ (
+ [(dt.datetime.now().isoformat(), dt.datetime.now().isoformat())],
+ 10,
+ ),
],
)
def test_snowflake_heartbeat_details_parses_from_tuple(details):
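
The parametrize change above inserts an extra element (`10`) between the done-ranges list and the trailing counter, so the heartbeat parser has to tolerate detail tuples of varying length. The sketch below is purely illustrative of that tolerance; the class and field names are invented here and are not the repo's actual heartbeat classes.

```python
# Illustrative parser for variable-length heartbeat detail tuples: the first element
# is a list of (start, end) ISO pairs, any trailing elements are optional counters.
import datetime as dt
from dataclasses import dataclass, field


@dataclass
class ParsedHeartbeat:
    done_ranges: list[tuple[dt.datetime, dt.datetime]]
    extra_counters: tuple[int, ...] = field(default_factory=tuple)

    @classmethod
    def from_activity_details(cls, details: tuple) -> "ParsedHeartbeat":
        raw_ranges, *rest = details
        done_ranges = [
            (dt.datetime.fromisoformat(start), dt.datetime.fromisoformat(end))
            for start, end in raw_ranges
        ]
        # Tolerate any number of trailing counters so older and newer detail shapes both parse.
        return cls(done_ranges=done_ranges, extra_counters=tuple(int(x) for x in rest))


details = ([(dt.datetime.now().isoformat(), dt.datetime.now().isoformat())], 10, 1)
parsed = ParsedHeartbeat.from_activity_details(details)
assert len(parsed.done_ranges) == 1
assert parsed.extra_counters == (10, 1)
```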
diff --git a/posthog/temporal/tests/data_imports/test_end_to_end.py b/posthog/temporal/tests/data_imports/test_end_to_end.py
index 1bd242a697af0..5bebcd72a5f57 100644
--- a/posthog/temporal/tests/data_imports/test_end_to_end.py
+++ b/posthog/temporal/tests/data_imports/test_end_to_end.py
@@ -16,6 +16,7 @@
from django.test import override_settings
from dlt.common.configuration.specs.aws_credentials import AwsCredentials
from dlt.sources.helpers.rest_client.client import RESTClient
+import s3fs
from temporalio.common import RetryPolicy
from temporalio.testing import WorkflowEnvironment
from temporalio.worker import UnsandboxedWorkflowRunner, Worker
@@ -1186,3 +1187,85 @@ async def test_missing_source(team, stripe_balance_transaction):
assert exc.value.cause.cause.message == "Source or schema no longer exists - deleted temporal schedule"
mock_delete_external_data_schedule.assert_called()
+
+
+@pytest.mark.django_db(transaction=True)
+@pytest.mark.asyncio
+async def test_postgres_nan_numerical_values(team, postgres_config, postgres_connection):
+ await postgres_connection.execute(
+ "CREATE TABLE IF NOT EXISTS {schema}.numerical_nan (id integer, nan_column numeric)".format(
+ schema=postgres_config["schema"]
+ )
+ )
+ await postgres_connection.execute(
+ "INSERT INTO {schema}.numerical_nan (id, nan_column) VALUES (1, 'NaN'::numeric)".format(
+ schema=postgres_config["schema"]
+ )
+ )
+ await postgres_connection.commit()
+
+ await _run(
+ team=team,
+ schema_name="numerical_nan",
+ table_name="postgres_numerical_nan",
+ source_type="Postgres",
+ job_inputs={
+ "host": postgres_config["host"],
+ "port": postgres_config["port"],
+ "database": postgres_config["database"],
+ "user": postgres_config["user"],
+ "password": postgres_config["password"],
+ "schema": postgres_config["schema"],
+ "ssh_tunnel_enabled": "False",
+ },
+ mock_data_response=[],
+ )
+
+ if settings.TEMPORAL_TASK_QUEUE == DATA_WAREHOUSE_TASK_QUEUE:
+ res = await sync_to_async(execute_hogql_query)(f"SELECT * FROM postgres_numerical_nan", team)
+ columns = res.columns
+ results = res.results
+
+ assert columns is not None
+ assert len(columns) == 2
+ assert columns[0] == "id"
+ assert columns[1] == "nan_column"
+
+ assert results is not None
+ assert len(results) == 1
+ assert results[0] == (1, None)
+
+
+@pytest.mark.django_db(transaction=True)
+@pytest.mark.asyncio
+async def test_delete_table_on_reset(team, stripe_balance_transaction):
+ if settings.TEMPORAL_TASK_QUEUE == DATA_WAREHOUSE_TASK_QUEUE_V2:
+ with (
+ mock.patch.object(DeltaTable, "delete") as mock_delta_table_delete,
+ mock.patch.object(s3fs.S3FileSystem, "delete") as mock_s3_delete,
+ ):
+ workflow_id, inputs = await _run(
+ team=team,
+ schema_name="BalanceTransaction",
+ table_name="stripe_balancetransaction",
+ source_type="Stripe",
+ job_inputs={"stripe_secret_key": "test-key", "stripe_account_id": "acct_id", "reset_pipeline": "True"},
+ mock_data_response=stripe_balance_transaction["data"],
+ )
+
+ source = await sync_to_async(ExternalDataSource.objects.get)(id=inputs.external_data_source_id)
+
+ assert source.job_inputs is not None and isinstance(source.job_inputs, dict)
+ source.job_inputs["reset_pipeline"] = "True"
+
+ await sync_to_async(source.save)()
+
+ await _execute_run(str(uuid.uuid4()), inputs, stripe_balance_transaction["data"])
+
+ mock_delta_table_delete.assert_called()
+ mock_s3_delete.assert_called()
+
+ await sync_to_async(source.refresh_from_db)()
+
+ assert source.job_inputs is not None and isinstance(source.job_inputs, dict)
+ assert "reset_pipeline" not in source.job_inputs.keys()
diff --git a/posthog/test/__snapshots__/test_feature_flag.ambr b/posthog/test/__snapshots__/test_feature_flag.ambr
index 531b385935652..70c56f7e2a2f2 100644
--- a/posthog/test/__snapshots__/test_feature_flag.ambr
+++ b/posthog/test/__snapshots__/test_feature_flag.ambr
@@ -174,6 +174,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -269,6 +270,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -346,6 +348,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -548,6 +551,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -647,6 +651,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -724,6 +729,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -789,6 +795,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -991,6 +998,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1153,6 +1161,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1355,6 +1364,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1454,6 +1464,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1573,6 +1584,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1650,6 +1662,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -1852,6 +1865,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2167,6 +2181,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2336,6 +2351,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2413,6 +2429,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
@@ -2615,6 +2632,7 @@
"posthog_team"."person_display_name_properties",
"posthog_team"."live_events_columns",
"posthog_team"."recording_domains",
+ "posthog_team"."human_friendly_comparison_periods",
"posthog_team"."cookieless_server_hash_mode",
"posthog_team"."primary_dashboard_id",
"posthog_team"."default_data_theme",
diff --git a/posthog/utils.py b/posthog/utils.py
index 5a22bfcdde9ff..1657dbcfa00e2 100644
--- a/posthog/utils.py
+++ b/posthog/utils.py
@@ -174,6 +174,7 @@ def relative_date_parse_with_delta_mapping(
timezone_info: ZoneInfo,
*,
always_truncate: bool = False,
+ human_friendly_comparison_periods: bool = False,
now: Optional[datetime.datetime] = None,
increase: bool = False,
) -> tuple[datetime.datetime, Optional[dict[str, int]], str | None]:
@@ -201,71 +202,95 @@ def relative_date_parse_with_delta_mapping(
parsed_dt = parsed_dt.astimezone(timezone_info)
return parsed_dt, None, None
- regex = r"\-?(?P[0-9]+)?(?P[a-zA-Z])(?PStart|End)?"
+ regex = r"\-?(?P[0-9]+)?(?P[hdwmqyHDWMQY])(?PStart|End)?"
match = re.search(regex, input)
parsed_dt = (now or dt.datetime.now()).astimezone(timezone_info)
delta_mapping: dict[str, int] = {}
if not match:
return parsed_dt, delta_mapping, None
- elif match.group("type") == "h":
- if match.group("number"):
- delta_mapping["hours"] = int(match.group("number"))
- if match.group("position") == "Start":
+
+ delta_mapping = get_delta_mapping_for(
+ **match.groupdict(),
+ human_friendly_comparison_periods=human_friendly_comparison_periods,
+ )
+
+ if increase:
+ parsed_dt += relativedelta(**delta_mapping) # type: ignore
+ else:
+ parsed_dt -= relativedelta(**delta_mapping) # type: ignore
+
+ if always_truncate:
+ # Truncate to the start of the hour for hour-precision datetimes, to the start of the day for larger intervals
+ # TODO: Remove this from this function, this should not be the responsibility of it
+ if "hours" in delta_mapping:
+ parsed_dt = parsed_dt.replace(minute=0, second=0, microsecond=0)
+ else:
+ parsed_dt = parsed_dt.replace(hour=0, minute=0, second=0, microsecond=0)
+ return parsed_dt, delta_mapping, match.group("position") or None
+
+
+def get_delta_mapping_for(
+ *,
+ kind: str,
+ number: Optional[str] = None,
+ position: Optional[str] = None,
+ human_friendly_comparison_periods: bool = False,
+) -> dict[str, int]:
+ delta_mapping: dict[str, int] = {}
+
+ if kind == "h":
+ if number:
+ delta_mapping["hours"] = int(number)
+ if position == "Start":
delta_mapping["minute"] = 0
delta_mapping["second"] = 0
delta_mapping["microsecond"] = 0
- elif match.group("position") == "End":
+ elif position == "End":
delta_mapping["minute"] = 59
delta_mapping["second"] = 59
delta_mapping["microsecond"] = 999999
- elif match.group("type") == "d":
- if match.group("number"):
- delta_mapping["days"] = int(match.group("number"))
- if match.group("position") == "Start":
+ elif kind == "d":
+ if number:
+ delta_mapping["days"] = int(number)
+ if position == "Start":
delta_mapping["hour"] = 0
delta_mapping["minute"] = 0
delta_mapping["second"] = 0
delta_mapping["microsecond"] = 0
- elif match.group("position") == "End":
+ elif position == "End":
delta_mapping["hour"] = 23
delta_mapping["minute"] = 59
delta_mapping["second"] = 59
delta_mapping["microsecond"] = 999999
- elif match.group("type") == "w":
- if match.group("number"):
- delta_mapping["weeks"] = int(match.group("number"))
- elif match.group("type") == "m":
- if match.group("number"):
- delta_mapping["months"] = int(match.group("number"))
- if match.group("position") == "Start":
+ elif kind == "w":
+ if number:
+ delta_mapping["weeks"] = int(number)
+ elif kind == "m":
+ if number:
+ if human_friendly_comparison_periods:
+ delta_mapping["weeks"] = 4
+ else:
+ delta_mapping["months"] = int(number)
+ if position == "Start":
delta_mapping["day"] = 1
- elif match.group("position") == "End":
+ elif position == "End":
delta_mapping["day"] = 31
- elif match.group("type") == "q":
- if match.group("number"):
- delta_mapping["weeks"] = 13 * int(match.group("number"))
- elif match.group("type") == "y":
- if match.group("number"):
- delta_mapping["years"] = int(match.group("number"))
- if match.group("position") == "Start":
+ elif kind == "q":
+ if number:
+ delta_mapping["weeks"] = 13 * int(number)
+ elif kind == "y":
+ if number:
+ if human_friendly_comparison_periods:
+ delta_mapping["weeks"] = 52
+ else:
+ delta_mapping["years"] = int(number)
+ if position == "Start":
delta_mapping["month"] = 1
delta_mapping["day"] = 1
- elif match.group("position") == "End":
+ elif position == "End":
delta_mapping["day"] = 31
- if increase:
- parsed_dt += relativedelta(**delta_mapping) # type: ignore
- else:
- parsed_dt -= relativedelta(**delta_mapping) # type: ignore
-
- if always_truncate:
- # Truncate to the start of the hour for hour-precision datetimes, to the start of the day for larger intervals
- # TODO: Remove this from this function, this should not be the responsibility of it
- if "hours" in delta_mapping:
- parsed_dt = parsed_dt.replace(minute=0, second=0, microsecond=0)
- else:
- parsed_dt = parsed_dt.replace(hour=0, minute=0, second=0, microsecond=0)
- return parsed_dt, delta_mapping, match.group("position") or None
+ return delta_mapping
def relative_date_parse(
@@ -273,11 +298,17 @@ def relative_date_parse(
timezone_info: ZoneInfo,
*,
always_truncate: bool = False,
+ human_friendly_comparison_periods: bool = False,
now: Optional[datetime.datetime] = None,
increase: bool = False,
) -> datetime.datetime:
return relative_date_parse_with_delta_mapping(
- input, timezone_info, always_truncate=always_truncate, now=now, increase=increase
+ input,
+ timezone_info,
+ always_truncate=always_truncate,
+ human_friendly_comparison_periods=human_friendly_comparison_periods,
+ now=now,
+ increase=increase,
)[0]
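
The `posthog/utils.py` changes extract the per-kind delta construction into `get_delta_mapping_for` and thread a new `human_friendly_comparison_periods` flag through `relative_date_parse` and `relative_date_parse_with_delta_mapping`. With the flag on, a month lookback maps to a fixed 4 weeks and a year to a fixed 52 weeks (independent of the numeric multiplier, as written in the diff), so comparison periods land on the same weekday. A usage sketch, assuming the helpers are importable from `posthog.utils` inside a configured PostHog/Django environment:

```python
# Usage sketch for the refactored helpers added in this diff.
from zoneinfo import ZoneInfo

from posthog.utils import get_delta_mapping_for, relative_date_parse_with_delta_mapping

# Default behaviour: "-1m" maps to a calendar month.
assert get_delta_mapping_for(kind="m", number="1") == {"months": 1}

# With the flag, month/year lookbacks become fixed week-based deltas.
assert get_delta_mapping_for(kind="m", number="1", human_friendly_comparison_periods=True) == {"weeks": 4}
assert get_delta_mapping_for(kind="y", number="1", human_friendly_comparison_periods=True) == {"weeks": 52}

parsed_dt, delta, position = relative_date_parse_with_delta_mapping(
    "-1m",
    ZoneInfo("UTC"),
    human_friendly_comparison_periods=True,
)
assert delta == {"weeks": 4}
assert position is None
```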
diff --git a/requirements.in b/requirements.in
index 5ba89d26f6b5c..0e067e967257a 100644
--- a/requirements.in
+++ b/requirements.in
@@ -17,7 +17,7 @@ clickhouse-driver==0.2.7
clickhouse-pool==0.5.3
conditional-cache==1.2
cryptography==39.0.2
-deltalake==0.22.3
+deltalake==0.23.2
dj-database-url==0.5.0
Django~=4.2.15
django-axes==5.9.0
diff --git a/requirements.txt b/requirements.txt
index 1c9c3e14c7ceb..467cb5be0be3c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -141,7 +141,7 @@ defusedxml==0.6.0
# via
# python3-openid
# social-auth-core
-deltalake==0.22.3
+deltalake==0.23.2
# via -r requirements.in
distro==1.9.0
# via openai
@@ -273,8 +273,6 @@ googleapis-common-protos==1.60.0
# via
# google-api-core
# grpcio-status
-greenlet==3.1.1
- # via sqlalchemy
grpcio==1.63.2
# via
# -r requirements.in
diff --git a/rust/.sqlx/query-04abdef9c07ae1a30bb6f22abcfb4dcdf2e218e48e0fd8a247e1b7ae0f04aee3.json b/rust/.sqlx/query-04abdef9c07ae1a30bb6f22abcfb4dcdf2e218e48e0fd8a247e1b7ae0f04aee3.json
new file mode 100644
index 0000000000000..5c8b96e695c28
--- /dev/null
+++ b/rust/.sqlx/query-04abdef9c07ae1a30bb6f22abcfb4dcdf2e218e48e0fd8a247e1b7ae0f04aee3.json
@@ -0,0 +1,21 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n INSERT INTO posthog_propertydefinition (id, name, type, group_type_index, is_numerical, volume_30_day, query_usage_30_day, team_id, project_id, property_type)\n VALUES ($1, $2, $3, $4, $5, NULL, NULL, $6, $7, $8)\n ON CONFLICT (team_id, name, type, coalesce(group_type_index, -1))\n DO UPDATE SET property_type=EXCLUDED.property_type WHERE posthog_propertydefinition.property_type IS NULL\n ",
+ "describe": {
+ "columns": [],
+ "parameters": {
+ "Left": [
+ "Uuid",
+ "Varchar",
+ "Int2",
+ "Int2",
+ "Bool",
+ "Int4",
+ "Int8",
+ "Varchar"
+ ]
+ },
+ "nullable": []
+ },
+ "hash": "04abdef9c07ae1a30bb6f22abcfb4dcdf2e218e48e0fd8a247e1b7ae0f04aee3"
+}
diff --git a/rust/.sqlx/query-2b9a8c4b8d323e1673d805125b4073799ecba84594ca04cfb24481cffbf6f6ca.json b/rust/.sqlx/query-2b9a8c4b8d323e1673d805125b4073799ecba84594ca04cfb24481cffbf6f6ca.json
new file mode 100644
index 0000000000000..785a13a6d1ce7
--- /dev/null
+++ b/rust/.sqlx/query-2b9a8c4b8d323e1673d805125b4073799ecba84594ca04cfb24481cffbf6f6ca.json
@@ -0,0 +1,18 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n INSERT INTO posthog_eventdefinition (id, name, volume_30_day, query_usage_30_day, team_id, project_id, last_seen_at, created_at)\n VALUES ($1, $2, NULL, NULL, $3, $4, $5, NOW()) ON CONFLICT\n ON CONSTRAINT posthog_eventdefinition_team_id_name_80fa0b87_uniq\n DO UPDATE SET last_seen_at = $5\n ",
+ "describe": {
+ "columns": [],
+ "parameters": {
+ "Left": [
+ "Uuid",
+ "Varchar",
+ "Int4",
+ "Int8",
+ "Timestamptz"
+ ]
+ },
+ "nullable": []
+ },
+ "hash": "2b9a8c4b8d323e1673d805125b4073799ecba84594ca04cfb24481cffbf6f6ca"
+}
diff --git a/rust/.sqlx/query-917e3d14c15558a1e0bb1d7015ed687eb545ee9d4ccbb8b69c958a357d49f687.json b/rust/.sqlx/query-917e3d14c15558a1e0bb1d7015ed687eb545ee9d4ccbb8b69c958a357d49f687.json
deleted file mode 100644
index 6f3e42a0a8b9b..0000000000000
--- a/rust/.sqlx/query-917e3d14c15558a1e0bb1d7015ed687eb545ee9d4ccbb8b69c958a357d49f687.json
+++ /dev/null
@@ -1,17 +0,0 @@
-{
- "db_name": "PostgreSQL",
- "query": "\n INSERT INTO posthog_eventdefinition (id, name, volume_30_day, query_usage_30_day, team_id, last_seen_at, created_at)\n VALUES ($1, $2, NULL, NULL, $3, $4, NOW()) ON CONFLICT\n ON CONSTRAINT posthog_eventdefinition_team_id_name_80fa0b87_uniq\n DO UPDATE SET last_seen_at = $4\n ",
- "describe": {
- "columns": [],
- "parameters": {
- "Left": [
- "Uuid",
- "Varchar",
- "Int4",
- "Timestamptz"
- ]
- },
- "nullable": []
- },
- "hash": "917e3d14c15558a1e0bb1d7015ed687eb545ee9d4ccbb8b69c958a357d49f687"
-}
diff --git a/rust/.sqlx/query-42e393046a686e6a69daa920dc2ab521aa6f393027c399a0c40139f5f8a0a45e.json b/rust/.sqlx/query-9e0e25b9966a23792427c27a80888a75efdb8abe195339e0a1676ebed6fc61ef.json
similarity index 57%
rename from rust/.sqlx/query-42e393046a686e6a69daa920dc2ab521aa6f393027c399a0c40139f5f8a0a45e.json
rename to rust/.sqlx/query-9e0e25b9966a23792427c27a80888a75efdb8abe195339e0a1676ebed6fc61ef.json
index 890675aa24d0d..f2582dca5c9b4 100644
--- a/rust/.sqlx/query-42e393046a686e6a69daa920dc2ab521aa6f393027c399a0c40139f5f8a0a45e.json
+++ b/rust/.sqlx/query-9e0e25b9966a23792427c27a80888a75efdb8abe195339e0a1676ebed6fc61ef.json
@@ -1,16 +1,17 @@
{
"db_name": "PostgreSQL",
- "query": "INSERT INTO posthog_eventproperty (event, property, team_id) VALUES ($1, $2, $3) ON CONFLICT DO NOTHING",
+ "query": "INSERT INTO posthog_eventproperty (event, property, team_id, project_id) VALUES ($1, $2, $3, $4) ON CONFLICT DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Varchar",
"Varchar",
- "Int4"
+ "Int4",
+ "Int8"
]
},
"nullable": []
},
- "hash": "42e393046a686e6a69daa920dc2ab521aa6f393027c399a0c40139f5f8a0a45e"
+ "hash": "9e0e25b9966a23792427c27a80888a75efdb8abe195339e0a1676ebed6fc61ef"
}
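
The `.sqlx` files are sqlx's offline prepared-query cache (regenerated with `cargo sqlx prepare`), so the rename above simply reflects the query text change: `posthog_eventproperty` inserts now also bind `project_id` as an `Int8`. A hedged sketch of the same upsert in Python via asyncpg (the repo executes it from Rust through sqlx); the DSN is a placeholder and the table is assumed to exist.

```python
# Hedged sketch of the updated eventproperty upsert; DSN and values are placeholders.
import asyncio

import asyncpg

QUERY = (
    "INSERT INTO posthog_eventproperty (event, property, team_id, project_id) "
    "VALUES ($1, $2, $3, $4) ON CONFLICT DO NOTHING"
)


async def main() -> None:
    conn = await asyncpg.connect("postgresql://posthog:posthog@localhost/posthog")  # placeholder DSN
    try:
        # team_id stays a 32-bit int while project_id is bound as a 64-bit int,
        # matching the parameter list in the prepared-query JSON above.
        await conn.execute(QUERY, "$pageview", "$browser", 2, 2)
    finally:
        await conn.close()


if __name__ == "__main__":
    asyncio.run(main())
```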
diff --git a/rust/.sqlx/query-eecef0ce664dfe65dff4452d92a29c948a291ea8218bbbb4e25cd1ad36dbe9f4.json b/rust/.sqlx/query-eecef0ce664dfe65dff4452d92a29c948a291ea8218bbbb4e25cd1ad36dbe9f4.json
deleted file mode 100644
index a54bb9565ea4f..0000000000000
--- a/rust/.sqlx/query-eecef0ce664dfe65dff4452d92a29c948a291ea8218bbbb4e25cd1ad36dbe9f4.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
- "db_name": "PostgreSQL",
- "query": "\n INSERT INTO posthog_propertydefinition (id, name, type, group_type_index, is_numerical, volume_30_day, query_usage_30_day, team_id, property_type)\n VALUES ($1, $2, $3, $4, $5, NULL, NULL, $6, $7)\n ON CONFLICT (team_id, name, type, coalesce(group_type_index, -1))\n DO UPDATE SET property_type=EXCLUDED.property_type WHERE posthog_propertydefinition.property_type IS NULL\n ",
- "describe": {
- "columns": [],
- "parameters": {
- "Left": [
- "Uuid",
- "Varchar",
- "Int2",
- "Int2",
- "Bool",
- "Int4",
- "Varchar"
- ]
- },
- "nullable": []
- },
- "hash": "eecef0ce664dfe65dff4452d92a29c948a291ea8218bbbb4e25cd1ad36dbe9f4"
-}
diff --git a/rust/common/kafka/src/config.rs b/rust/common/kafka/src/config.rs
index 81ef7402ab429..8096efce9d6f4 100644
--- a/rust/common/kafka/src/config.rs
+++ b/rust/common/kafka/src/config.rs
@@ -25,6 +25,10 @@ pub struct KafkaConfig {
pub struct ConsumerConfig {
pub kafka_consumer_group: String,
pub kafka_consumer_topic: String,
+
+ // We default to "earliest" for this, but if you're bringing up a new service, you probably want "latest"
+ #[envconfig(default = "earliest")]
+ pub kafka_consumer_offset_reset: String, // earliest, latest
}
impl ConsumerConfig {
diff --git a/rust/common/kafka/src/kafka_consumer.rs b/rust/common/kafka/src/kafka_consumer.rs
index 79c3be7f986d5..1dcbf21a206a1 100644
--- a/rust/common/kafka/src/kafka_consumer.rs
+++ b/rust/common/kafka/src/kafka_consumer.rs
@@ -47,7 +47,11 @@ impl SingleTopicConsumer {
client_config
.set("bootstrap.servers", &common_config.kafka_hosts)
.set("statistics.interval.ms", "10000")
- .set("group.id", consumer_config.kafka_consumer_group);
+ .set("group.id", consumer_config.kafka_consumer_group)
+ .set(
+ "auto.offset.reset",
+ &consumer_config.kafka_consumer_offset_reset,
+ );
client_config.set("enable.auto.offset.store", "false");
diff --git a/rust/cymbal/src/hack/kafka.rs b/rust/cymbal/src/hack/kafka.rs
index cb26faede2165..977928af79b80 100644
--- a/rust/cymbal/src/hack/kafka.rs
+++ b/rust/cymbal/src/hack/kafka.rs
@@ -44,6 +44,10 @@ pub struct KafkaConfig {
pub struct ConsumerConfig {
pub kafka_consumer_group: String,
pub kafka_consumer_topic: String,
+
+ // We default to "earliest" for this, but if you're bringing up a new service, you probably want "latest"
+ #[envconfig(default = "earliest")]
+ pub kafka_consumer_offset_reset: String, // earliest, latest
}
impl ConsumerConfig {
@@ -97,7 +101,11 @@ impl SingleTopicConsumer {
client_config
.set("bootstrap.servers", &common_config.kafka_hosts)
.set("statistics.interval.ms", "10000")
- .set("group.id", consumer_config.kafka_consumer_group);
+ .set("group.id", consumer_config.kafka_consumer_group)
+ .set(
+ "auto.offset.reset",
+ &consumer_config.kafka_consumer_offset_reset,
+ );
client_config.set("enable.auto.offset.store", "false");
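
Both consumers now expose a configurable offset-reset policy (defaulting to "earliest") and pass it through to librdkafka as `auto.offset.reset`, while `enable.auto.offset.store` stays off so offsets are only stored after a message has actually been handled. A hedged Python equivalent using confluent-kafka; topic, group, broker, and the env-var name are placeholders/assumptions, not the repo's values.

```python
# Python equivalent of the consumer settings wired up above (confluent-kafka).
import os

from confluent_kafka import Consumer

consumer = Consumer(
    {
        "bootstrap.servers": os.environ.get("KAFKA_HOSTS", "localhost:9092"),
        "group.id": "example-consumer-group",
        # Configurable reset policy; "latest" is usually what you want for a brand-new service.
        "auto.offset.reset": os.environ.get("KAFKA_CONSUMER_OFFSET_RESET", "earliest"),
        "enable.auto.offset.store": False,
    }
)
consumer.subscribe(["example-topic"])

try:
    while True:
        msg = consumer.poll(1.0)
        if msg is None or msg.error():
            continue
        # ... handle msg ...
        consumer.store_offsets(message=msg)  # store only after successful handling
finally:
    consumer.close()
```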